diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 34af632814a3..0f95b023ecaa 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -17,3 +17,7 @@ README.md @jafermarq @tanertopal @danieljanes # Changelog /doc/source/ref-changelog.md @jafermarq @tanertopal @danieljanes + +# GitHub Actions and Workflows +/.github/workflows @Robert-Steiner @tanertopal @danieljanes +/.github/actions @Robert-Steiner @tanertopal @danieljanes diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md index 0a8c39f4e08f..af8d6265e6b3 100644 --- a/.github/CODE_OF_CONDUCT.md +++ b/.github/CODE_OF_CONDUCT.md @@ -55,7 +55,7 @@ further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at contact@adap.com. All +reported by contacting the project team at hello@flower.ai. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. 
diff --git a/.github/workflows/_docker-build.yml b/.github/workflows/_docker-build.yml index 99ec0671db66..219c44ee1b3c 100644 --- a/.github/workflows/_docker-build.yml +++ b/.github/workflows/_docker-build.yml @@ -57,7 +57,9 @@ jobs: import hashlib import os - hash = hashlib.sha256('''${{ inputs.build-args }}'''.encode()) + hash = hashlib.sha256('''${{ inputs.namespace-repository }} + ${{ inputs.file-dir }} + ${{ inputs.build-args }}'''.encode()) with open(os.environ['GITHUB_OUTPUT'], 'a') as fh: print(f"id={hash.hexdigest()}", file=fh) @@ -69,22 +71,22 @@ jobs: - name: Extract metadata (tags, labels) for Docker id: meta - uses: docker/metadata-action@31cebacef4805868f9ce9a0cb03ee36c32df2ac4 # v5.3.0 + uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 # v5.5.1 with: images: ${{ inputs.namespace-repository }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 + uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0 - name: Login to Docker Hub - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 + uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0 with: username: ${{ secrets.dockerhub-user }} password: ${{ secrets.dockerhub-token }} - name: Build and push id: build - uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0 + uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v5.3.0 with: platforms: ${{ matrix.platform.docker }} context: "{{defaultContext}}:${{ inputs.file-dir }}" @@ -98,7 +100,7 @@ jobs: touch "/tmp/digests/${digest#sha256:}" - name: Upload digest - uses: actions/upload-artifact@1eb3cb2b3e0f29609092a73eb033bb759a334595 # v4.1.0 + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 with: name: digests-${{ steps.build-id.outputs.id }}-${{ matrix.platform.name }} path: /tmp/digests/* @@ -114,7 +116,7 @@ jobs: metadata: 
${{ steps.meta.outputs.json }} steps: - name: Download digests - uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 + uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7 with: pattern: digests-${{ needs.build.outputs.build-id }}-* path: /tmp/digests @@ -122,16 +124,16 @@ jobs: - name: Docker meta id: meta - uses: docker/metadata-action@31cebacef4805868f9ce9a0cb03ee36c32df2ac4 # v5.3.0 + uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 # v5.5.1 with: images: ${{ inputs.namespace-repository }} tags: ${{ inputs.tags }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 + uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0 - name: Login to Docker Hub - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 + uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0 with: username: ${{ secrets.dockerhub-user }} password: ${{ secrets.dockerhub-token }} diff --git a/.github/workflows/cpp.yml b/.github/workflows/cpp.yml index 7efc879bcee2..97d545132dbb 100644 --- a/.github/workflows/cpp.yml +++ b/.github/workflows/cpp.yml @@ -68,16 +68,18 @@ jobs: cmake -DUSE_LOCAL_FLWR=ON -S . -B build cmake --build build pip install ../.. - timeout 2m python server.py & - pid=$! + timeout 3m flower-superlink --insecure & + sleep 10 + timeout 2m build/flwr_client 0 127.0.0.1:9092 & sleep 3 - build/flwr_client 0 127.0.0.1:8080 & + timeout 2m build/flwr_client 1 127.0.0.1:9092 & sleep 3 - build/flwr_client 1 127.0.0.1:8080 & + flower-server-app server:app --insecure & + pid=$! wait $pid res=$? 
if [[ "$res" = "0" ]]; - then echo "Training worked correctly"; + then echo "Training worked correctly" && exit 0; else echo "Training had an issue" && exit 1; fi diff --git a/.github/workflows/deprecated_baselines.yml b/.github/workflows/deprecated_baselines.yml index 6af1b277ac5f..0859c948e909 100644 --- a/.github/workflows/deprecated_baselines.yml +++ b/.github/workflows/deprecated_baselines.yml @@ -2,12 +2,12 @@ name: Deprecated-Baselines on: push: - branches: - - main + branches: ['main'] + paths: ['baselines/flwr_baselines/**'] pull_request: - branches: - - main - + branches: ['main'] + paths: ['baselines/flwr_baselines/**'] + concurrency: group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_id || github.event.pull_request.number || github.ref }} cancel-in-progress: true diff --git a/.github/workflows/docker-client.yml b/.github/workflows/docker-client.yml deleted file mode 100644 index 3c2d83596733..000000000000 --- a/.github/workflows/docker-client.yml +++ /dev/null @@ -1,36 +0,0 @@ -name: Build docker client image - -on: - workflow_dispatch: - inputs: - flwr-version: - description: "Version of Flower e.g. (1.7.0)." 
- required: true - type: string - -permissions: - contents: read - -jobs: - build-client-images: - name: Build client images - uses: ./.github/workflows/_docker-build.yml - # run only on default branch when using it with workflow_dispatch - if: github.ref_name == github.event.repository.default_branch - strategy: - fail-fast: false - matrix: - python-version: ["3.8", "3.9", "3.10", "3.11"] - with: - namespace-repository: flwr/client - file-dir: src/docker/client - build-args: | - FLWR_VERSION=${{ github.event.inputs.flwr-version }} - BASE_IMAGE_TAG=py${{ matrix.python-version }}-ubuntu22.04 - tags: | - ${{ github.event.inputs.flwr-version }}-py${{ matrix.python-version }}-ubuntu22.04 - ${{ github.event.inputs.flwr-version }} - latest - secrets: - dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} - dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/docker-server.yml b/.github/workflows/docker-superlink.yml similarity index 56% rename from .github/workflows/docker-server.yml rename to .github/workflows/docker-superlink.yml index 1e43715207d4..b6a6b4114ba4 100644 --- a/.github/workflows/docker-server.yml +++ b/.github/workflows/docker-superlink.yml @@ -1,50 +1,45 @@ -name: Build docker server image +name: Build docker SuperLink image on: workflow_dispatch: inputs: flwr-version: - description: "Version of Flower e.g. (1.7.0)." + description: "Version of Flower." required: true type: string - base-image-tag: - description: "The tag of the Flower base image." 
- required: false - type: string - default: "py3.11-ubuntu22.04" permissions: contents: read jobs: - build-server-images: + build-superlink-images: name: Build images uses: ./.github/workflows/_docker-build.yml # run only on default branch when using it with workflow_dispatch if: github.ref_name == github.event.repository.default_branch with: - namespace-repository: flwr/server - file-dir: src/docker/server + namespace-repository: flwr/superlink + file-dir: src/docker/superlink build-args: | FLWR_VERSION=${{ github.event.inputs.flwr-version }} - BASE_IMAGE_TAG=${{ github.event.inputs.base-image-tag }} + PYTHON_VERSION=3.11 + UBUNTU_VERSION=ubuntu22.04 tags: | - ${{ github.event.inputs.flwr-version }}-${{ github.event.inputs.base-image-tag }} + ${{ github.event.inputs.flwr-version }}-py3.11-ubuntu22.04 ${{ github.event.inputs.flwr-version }} - latest secrets: dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} summary: - name: Build images + name: Summary runs-on: ubuntu-22.04 - needs: build-server-images + needs: build-superlink-images timeout-minutes: 10 steps: - run: | echo "### Images" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY - for IMAGE in $(echo ${{ toJson(needs.build-server-images.outputs.metadata) }} | jq -r '.tags[]' ); do + for IMAGE in $(echo ${{ toJson(needs.build-superlink-images.outputs.metadata) }} | jq -r '.tags[]' ); do echo "- $IMAGE" >> $GITHUB_STEP_SUMMARY done diff --git a/.github/workflows/docker-supernode.yml b/.github/workflows/docker-supernode.yml new file mode 100644 index 000000000000..83b53764b794 --- /dev/null +++ b/.github/workflows/docker-supernode.yml @@ -0,0 +1,52 @@ +name: Build docker SuperNode image + +on: + workflow_dispatch: + inputs: + flwr-version: + description: "Version of Flower." 
+ required: true + type: string + +permissions: + contents: read + +jobs: + build-supernode-images: + name: Build images + uses: ./.github/workflows/_docker-build.yml + # run only on default branch when using it with workflow_dispatch + if: github.ref_name == github.event.repository.default_branch + strategy: + fail-fast: false + matrix: + image: [ + { + py-version: "3.8", + tags: "${{ github.event.inputs.flwr-version }}-py3.8-ubuntu22.04" + }, + { + py-version: "3.9", + tags: "${{ github.event.inputs.flwr-version }}-py3.9-ubuntu22.04" + }, + { + py-version: "3.10", + tags: "${{ github.event.inputs.flwr-version }}-py3.10-ubuntu22.04" + }, + { + py-version: "3.11", + # these are two tags, <flwr-version>-py3.11-ubuntu22.04 and <flwr-version>, separated by a \n + tags: "${{ github.event.inputs.flwr-version }}-py3.11-ubuntu22.04\n${{ github.event.inputs.flwr-version }}" + }, + ] + with: + namespace-repository: flwr/supernode + file-dir: src/docker/supernode + build-args: | + FLWR_VERSION=${{ github.event.inputs.flwr-version }} + PYTHON_VERSION=${{ matrix.image.py-version }} + UBUNTU_VERSION=ubuntu22.04 + tags: ${{ matrix.image.tags }} + secrets: + dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} + dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index a4c769fdf850..3f010a4c37b0 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -36,7 +36,7 @@ jobs: cd datasets python -m poetry install - name: Build docs - run: ./dev/build-docs.sh + run: ./dev/build-docs.sh ${{ github.ref == 'refs/heads/main' && github.repository == 'adap/flower' && !github.event.pull_request.head.repo.fork }} - name: Deploy docs if: ${{ github.ref == 'refs/heads/main' && github.repository == 'adap/flower' && !github.event.pull_request.head.repo.fork }} env: diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 62f3c0a78ce4..57802e598546 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -64,6 
+64,8 @@ jobs: - directory: bare-https + - directory: bare-client-auth + - directory: jax - directory: pytorch @@ -130,10 +132,13 @@ jobs: if: ${{ matrix.dataset }} run: python -c "${{ matrix.dataset }}" - name: Run edge client test + if: ${{ matrix.directory != 'bare-client-auth' }} run: ./../test.sh "${{ matrix.directory }}" - name: Run virtual client test + if: ${{ matrix.directory != 'bare-client-auth' }} run: python simulation.py - name: Run driver test + if: ${{ matrix.directory != 'bare-client-auth' }} run: ./../test_driver.sh "${{ matrix.directory }}" - name: Run driver test with REST if: ${{ matrix.directory == 'bare' }} @@ -141,6 +146,9 @@ jobs: - name: Run driver test with SQLite database if: ${{ matrix.directory == 'bare' }} run: ./../test_driver.sh bare sqlite + - name: Run driver test with client authentication + if: ${{ matrix.directory == 'bare-client-auth' }} + run: ./../test_driver.sh bare client-auth strategies: runs-on: ubuntu-22.04 diff --git a/.github/workflows/framework-release.yml b/.github/workflows/framework-release.yml index 04b68fd38af9..a941b47d58fc 100644 --- a/.github/workflows/framework-release.yml +++ b/.github/workflows/framework-release.yml @@ -42,4 +42,4 @@ jobs: curl $wheel_url --output dist/$wheel_name curl $tar_url --output dist/$tar_name - python -m poetry publish -u __token__ -p ${{ secrets.PYPI_TOKEN }} + python -m poetry publish -u __token__ -p ${{ secrets.PYPI_TOKEN_RELEASE_FLWR }} diff --git a/.github/workflows/release-nightly.yml b/.github/workflows/release-nightly.yml index 823ff1513790..16bd720b4991 100644 --- a/.github/workflows/release-nightly.yml +++ b/.github/workflows/release-nightly.yml @@ -3,21 +3,62 @@ name: Release nightly on: schedule: - cron: "0 23 * * *" + - cron: "30 23 * * *" env: FLWR_TELEMETRY_ENABLED: 0 jobs: - release_nightly: + release-nightly: runs-on: ubuntu-22.04 - name: Nightly + name: Release nightly on PyPI if: github.repository == 'adap/flower' + outputs: + name: ${{ steps.release.outputs.name 
}} + version: ${{ steps.release.outputs.version }} + skip: ${{ steps.release.outputs.skip }} steps: - uses: actions/checkout@v4 - name: Bootstrap uses: ./.github/actions/bootstrap - name: Release nightly + if: github.event.schedule == '0 23 * * *' env: PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }} + run: ./dev/publish-nightly.sh + - name: Read nightly version and name + if: github.event.schedule == '30 23 * * *' + id: release run: | - ./dev/publish-nightly.sh + RESULT=$(./dev/publish-nightly.sh --skip-publish) + if [ "$RESULT" == "There were no commits in the last 24 hours." ]; then + echo "skip=true" >> $GITHUB_OUTPUT + fi + + echo "name=$(poetry version | awk {'print $1'})" >> $GITHUB_OUTPUT + echo "version=$(poetry version -s)" >> $GITHUB_OUTPUT + + build-docker-images: + name: Build nightly images + if: github.repository == 'adap/flower' && needs.release-nightly.outputs.skip != 'true' && github.event.schedule == '30 23 * * *' + uses: ./.github/workflows/_docker-build.yml + needs: release-nightly + strategy: + fail-fast: false + matrix: + images: [ + { repository: "flwr/superlink", file-dir: "src/docker/superlink" }, + { repository: "flwr/supernode", file-dir: "src/docker/supernode" } + ] + with: + namespace-repository: ${{ matrix.images.repository }} + file-dir: ${{ matrix.images.file-dir }} + build-args: | + FLWR_VERSION=${{ needs.release-nightly.outputs.version }} + FLWR_PACKAGE=${{ needs.release-nightly.outputs.name }} + tags: | + ${{ needs.release-nightly.outputs.version }} + nightly + secrets: + dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} + dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/swift.yml b/.github/workflows/swift.yml index 2ca596a59361..e0233f5703a9 100644 --- a/.github/workflows/swift.yml +++ b/.github/workflows/swift.yml @@ -18,22 +18,22 @@ jobs: run: working-directory: src/swift/flwr name: Test - runs-on: macos-latest + runs-on: macos-14 steps: - - uses: fwal/setup-swift@cdbe0f7f4c77929b6580e71983e8606e55ffe7e4 + - 
uses: fwal/setup-swift@v2 with: - swift-version: 5 + swift-version: 5.10 - uses: actions/checkout@v4 - name: Run tests - run: arch -x86_64 xcodebuild test -scheme flwr -destination 'platform=iOS Simulator,name=iPhone 14 Pro Max,OS=16.2' + run: arch -x86_64 xcodebuild test -scheme flwr -destination 'platform=iOS Simulator,name=iPhone 15 Pro Max,OS=17.2' build_docs: - runs-on: macos-latest + runs-on: macos-14 name: Build docs steps: - - uses: fwal/setup-swift@cdbe0f7f4c77929b6580e71983e8606e55ffe7e4 + - uses: fwal/setup-swift@v2 with: - swift-version: 5 + swift-version: 5.10 - uses: actions/checkout@v4 - name: Build docs run: ./dev/build-swift-api-ref.sh @@ -41,12 +41,12 @@ jobs: deploy_docs: needs: "build_docs" if: ${{ github.ref == 'refs/heads/main' && github.repository == 'adap/flower' && !github.event.pull_request.head.repo.fork }} - runs-on: macos-latest + runs-on: macos-14 name: Deploy docs steps: - - uses: fwal/setup-swift@cdbe0f7f4c77929b6580e71983e8606e55ffe7e4 + - uses: fwal/setup-swift@v2 with: - swift-version: 5 + swift-version: 5.10 - uses: actions/checkout@v4 - name: Build and deploy docs env: diff --git a/.gitignore b/.gitignore index 7a152abdc715..b0962c2783f0 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ data/ doc/source/api_documentation doc/source/_build +doc/source/dataset/ flwr_logs .cache diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 000000000000..ad6cb69f3052 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,18 @@ +repos: + - repo: local + hooks: + - id: format-code + name: Format Code + entry: ./dev/format.sh + language: script + # Ensures the script runs from the repository root: + pass_filenames: false + stages: [commit] + + - id: run-tests + name: Run Tests + entry: ./dev/test.sh + language: script + # Ensures the script runs from the repository root: + pass_filenames: false + stages: [commit] diff --git a/baselines/doc/source/conf.py b/baselines/doc/source/conf.py index 
dad8650cddaa..a9525c44ab7b 100644 --- a/baselines/doc/source/conf.py +++ b/baselines/doc/source/conf.py @@ -37,7 +37,7 @@ author = "The Flower Authors" # The full version, including alpha/beta/rc tags -release = "1.7.0" +release = "1.8.0" # -- General configuration --------------------------------------------------- diff --git a/baselines/fedpft/.gitignore b/baselines/fedpft/.gitignore new file mode 100644 index 000000000000..4ab8207aedb6 --- /dev/null +++ b/baselines/fedpft/.gitignore @@ -0,0 +1,3 @@ +outputs/ +multirun/ +.ruff_cache/ \ No newline at end of file diff --git a/baselines/fedpft/LICENSE b/baselines/fedpft/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/baselines/fedpft/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/baselines/fedpft/README.md b/baselines/fedpft/README.md new file mode 100644 index 000000000000..45bddfda6103 --- /dev/null +++ b/baselines/fedpft/README.md @@ -0,0 +1,112 @@ +--- +title: Parametric Feature Transfer, One-shot Federated Learning with Foundation Models +url: https://arxiv.org/abs/2402.01862 +labels: [foundation-models, pre-trained, one-shot, one-round] +dataset: [CIFAR-100, Caltech101] +--- + +# FedPFT: One-shot Federated Learning with Foundation Models + +> Note: If you use this baseline in your work, please remember to cite the original authors of the paper as well as the Flower paper. + +**Paper:** [arxiv.org/abs/2402.01862](https://arxiv.org/abs/2402.01862) + +**Authors:** Mahdi Beitollahi, Alex Bie, Sobhan Hemati, Leo Maxime Brunswic, Xu Li, Xi Chen, Guojun Zhang. + +**Abstract:** In one-shot federated learning (FL), clients collaboratively train a global model in a single round of communication. Existing approaches for one-shot FL enhance communication efficiency at the expense of diminished accuracy. This paper introduces FedPFT (Federated Learning with Parametric Feature Transfer), a methodology that harnesses the transferability of foundation models to enhance both accuracy and communication efficiency in one-shot FL. 
The approach involves transferring per-client parametric models (specifically, Gaussian mixtures) of features extracted from foundation models. Subsequently, each parametric model is employed to generate synthetic features for training a classifier head. Experimental results on eight datasets demonstrate that FedPFT enhances the communication-accuracy frontier in both centralized and decentralized FL scenarios, as well as across diverse data-heterogeneity settings such as covariate shift and task shift, with improvements of up to 20.6%. Additionally, FedPFT adheres to the data minimization principle of FL, as clients do not send real features. We demonstrate that sending real features is vulnerable to potent reconstruction attacks. Moreover, we show that FedPFT is amenable to formal privacy guarantees via differential privacy, demonstrating favourable privacy-accuracy tradeoffs. + + +## About this baseline + +**What’s implemented:** The code in this directory replicates the centralized experiments in *Parametric Feature Transfer, One-shot Federated Learning with Foundation Models* (Beitollahi et al., 2024) for CIFAR-100 and Caltech101 datasets, which proposed the FedPFT algorithm. Concretely, it replicates the results in Section 5.2. + +**Datasets:** CIFAR-100 and Caltech101 from HuggingFace + +**Hardware Setup:** These experiments were run on a desktop machine with 8 CPU threads and Nvidia 4070 with 8GB of VRAM. + +**Contributors:** Mahdi Beitollahi (mahdi.beitollahi@queensu.ca). + + +## Experimental Setup + +**Task:** Image classification + +**Model:** This directory utilizes two pre-trained, frozen models as shown in Table 1 of the paper: +* ResNet50 pre-trained on ImageNet is used for CIFAR-100 dataset(see `models/resnet50`). +* CLIP, ViT-B/32 pre-trained on web dataset is used for Caltech101 dataset (see `models/clip_vit`) + +**Dataset:** This baseline includes the CIFAR-100 and Caltech101 datasets via [flwr-datasets](https://flower.ai/docs/datasets/). 
By default, it will be partitioned into 50 clients following a Dirichlet distribution with $\alpha$=0.1. + +| Dataset | #classes | #partitions | partitioning method | partition settings | +| :------ | :---: | :---: | :---: | :---: | +| CIFAR-100 | 100 | 50 | Dirichlet distribution | $\alpha$=0.1 | +| Caltech101 | 101 | 50 | Dirichlet distribution | $\alpha$=0.1 | + +**Training Hyperparameters:** The following table shows the main hyperparameters for this baseline with their default value (i.e. the value used if you run `python main.py` directly) + +| Description | Default Value | +| ----------- | ----- | +| total clients | 50 | +| clients per round | 50 | +| number of rounds | 1 | +| client resources | {'num_cpus': 2.0, 'num_gpus': 0.0 }| +| data partition | Dirichlet distribution with $\alpha$=0.1 | +| Number of mixtures | 1 | +| Covariance type | spherical | +| tolerance | 1e-12 | +| maximum EM iterations | 1e3 | + + +## Environment Setup + +To construct the Python environment, simply run: + +```bash +# Set directory to use python 3.10 (install with `pyenv install 3.10.12` if you don't have it) +pyenv local 3.10.12 + +# Tell poetry to use python3.10 +poetry env use 3.10.12 + +# Install +poetry install +``` + + +## Running the Experiments + +To run this FedPFT with CIFAR-100 baseline, first ensure you have activated your Poetry environment (execute `poetry shell` from this directory), then: + +```bash +python -m fedpft.main # this will run using the default settings in the `conf/config.yaml` + +# you can override settings directly from the command line +python -m fedpft.main dataset=Caltech101 model=clip # will set dataset to Caltech101 and the pre-trained model to Clip-ViT/B32 +``` + +To run using FedAvg: +```bash +# this will use a frozen, pre-trained model and train the classifier head +python -m fedpft.main strategy=FedAvg client=FedAvg num_rounds=20 dataset=Caltech101 model=clip num_gpus=0.2 + +``` + + +## Expected Results + + +With the following command, we run both FedPFT 
and FedAvg configurations. + +```bash +# FedPFT +python -m fedpft.main dataset=CIFAR100 model=resnet50 +python -m fedpft.main dataset=Caltech101 model=clip + +# FedAvg with pre-trained, frozen models +python -m fedpft.main strategy=fedavg client=fedavg dataset=CIFAR100 model=resnet50 num_rounds=20 strategy.on_fit_config_fn.num_epochs=1 num_gpus=0.5 +python -m fedpft.main strategy=fedavg client=fedavg dataset=Caltech101 model=clip num_rounds=20 num_gpus=0.2 +``` + +The above commands would generate results that you can plot and would look like the plot shown below. This plot was generated using the jupyter notebook in the `docs/` directory of this baseline after running the commands above. + +![](_static/FedPft.png) diff --git a/baselines/fedpft/_static/FedPft.png b/baselines/fedpft/_static/FedPft.png new file mode 100644 index 000000000000..76028f4f24b0 Binary files /dev/null and b/baselines/fedpft/_static/FedPft.png differ diff --git a/baselines/fedpft/docs/viz_and_plot_results.ipynb b/baselines/fedpft/docs/viz_and_plot_results.ipynb new file mode 100644 index 000000000000..68077f7b59c7 --- /dev/null +++ b/baselines/fedpft/docs/viz_and_plot_results.ipynb @@ -0,0 +1,166 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 16, + "id": "5e0cf2a9-b782-48de-ac45-128726a26e64", + "metadata": {}, + "outputs": [], + "source": [ + "import pickle\n", + "import yaml\n", + "from pathlib import Path\n", + "import os\n", + "\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "7ea3e149-ce6f-4ba0-aa41-e0501a04efe3", + "metadata": {}, + "outputs": [], + "source": [ + "def saveFig(name, fig):\n", + " fig.savefig(\n", + " name,\n", + " dpi=None,\n", + " facecolor=fig.get_facecolor(),\n", + " edgecolor=\"none\",\n", + " orientation=\"portrait\",\n", + " format=\"png\",\n", + " transparent=False,\n", + " bbox_inches=\"tight\",\n", + " pad_inches=0.2,\n", + " metadata=None,\n", + " )" + ] 
+ }, + { + "cell_type": "code", + "execution_count": 18, + "id": "4b010856-0d99-4d81-8fb0-7a927f10eeaf", + "metadata": {}, + "outputs": [], + "source": [ + "# Update the paths below to the directories containing the results for FedPFT and FedAvg\n", + "path_fedpft_resutls_cifar100 = os.path.join(os.path.realpath('..'),'outputs','2024-04-14','16-36-16')\n", + "path_fedpft_resutls_caltech101 = os.path.join(os.path.realpath('..'),'outputs','2024-04-14','16-44-20')\n", + "\n", + "path_fedavg_resutls_cifar100 = os.path.join(os.path.realpath('..'),'outputs','2024-04-14','23-24-25')\n", + "path_fedavg_resutls_caltech101 = os.path.join(os.path.realpath('..'),'outputs','2024-04-14','22-32-11')\n" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "2e3e165c-1ce6-4efa-a4e1-1372586e436e", + "metadata": {}, + "outputs": [], + "source": [ + "# load results\n", + "def read_accuracies(path_to_pickle):\n", + " for result in list(Path(path_to_pickle).glob(\"*.pkl\")):\n", + " with open(result, \"rb\") as handle:\n", + " data = pickle.load(handle)\n", + "\n", + " accuracies = data['history'].metrics_distributed['accuracy']\n", + " return accuracies\n" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "77b70c73", + "metadata": {}, + "outputs": [], + "source": [ + "fedpft_cifar = read_accuracies(path_fedpft_resutls_cifar100)\n", + "fedpft_caltech = read_accuracies(path_fedpft_resutls_caltech101)\n", + "fedavg_cifar = read_accuracies(path_fedavg_resutls_cifar100)\n", + "fedavg_caltech = read_accuracies(path_fedavg_resutls_caltech101)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "e1a678de", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAA1MAAAD0CAYAAAB+SXxXAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8WgzjOAAAACXBIWXMAAA9hAAAPYQGoP6dpAABrxElEQVR4nO3deVxU1fsH8M+wg4obmyCBqIm7hfueG6aZZpo7aoqpoCmZWwVuZWWplVu5fnPfyiVNJdTURNPU0nJFcUEBQQVlHZjz++P+ZmCYAYZhhhng83695iVz7vZwGOeZZ+6558qEEAJERERERERUJBamDoCIiIiIiKg0YjFFRERERESkBxZTREREREREemAxRUREREREpAcWU0RERERERHpgMUVERERERKQHFlNERERERER6YDFFRERERESkBxZTREREREREemAxRURERKSnzp07o3PnziV+3A0bNkAmk+H8+fMlfmxzFh0dDZlMhg0bNqja5syZA5lMZrKYTPUaoZLBYopMIioqCu+99x58fHxgZ2cHR0dHtGvXDt988w3S0tJU63l7e+ONN95Q21Ymk2l9uLm5qa337Nkz2NnZQSaT4erVq1rjGDVqlNo+bG1t8fLLLyM0NBTp6eka62/fvh3Dhw9H3bp1IZPJCnxzzMjIwIwZM+Du7g57e3u0atUK4eHhWtc9ffo02rdvDwcHB7i5uWHy5Ml48eJFvvvWV94+c3R0RKdOnXDgwAGDH0tJmdhkMhl2796tsVyZ5BISEoq879OnT2POnDl49uyZxrLOnTtrfZ307NlTY92i/K2IqHTTNf/o6+HDh5gzZw4uXbpU/GAN5M8//8TEiRPh5+cHa2vrQguLtWvXon79+rCzs0PdunXx3Xffaaxz/fp1TJ06FW3btlXl2ujoaCP9BpLjx4+jf//+cHNzg42NDVxcXNCnTx/89NNPRj0uACgUCjg7O+PLL7+ElZUVhg8fnu+6z58/h729Pfr376+xLL/clPcxZ84cjW1z59PcebxZs2ZYtmwZsrOzDfkrk46sTB0AlT8HDhzAwIEDYWtri4CAADRq1AiZmZk4deoUPvzwQ/z777/44YcfCtxH9+7dERAQoNZmb2+v9nznzp2qImvz5s1YsGCB1n3Z2tpizZo1AICkpCTs3bsX8+fPR1RUFDZv3qy27sqVK/HXX3+hRYsWSExMLDDGUaNGYdeuXZgyZQrq1q2LDRs2oFevXjh27Bjat2+vWu/SpUvo2rUr6tevj8WLF+PBgwf46quvcPPmTfz6668FHkMfyr4TQuDu3btYuXIl+vTpg19//RX+/v4GP15u8+bNQ//+/Q32DeHp06cxd+5cjBo1ClWqVNFYXrNmTSxcuFCtzd3dXWM9Xf9WRFS6GSL/FObhw4eYO3cuvL290axZM8MEXkwHDx7EmjVr0KRJE/j4+ODGjRv5rvv9999j/PjxePvttxESEoKTJ09i8uTJSE1NxYwZM1TrRUZG4ttvv0WDBg1Qv359oxePYWFhmDdvHurWrYv33nsPXl5eSExMxMGDB/H2229j8+bNGDp0qNZtP/74Y8ycObNYx//zzz+RkJCA3r1749ixY9i7dy9SU1Ph4OCgse5PP/2E9PR0VcF15MgR1bKPPvoIY8eOVT0/d+4cvv32W8yePRv169dXtTdp0iTfWIYMGYJevXoBkD63HDx4EJMmTcLdu3exaNGiYv2epAdBVIJu374tKlasKHx9fcXDhw81lt+8eVMsXbpU9dzLy0v07t1bbR0AIigoqNBjdezYUfTv319MnTpV1KpVS+s6I0eOFBUqVFBrUygUonXr1kImk4nY2Fi1Zffu3RPZ2dlCCCEaNmwoOnXqpHW/Z8+eFQDEokWLVG1paWmidu3aok2bNmrrvv7666JGjRoiKSlJ1bZ69WoBQBw+fLjQ37MotPXdf//9JwCI119/3aDHUrpz544AIJo1ayYAiN27d6stDwsLEwDE48ePi7zvRYs
WCQDizp07Gss6deokGjZsWOg+ivK3IqLSq6j5R1edOnVSywXnzp0TAMT69euLEW3h1q9fLwCIc+fOFbpubGysSE1NFUIIERQUJPL7+JeamiqqV6+ukXeHDRsmKlSoIJ48eaJqS0xMFMnJyUKIgt+LDWHnzp0CgBgwYIDIzMzUWH7o0CGxf/9+IUROzjF0/3/yySfCy8tLCCHExo0bBQCxdetWrev26NFDVK5cWaSnpxe6X+XvduzYsULXVf5uufOVENLnlhYtWgh3d/dC90GGx2F+VKK+/PJLvHjxAmvXrkWNGjU0ltepUwfvv/9+sY9z7949nDx5EoMHD8bgwYNx584dnD59WqdtZTIZ2rdvDyEEbt++rbbM09MTFhaF/7fZtWsXLC0tMW7cOFWbnZ0dxowZg8jISNy/fx8AkJycjPDwcAwfPhyOjo6qdQMCAlCxYkXs2LFDp5iLo379+nByckJUVJRae0ZGBsLCwlCnTh3Y2trC09MT06dPR0ZGhtp64eHhaN++PapUqYKKFSuiXr16mD17tsZxBg8ejJdffhnz5s2DEKLQuM6ePYuePXuicuXKcHBwQKdOnfDHH3+ols+ZMwcffvghAKBWrVqqIQ95h5lkZWUVOGRS178VEZVuRc0/69evR5cuXeDi4gJbW1s0aNAAK1euLPAYx48fR4sWLQAAo0ePVr0v5b5+p7D3NqWYmBiMGTMG7u7usLW1Ra1atTBhwgRkZmaqrZeRkYGQkBA4OzujQoUKeOutt/D48WO1dVxdXTVGb2hz7NgxJCYmYuLEiWrtQUFBSElJURsSXq1aNVSqVKnQfRrCJ598gmrVqmHdunWwtrbWWO7v769xSUBu2q6ZkslkCA4OxubNm1GvXj3Y2dnBz88PJ06c0LqPAwcOoHfv3gCAt956CxUqVMCWLVs01ouPj0dERAQGDBgAW1tbAMa/Zkomk8HV1RVWVhxwZgrsdSpR+/fvh4+PD9q2bVus/aSnp2tcY1OpUiXVG9fWrVtRoUIFvPHGG7C3t0ft2rWxefNmnY+r/EBetWpVveK7ePEiXn75ZbUCCQBatmwJQBra5+npicuXLyMrKwvNmzdXW8/GxgbNmjXDxYsX9Tp+USQlJeHp06eoXbu2qk2hUODNN9/EqVOnMG7cONSvXx+XL1/GkiVLcOPGDezZswcA8O+//+KNN95AkyZNMG/ePNja2uLWrVtaPxhYWlri448/RkBAAH7++WetY8mVjh49itdffx1+fn4ICwuDhYWF6oPNyZMn0bJlS/Tv3x83btzA1q1bsWTJEjg5OQEAnJ2dVfu5ceMGKlSogMzMTLi6uiIwMBChoaFqyVjXvxURlW5FzT8rV65Ew4YN8eabb8LKygr79+/HxIkToVAoEBQUpHWb+vXrY968eQgNDcW4cePQoUMHAFAdU5f3NkAaKtiyZUs8e/YM48aNg6+vL2JiYrBr1y6kpqbCxsZGdcxJkyahatWqCAsLQ3R0NJYuXYrg4GBs3769yH2kzDl5c5Kfnx8sLCxw8eLFAq8VMoabN2/i2rVrePfddw1evP3+++/Yvn07Jk+eDFtbW6xYsQI9e/bEn3/+iUaNGqnWi42NxcWLFzFv3jwAQIUKFdC3b1/s2rULT548QbVq1VTrbt++HdnZ2Rg2bJhBY80tNTVV9RkoOTkZv/76Kw4dOoRZs2YZ7ZhUAFOfGqPyIykpSQAQffv21Xmb/Ib5aXvkPqXfuHFjMWzYMNXz2bNnCycnJyGXy9X2pRzm9/jxY/H48WNx69Yt8dVXXwmZTCYaNWokFApFvrEVNMyvYcOGokuXLhrt//77rwAgVq1aJYTIOb1/4sQJjXUHDhwo3Nzc8j2+PgCIMWPGiMePH4v4+Hhx/vx50bNnT41hAxs3bhQWFhbi5MmTatuvWrVKABB//PGHEEKIJUuWFDpEL/ewhKysLFG3bl3RtGlTVd/mHeanUChE3bp1hb+/v1r/p6amilq1aonu3bu
r2goaWvLuu++KOXPmiN27d4sff/xRvPnmmwKAeOedd9TW0/VvRUSllz75RzksLjd/f3/h4+Oj1qbrML+ivLcFBAQICwsLrUP4lNsqh/l169ZNbX9Tp04VlpaW4tmzZ1p/r4KG+QUFBQlLS0uty5ydncXgwYO1LjPmML+9e/cKAGLJkiU6ra9tmJ8yz+Sm/Oxw/vx5Vdvdu3eFnZ2deOutt9TWXbt2rbC3t1d7TRw4cEAAEN9//73auq1btxYeHh6qSwKE0HyN5KbPMD9tjwkTJhT4mYWMh8P8qMQkJycDgEG+Werbty/Cw8PVHsrJE/755x9cvnwZQ4YMUa0/ZMgQJCQk4PDhwxr7SklJgbOzM5ydnVGnTh1MmzYN7dq1w969e/WeKCEtLU11liw3Ozs71fLc/+a3riFmlspr7dq1cHZ2houLC5o3b46IiAhMnz4dISEhqnV27tyJ+vXrw9fXFwkJCapHly5dAEhDQQCoJn3Yu3cvFApFocdWnp36+++/VWe38rp06RJu3ryJoUOHIjExUXXslJQUdO3aFSdOnNDpWGvXrkVYWBj69++PESNGYO/evQgMDMSOHTtw5swZ1Xq6/q2IqPTSJ//kHhaXlJSEhIQEdOrUCbdv30ZSUlKRY9D1vU2hUGDPnj3o06ePxhkiABp5ady4cWptHTp0QHZ2Nu7evVvkGNPS0tTOeuVmrJxUGEN+dsirTZs28PPzUz1/6aWX0LdvXxw+fFhtZryDBw/itddeU3tN9OjRA87OzmpD/e7cuYMzZ85gyJAhOl0SoK9x48apPvvs3r0bQUFB+P7779XyOJUcDvOjEqMcRvX8+fNi76tmzZro1q2b1mWbNm1ChQoV4OPjg1u3bgGQkoC3tzc2b96sGvOsZGdnh/379wMAHjx4gC+//BLx8fE6jS/Pj729vca1RQBU060r9638N791C4shNjZW7XnlypUL3aZv374IDg5GZmYmzp07h88++wypqalqb/w3b97E1atX1YbM5RYfHw8AGDRoENasWYOxY8di5syZ6Nq1K/r3748BAwbkm0iGDRuG+fPnY968eejXr5/G8ps3bwIARo4cme/vkJSUpNcQzA8++ACrV6/Gb7/9htatWwPQ/W9FRKWXPvnnjz/+QFhYGCIjI5Gamqq2LCkpCZUrVy5SDLq+t2VmZiI5OVltmFlBXnrpJbXnyvfGp0+fFik+QHq/y3tNlpIuOUlXSUlJaoWZjY2N2lC53Az52SGvunXrarS9/PLLSE1NxePHj+Hm5ga5XI7w8HCNmWGtrKwwaNAgrFixAjExMfDw8FAVVsUd4peZmYknT56oteXOx3Xr1lX7DKScJXfp0qV499130bhx42Idn4qGxRSVGEdHR7i7u+PKlStGO4YQAlu3bkVKSgoaNGigsTw+Ph4vXrxAxYoVVW2WlpZqb0r+/v7w9fXFe++9h3379ukVR40aNRATE6PR/ujRIwA503MrL4JWtuddV9s03nmPk9v69esxatSoArfJXYj26tULTk5OCA4Oxmuvvaa6jkmhUKBx48ZYvHix1n0oryGyt7fHiRMncOzYMRw4cACHDh3C9u3b0aVLFxw5cgSWlpYa2yrPTo0aNQp79+7VWK4867Ro0aJ8pxXO/fcrCmXcuZOUrn8rIiq9ipp/oqKi0LVrV/j6+mLx4sXw9PSEjY0NDh48iCVLluh0djwvXd/b8n6ILoy291kAOk30k1eNGjWQnZ2N+Ph4uLi4qNozMzORmJhosPfD999/H//73/9Uzzt16oTjx49rXdfX1xcAcPnyZYMcu6hOnTqF5ORk1VTkuQ0fPhzLli3D1q1bMW3aNGzduhUNGjQo9pT4p0+fxmuvvabWdufOnQK36dq1K5YtW4YTJ06wmCphLKaoRL3xxhv44YcfEBkZiTZt2hh8/7///jsePHiAefPmqd2vAZC+pRs3bhz27NlT4AW0NWrUwNSpUzF37lycOXNGdQajKJo1a4Zjx44
hOTlZbWKDs2fPqpYDQKNGjWBlZYXz58/jnXfeUa2XmZmJS5cuqbVpk/fGsg0bNixyrO+99x6WLFmCjz/+GG+99RZkMhlq166Nv//+G127di10qKOFhQW6du2Krl27YvHixfjss8/w0Ucf4dixY/mePRw+fDgWLFiAuXPn4s0331RbppwIw9HRMd/tlYo6DFM5O2Pub/h0/VsRUelWlPyzf/9+ZGRkYN++fWpnfpRDnAuS3/uSru9tzs7OcHR0NOoXj/lRvt+dP39erXg4f/48FAqFwd4Pp0+frpaHCxpp8PLLL6NevXrYu3cvvvnmG72/TNNGebYwtxs3bsDBwUGVJw4cOIAGDRrA29tbY91WrVqhdu3a2LJlC7p3745///0Xn376abHjatq0qUZ+d3Nz0xiNkltWVhYAFDh7LRkHr5miEjV9+nRUqFABY8eORVxcnMbyqKgofPPNN3rvXznE78MPP8SAAQPUHoGBgahbt67GjXi1mTRpEhwcHPD555/rFceAAQOQnZ2tdvPHjIwMrF+/Hq1atVKdIalcuTK6deuGTZs2qQ1h2LhxI168eIGBAwcWeJxu3bqpPbRN91sYKysrfPDBB7h69arqTNE777yDmJgYrF69WmP9tLQ0pKSkAIDWb1CVyVbb0Dkl5dmpS5cuaZz98/PzQ+3atfHVV19pTQq5p/ytUKECAODZs2dq6yQnJ2scXwihunFz7psT6/q3IqLSrSj5R3m2J/fZnaSkJKxfv77Q4+T3vqTre5uFhQX69euH/fv34/z58xrr6XPGSVddunRBtWrVNKaAX7lyJRwcHDSGyeurQYMGarkr93VL2sydOxeJiYkYO3asqmjI7ciRI/jll1+KHEdkZCQuXLigen7//n3s3bsXPXr0UL0GDh48WODvPWzYMFy8eBFhYWGQyWT53ji4KKpWraqR35XX8eZHeblC06ZNi318KhqemaISpfwGZ9CgQahfv77aHehPnz6NnTt3FjpMLT8ZGRnYvXs3unfvnu+bzptvvolvvvlGYwhDXtWrV8fo0aOxYsUKXL16VXWW68SJE6p7UDx+/BgpKSmqD+gdO3ZEx44dAUjfVg0cOBCzZs1CfHw86tSpg//973+Ijo7G2rVr1Y716aefom3btujUqRPGjRuHBw8e4Ouvv0aPHj3Qs2dPvfqiqEaNGoXQ0FB88cUX6NevH0aMGIEdO3Zg/PjxOHbsGNq1a4fs7Gxcu3YNO3bswOHDh9G8eXPMmzcPJ06cQO/eveHl5YX4+HisWLECNWvWRPv27Qs8pvLaqUuXLqm1W1hYYM2aNXj99dfRsGFDjB49Gh4eHoiJicGxY8fg6OioShrKBPzRRx9h8ODBsLa2Rp8+fXDhwgUMGTIEQ4YMQZ06dZCWloaff/4Zf/zxB8aNG4dXX31Vdbyi/K2IqPQqSv7p0aMHbGxs0KdPH7z33nt48eIFVq9eDRcXF63DsvMep0qVKli1ahUqVaqEChUqoFWrVqhVq5bO722fffYZjhw5osoL9evXx6NHj7Bz506cOnVKNfmPru7evYuNGzcCgKpAU+YuLy8vjBgxAoA0dHv+/PkICgrCwIED4e/vj5MnT2LTpk349NNP1a5rSkpKwnfffQcAqtthLFu2DFWqVEGVKlUQHBxcpBgLMmjQIFy+fBmffvopLl68iCFDhsDLywuJiYk4dOgQIiIitN7zqTCNGjWCv7+/2tTogFS8AdLQuqtXrxZ4f7Hhw4dj3rx52Lt3L9q1a6f1DJahXbhwAZs2bQIgXUsWERGB3bt3o23btujRo4fRj095mHQuQSq3bty4IQIDA4W3t7ewsbERlSpVEu3atRPfffed2h3D85saPSgoSGOfu3fvFgDE2rVr8z3u8ePHBQDxzTffCCFypkbXJioqSlhaWoqRI0eq2pTTq2p7hIWFqW2flpYmpk2bJtzc3IStra1o0aKFOHTokNZjnTx5UrRt21bY2dkJZ2dnERQUpLqzvCH
l13dCCDFnzhy16VkzMzPFF198IRo2bChsbW1F1apVhZ+fn5g7d65ISkoSQggREREh+vbtK9zd3YWNjY1wd3cXQ4YMETdu3FDtN787tguRM7UvtEyvfvHiRdG/f39RvXp1YWtrK7y8vMQ777wjIiIi1NabP3++8PDwEBYWFqqpeW/fvi0GDhwovL29hZ2dnXBwcBB+fn5i1apVWqeOLcrfiohKN13zz759+0STJk2EnZ2d8Pb2Fl988YVYt26dxhTg2qa93rt3r2jQoIGwsrLSmKZb1/e2u3fvioCAAOHs7CxsbW2Fj4+PCAoKEhkZGUKInPfPvNOnHzt2TGOqbWWbtoe2Kbt/+OEHUa9ePWFjYyNq164tlixZovHeWdA03V5eXoX/IfSgzDkuLi7CyspKODs7iz59+oi9e/dqxKXL1OhBQUFi06ZNom7dusLW1la88sorav22bNkyUblyZY3bquTVokULAUCsWLFC63JjTo1uZWUlfHx8xIcffiieP39e6D7I8GRCGPF8MRERERGRmZHJZAgKCsKyZcvyXadXr16oWLEiduzYUYKRUWnDYX5ERERERHl07twZHTp0MHUYZOZYTBERERER5TF9+nRTh0ClAGfzIyIiIiIi0gOLKSIiokKcOHECffr0gbu7O2QyGfbs2VPoNsePH8err74KW1tb1KlTBxs2bDB6nESkGyFEgddLEemKxRQREVEhUlJS0LRpUyxfvlyn9e/cuYPevXvjtddew6VLlzBlyhSMHTsWhw8fNnKkRERUkjibHxERURHIZDL8/PPP6NevX77rzJgxAwcOHMCVK1dUbYMHD8azZ89w6NChEoiSiIhKAiegAKBQKPDw4UNUqlQJMpnM1OEQEZUrQgg8f/4c7u7usLAoGwMmIiMj0a1bN7U2f39/TJkyJd9tMjIykJGRoXquUCjw5MkTVK9enbmJiKgEFSUvsZgC8PDhQ3h6epo6DCKicu3+/fuoWbOmqcMwiNjYWLi6uqq1ubq6Ijk5GWlpabC3t9fYZuHChZg7d25JhUhERIXQJS+xmAJQqVIlAFKHOTo6Fnl7uVyOI0eOoEePHrC2tjZ0eKUO+8Ow2J+Gxf40vOL2aXJyMjw9PVXvxeXVrFmzEBISonqelJSEl156CXfu3NGrb+RyOY4dO4bXXnut3L/W2ReGxz41LPanYRW3P58/f45atWrp9N7LYgpQDZ9wdHTUu5hycHCAo6Mj/wOA/WFo7E/DYn8anqH6tCwNZXNzc0NcXJxaW1xcHBwdHbWelQIAW1tb2NraarRXq1atWLmpevXq5f61zr4wPPapYbE/Dau4/ancRpe8VDYGpxMREZmRNm3aICIiQq0tPDwcbdq0MVFERERkDCymiIiICvHixQtcunQJly5dAiBNfX7p0iXcu3cPgDRELyAgQLX++PHjcfv2bUyfPh3Xrl3DihUrsGPHDkydOtUU4RMRkZGwmCIiIirE+fPn8corr+CVV14BAISEhOCVV15BaGgoAODRo0eqwgoAatWqhQMHDiA8PBxNmzbF119/jTVr1sDf398k8RMRkXHwmikiIqJCdO7cGQXdlnHDhg1at7l48aIRoyIiIlPjmSkiIiIiIiI9sJgiIiIiIiLSA4spIiIiIiIiPbCYIiIiIiIi0gOLKSIiotIuLc246xMRkVYspoiIiEqz1auBJk2A+/d1W//+fWn91auNGxcRUTnAYoqIiKi0SksDvvwSuHUL6Ny58ILq/n1pvVu3pO14hoqIqFhYTBEREZVW9vbA0aOAjw9w+3bBBZWykLp9W1r/6FFpeyIi0huLKSIiotLM0xM4frzggipvIXX8uLQdEREVC4spIiKi0k5bQRUTIy2LiWEhRURkJCymiIiIyoK8BVWvXlJ7r14spIiIjMQsi6nly5fD29sbdnZ2aNWqFf78888C13/27BmCgoJQo0YN2Nra4uWXX8bBgwdLKFoiIiIzkbugio6W2qKjWUgRERmJ2RV
T27dvR0hICMLCwnDhwgU0bdoU/v7+iI+P17p+ZmYmunfvjujoaOzatQvXr1/H6tWr4eHhUcKRExERmQFPT2DjRvW2jRtZSBERGYGVqQPIa/HixQgMDMTo0aMBAKtWrcKBAwewbt06zJw5U2P9devW4cmTJzh9+jSsra0BAN7e3iUZMhERkfm4fx8YMUK9bcQInpkiIjICsyqmMjMz8ddff2HWrFmqNgsLC3Tr1g2RkZFat9m3bx/atGmDoKAg7N27F87Ozhg6dChmzJgBS0tLrdtkZGQgIyND9Tw5ORkAIJfLIZfLixy3cht9ti2L2B+Gxf40LPan4RW3T/m3MKDcs/bVry+1eXsDV69K7SyoiIgMyqyKqYSEBGRnZ8PV1VWt3dXVFdeuXdO6ze3bt3H06FEMGzYMBw8exK1btzBx4kTI5XKEhYVp3WbhwoWYO3euRvuRI0fg4OCgd/zh4eF6b1sWsT8Mi/1pWOxPw9O3T1NTUw0cSTmVd/rzgweBv/+W/u3aNWeWPxZUREQGY1bFlD4UCgVcXFzwww8/wNLSEn5+foiJicGiRYvyLaZmzZqFkJAQ1fPk5GR4enqiR48ecHR0LHIMcrkc4eHh6N69u2qoYXnG/jAs9qdhsT8Nr7h9qhwdQMWg7T5Sbm5SMeXhIT1XLmdBRURkMGZVTDk5OcHS0hJxcXFq7XFxcXBzc9O6TY0aNWBtba02pK9+/fqIjY1FZmYmbGxsNLaxtbWFra2tRru1tXXhHwTS0vK9Y7zW7QtYv6zTqT9JZ+xPw2J/Gp6+fcq/QzHld0Pe3MMnlbP8saAiIjIosyqmbGxs4Ofnh4iICPTr1w+AdOYpIiICwcHBWrdp164dtmzZAoVCAQsLaXLCGzduoEaNGloLqWJZvRr48kvg6FHdEtD9+0CXLsD06UBgoGFjISIiSkuT8owu95HKW1B16QL880+5/cKPiAxPoZC+xynuIyureHFkZcnw9981UbmyDJ07G+RXy5dZFVMAEBISgpEjR6J58+Zo2bIlli5dipSUFNXsfgEBAfDw8MDChQsBABMmTMCyZcvw/vvvY9KkSbh58yY+++wzTJ482bCBpaVJhdStW7p9o5f7m8IvvwSGD2fCIiIiw7K3l76w0/WLPmVBpfyij3mJqNRKSwNu3JDml7l2Dbh+HSjuJahCSIVMYcVOZqb2doXCML9b8VkB8ENMjKL8FVODBg3C48ePERoaitjYWDRr1gyHDh1STUpx79491RkoAPD09MThw4cxdepUNGnSBB4eHnj//fcxY8YMwwZmby8lKl2GSOQdcnH0KBMWEREZR2Bg0b6w8/TkGSmiUiQhQSqWlEWT8t/oaKn4IdMyu2IKAIKDg/Md1nf8+HGNtjZt2uDMmTNGjgr5jznPfT1XfmPXiYiIjKWohRELKSKzkp0NxMU54NAhGW7eVC+aEhJMG5tMBlhbF/6wsdFtPV0eVlbScfWVnZ2Nf//9F337NgBgUej6xWGWxZRZ01ZQRURIy2JicqafZSFFREREVO4IAaSkSEWQ8vH4sfaflc+fPLGCQtFd52NUqgT4+kq3k/P1zXlUr178+K2s1AubfG7batbkcgUOHryDHj3qG/1YLKb0kbeg6tULWLhQ+peFFBEREVGJSE/P//qd4jyKus+UFPUiKT29qL+J9tMw7u7qRZPyX3f34p25IcNhMaWv3AVVdLTUFh3NQoqIiIhID3I5kJiY/xkcbW1paaaOungqVgScnIDq1RWwtIxDp04uaNjQEvXrA/XqAZUrmzpCKgyLqeLw9AQ2bgS6dctp27iRhRQRERGZTEFDzJQPIQAHB6BCBenf/B75LbeyAl68sEZMjFQEpaZqPlJStLcrl6Wk5BRPjx8DSUmm7rnisbaWCiPlw9m54OfVq+dcviiXZ+PgwT/Rq1cvWFuXwnF15RiLqeK4fx8YMUK9bcQInpkiIiIigxICuHcPuHxZ+vhRUKFUMmdrrAH0KokDabC0zCl
IqlUDbG2NMylCUbZ1cAAcHTn0rjxiMaWv3LP21f//i9u8vaWpV3hneSKiMmf58uVYtGgRYmNj0bRpU3z33Xdo2bJlvusvXboUK1euxL179+Dk5IQBAwZg4cKFsLOzK8GoqTRKSgKuXJFmsP/nH6mAunwZSE42dWTGUbVqwWdw8rZXrsyihcyHXsXU2bNn0apVK0PHUnrknf784EHg77+lf5Wz+bGgIqJyIjUVePrU1tRhGDU3bd++HSEhIVi1ahVatWqFpUuXwt/fH9evX4eLi4vG+lu2bMHMmTOxbt06tG3bFjdu3MCoUaMgk8mwePFio8RIpU9WlnTTVWXBpPz37t2i78vKKv9CRFubhYXuw/K0taekKPDs2WN4eTmjYkWLIg0RzN1etaoUO1FppdfLt02bNmjcuDECAwMxfPhwVKlSxcBhmTFt95Fyc5OKKQ8P7fehYkFFRKWIQgE8eQLEx0uPuLiCf37xwho1arTHsGGmjduYuWnx4sUIDAzE6NGjAQCrVq3CgQMHsG7dOsycOVNj/dOnT6Ndu3YYOnQoAMDb2xtDhgzB2bNnDRYTlR5CALGx6gXTP/8A//0nzRqni5deApo0ARo3BurWzSmQlP+W9BAz6RqfM/9/jY9x7+NDZM70KqaGDx+O3bt3Y/LkyZg+fToGDBiAwMBAdOjQwdDxmZf8bsgrl+esk9+NfVlQEZEJpafnFEGFFUiPH0s3kCyKZ89Mf2bKWLkpMzMTf/31F2bNmqVqs7CwQLdu3RAZGal1m7Zt22LTpk34888/0bJlS9y+fRsHDx7EiLzX2eaSkZGBjIwM1fPk/x/TJZfLIc+dZ3Sk3EafbcuakuyL1FTgv/9kuHwZuHJFhsuXZbhyRYaEBN0qnUqVBBo1EmjcWKBxY6BRI4GGDQUK+24gK6v4sRcFX1+Gxf40rOL2Z1G206uY+vHHH/Hdd99h06ZNWLt2LTZt2oTNmzejbt26CAwMxMiRI+Hk5KTPrs1XWhrQpYtu95HKW1B16SJ9BcU7zhORgQgBPH2q25mj+HjjXGtRrRrg4gK4uCiQlRWP7GwXWFsb/ji6MlZuSkhIQHZ2NlxdXdXaXV1dce3aNa3bDB06FAkJCWjfvj2EEMjKysL48eMxe/bsfI+zcOFCzJ07V6P9yJEjcHBwKHLcSuHh4XpvW9YYsi8UCiAurgKiox1x964joqMdce+eIx49qgAhCi+cLCwUcHdPgbd3Ery8nsPLKwleXslwcUlTO8OUlAScPm2wsA2Ory/DYn8alr79mZqaqvO6eo9SrVy5MoKCghAUFIQLFy5g9erV2LZtGz788EN89NFH6Nu3LwIDA9Et97ThpZm9PTB9OvDll8DRo4WfaVIWVF26SNuxkCKiQggBPH8uFUGxsdKjoJ8N/QWmjY2yOAJcXdX/zfuzszNUhZM03Oc8LC1NM7NXbuaSm44fP47PPvsMK1asQKtWrXDr1i28//77mD9/Pj755BOt28yaNQshISGq58nJyfD09ESPHj3g6OhY5BjkcjnCw8PRvXt3WJuyyjUDxe2LhATpLFPOmSbg339lSE3V7WyTm1vO2aaGDaV/69fH/09GYgfAtbBdmB2+vgyL/WlYxe3P5CJ8A2mQS/5effVVrFy5EosXL8bOnTsxe/Zs7Nq1C7t27YKXlxfGjx+PCRMmoFKlSoY4nOkEBgLDh+teGHl68owUEUGhkIbOPXyo/lAWRrkLJUNPaVylSsFFUe7nZW2GLEPlJicnJ1haWiIuLk6tPS4uDm5ublq3+eSTTzBixAiMHTsWANC4cWOkpKRg3Lhx+Oijj2BhoXmNia2tLWxtNYdLWltbF+vDVXG3L0sK64uMDGlS3rzXNj16pNv+7e2Bhg1zrm1S/uvsLANQhv5z5cLXl2GxPw1L3/4syjYGmz/l6dOn+PHHH7FmzRo8fPgQMpkM7dq1w9WrVzFz5kwsXboUe/fuRYsWLQx1SNMoamHEQoqozFIOtYuJ0Sy
U8hZNhrqewcJCOivk5iYVQW5u2gsj5cPGxjDHLa0MkZtsbGzg5+eHiIgI9OvXDwCgUCgQERGB4OBgrdukpqZqFEyWltKNOIUQhvnlSG+579mUu2i6fl236wVlMmnEf96iqXZt6R5IRFR+FLuYOnbsGFavXo09e/YgPT0dzs7O+PDDD/Hee+/Bx8cHGRkZWLduHaZPn45JkybhzJkzhoibiMjo0tOlD1x370qP6Oicnx88kAqlXPMFFEv16jnFkfKR+7nyZycnfljThaFzU0hICEaOHInmzZujZcuWWLp0KVJSUlSz+wUEBMDDwwMLFy4EAPTp0weLFy/GK6+8ohrm98knn6BPnz6qoopKzv37wKFD3vj1VwtcuVK0ezZVq5ZTLCkLp4YNgYoVjRszEZUOehVTcXFxWL9+PdauXYvbt29DCIFOnTph/Pjx6N+/v9qpMVtbW0yYMAG3bt3C8uXLDRY4EVFxvXihvVBS/hwbW7z9y2TS2SF3d+0PZaHEM0iGYczcNGjQIDx+/BihoaGIjY1Fs2bNcOjQIdWkFPfu3VM7E/Xxxx9DJpPh448/RkxMDJydndGnTx98+umnhv/FSSuFAoiIAFasAPbts4JC0bTA9a2tgfr11c82NWkC1KhRtoa/EpFh6VVM1axZEwqFAlWrVsWUKVMwbtw41KtXr8BtnJ2dkanrzRSIiPSUnQ0kJmqf1U66LskS1651wpgxVkhM1P84VatKt5bLXRzlfe7qCpPOblfeGDs3BQcH5zus7/jx42rPraysEBYWhrCwMJ32TYbz9CmwYQOwciVw86ayVb0a8vTUHKJXrx7/vxJR0elVTLVq1Qrjx4/HwIEDtV4sq83MmTO13tiQiEgXycnAnTvSsLu8BVLuoikhQboeIn8WAKoUerwaNQAvL+nh7a3+80svcYiPOWJuKt8uXACWLwe2btWcyMXdXaBDh+t47706aNbMClWrmiZGIip79CqmTp06Zeg4iKicS0+XhtbduaP98eSJ4Y5lYaFAzZoyeHvLtBZMnp6AnZ3hjkclg7mp/ElPB3bskIbynT2rubxLF2DiROD117MQHn4d7dvX5tknIjIovYqpBw8e4MKFC+jYsSOqaLkl99OnT3Hy5En4+fnBw8OjuDESURmQnS1N2pBfsfTwYfH2b2enffrvvG1Vq8rx55+/ok+f1zn9bBnD3FR+3L4NrFoFrFsHjeG6jo7AyJHAhAnSNVCA4e/JRkSkpFcxtWDBAuzcuRMP8/n04+DggHfffReDBw/GsmXLihUgEZUOQkhD7fIrlu7d0296cAsLoGZNoFYt6eHtnTO7Xe5iqWJF3S4Sl8sBS0tOTV0WMTeVbdnZwKFD0lmoX3/VHM7bpAkQFAQMHcphuERUcvQqpo4ePYoePXrkOybd1tYWPXr0wG+//Vas4IjIvDx7llMcRUerF0vR0UBqqn77dXHJKZbyPl56iReFk26Ym8qm5GTpLNSqVdJ7TW7W1sDAgdJQvrZtOeseEZU8vYqpmJgYvP322wWu4+Xlhf379+sVFBGZRmamdAbp9m3pQ8vt2+o/P32q334dHfMvlry9gQoVDPprUDnF3FT2HDgAvPeedGPs3F56CRg/HhgzRvoyhojIVPQqpmxsbJBcyN3ukpOTIeNXRERmRQjg8WPNIkn58/370r1ZisrWViqK8iuYqlblN8ZkfMxNZUdCAjBlCrB5s3q7v790Fqp3b968mojMg17FVOPGjbF//34sXrxY63CK9PR07Nu3D40bNy52gERUdC9eANeuAVevAv/9J/0bFSUVTCkpRd+f8rolHx/txZKbm7QOkSkxN5V+QgA7dwLBwdIXP0o9egDffQe8/LLpYiMi0kavYmr06NEYM2YM3nzzTaxcuRI+Pj6qZVFRUZg4cSIePnyIefPmGSxQItKUmKheMCl/vn+/6PuqVk0qjHx8coom5b8vvQTY2Bg+fiJDYm4q3R4+lCaQ2LMnp61KFWDJEml2Pp5
QJCJzpHcxdfDgQezevRu+vr6oVasWPDw8EBMTgzt37iArKwuDBg3C6NGjDR0vUbkjBJCYaIeICBlu3FAvmnJ/c1sYGxtpKJ62YqlWLelDC1FpxtxUOgkBrF8PhIQASUk57W+9Jd2Et0YN08VGRFQYvYopANixYweWL1+OFStW4Nq1a7h58yYAoEGDBggKCsKECRMMFiRReZCZCdy6JQ3PU39Y4flzf533U6WKdG+VBg2kf5U/v/QSh+JR2cfcVLpERwPjxgHh4TltLi7AsmXAgAE8G0VE5k/vYkomkyE4OBjBwcFISUlBUlISKleujAqclouoQE+faiuYpGuasrO1baH904Sra07BlLtwcnPjBxAqv5ibSgeFQjrrNGuW+nWcw4cDS5cC1aubLDQioiLRu5jKrUKFCkxURHkkJAAXLwJXrqgXTfHxuu9DJgO8vASqVYtHx45OaNjQUlU4Va1qvNiJygLmJvN0/bo0pfkff+S01awJfP890KuX6eIiItKHQYopovLu0SPgwgX1x717um9vbw/4+uY86tWT/q1bF7C2zsLBg2fQq1cvWFtzLmAiKp2ysoCvvgLmzAEyMnLax48HvvhCuh8dEVFpo3cxdf/+fSxYsAC//fYbHj58iMzMTI11ZDIZsrKyihUgkTkRQiqS8hZOsbG6bV+jhnrRpHzUrJn/9UxyueHiJyrrmJvM099/A+++K71fKtWuDaxZA3TubLKwiIiKTa9i6vbt22jVqhWePn2Khg0bIiMjA15eXrCzs8Pt27chl8vRtGlTVOH0YFSKKRTSdUx5C6cnTwrftlIl4JVXgFdfBZo2la5pqlcPqFzZ+HETlVfMTeZHLgfmzQM+/1w6MwVIXxxNnSq1OziYNj4iouLSq5iaO3cukpKSEBERgU6dOsHCwgKjR49GaGgoHj16hAkTJuC///7Db7/9Zuh4iQxOoZDONv37r/rj6lUgNbXw7atWBfz8pMJJ+ahdmzPnEZU05ibzIoR0NmrTppy2hg2BdeuAli1NFxcRkSHp9XHvt99+Q69evdCpUydVmxACAFCjRg1s374dADB79my9A1u+fDm8vb1hZ2eHVq1a4c8//9Rpu23btkEmk6Ffv356H5vKJiGAu3eBgwelcfujR0sJ3dFRus/SG28AM2YAP/4I/PWX9kLK1VW6QPrjj4GffpKm9U1MlKb1/eILYNAg6TonFlJEJa8kchPp7vvvcwopKysgLEw6u89CiojKEr3OTCUkJMDX1zdnJ1ZWSM31ydPW1hbdu3fHnty3MS+C7du3IyQkBKtWrUKrVq2wdOlS+Pv74/r163Bxccl3u+joaEybNg0dOnTQ67hUdsTHSzPpKc8yXbki3eT2xQvdtpfJpLNLDRuqn3Fydzdu3ESkP2PnJtLd+fPA++/nPN+6VbpvFBFRWaNXMeXk5ISUXDeGcHJyQnR0tPqOrazw7NkzvYJavHgxAgMDVXepX7VqFQ4cOIB169Zh5syZWrfJzs7GsGHDMHfuXJw8eVLvY1Ppk5QknUk6dy7noetMejKZdFaqYUP1h6+vNMMeEZUexs5NpJsnT6TCSTn3x5QpLKSIqOzSq5iqW7cuoqKiVM9btmyJw4cP4/bt2/Dx8cHjx4+xa9cu1K5du8j7zszMxF9//YVZs2ap2iwsLNCtWzdERkbmu928efPg4uKCMWPG4OTJkwUeIyMjAxm55mVNTk4GAMjlcsj1mDpNuY0+25ZFxuyP9HTg779lOHdOhvPnpceNG7rdodbbW6BBA4H69aV/GzYU8PXN/wJoc/lz8vVlWOxPwytunxrqb2HM3ES6USiAgABpSDUAtGkjDYEmIiqr9CqmXn/9dcyZMwfPnj1DlSpVMGXKFOzfvx9NmjRB/fr1cevWLSQnJ2POnDlF3ndCQgKys7Ph6uqq1u7q6opr165p3ebUqVNYu3YtLl26pNMxFi5ciLlz52q0HzlyBA7FmFooPDxc723LouL2R3a2DPfuVcK
tW1Vw82ZV3LpVBXfvOiI7u+ALkuzsslC79jPUrv0MXl7JeOml56hZ8zns7bPV1ouN1X1Kc3PA15dhsT8NT98+TdVlphcdGDM3kW4+/xw4cED62ckJ2LEDsLExbUxERMakVzE1YcIEdO7cGZaW0g1EO3fujG3btmHOnDm4cuUKvLy8sGDBAgQGBho0WG2eP3+OESNGYPXq1XByctJpm1mzZiEkJET1PDk5GZ6enujRowcc9bhroFwuR3h4OLp37w5ra+sib1/W6Nsf2dnAX3/J8OuvMhw7JsPFizKkpRV81snaWqBpU4HmzaWHn590tsnSsjKAsjEPOV9fhsX+NLzi9qlydEBxmVNuKo+OHgU++UT6WSYDtmyR7qFHRFSW6VVMOTo6olWrVmptAwcOxMCBA4sdkJOTEywtLREXF6fWHhcXBzc3N431o6KiEB0djT59+qjaFAoFAGls/PXr1zWGdNja2sLW1lZjX9bW1sX6cFXc7csaXfojMRE4fFiaYe/wYSAhIf91LSyA+vWBFi1yHk2ayGBrq9swv9KOry/DYn8anr59aqi/gzFzExXs4UNgyBBpmB8AzJkDdO9u0pCIiEqEXsVUly5d0K5dO8yfP9/Q8cDGxgZ+fn6IiIhQTW+uUCgQERGB4OBgjfV9fX1x+fJltbaPP/4Yz58/xzfffANPT0+Dx0j6UyikqXF//VUqoM6elaYs18bHR71wevVVoGLFko2XiEoPY+Ymyp9cLt0WIj5eeu7vL90+goioPNCrmDp79ixat25t6FhUQkJCMHLkSDRv3hwtW7bE0qVLkZKSoprdLyAgAB4eHli4cCHs7OzQqFEjte2Vd7fP206m8fQpcOSIVED9+mtOws2rUiXpm8xevYCePQEPj5KNk4hKN2PnJtJu9mzg1CnpZ09P6d5SvNceEZUXehVTvr6+uKucqscIBg0ahMePHyM0NBSxsbFo1qwZDh06pJqU4t69e7DgO7XZEgK4fdsRn39ugSNHgNOnc4Z+5NWwoVQ89eoFtG3LC5WJSH/Gzk2k6eefpZugA4C1tTThhI6XLxMRlQl6FVOTJk1CcHAw/vvvPzRo0MDQMQEAgoODtQ7rA4Djx48XuO2GDRsMHxAVKD1duvh4717gl1+s8PDha1rXc3AAunWTiqfXXwdeeqmEAyWiMqskchPluHULGDUq5/nXXwM8MUhE5Y1exZSPjw86d+6M1q1b47333kOLFi3g6uoKmUxzIoCOHTsWO0gyT4mJ0hS4e/dKk0fk3CtT/XXg6ysVTr16AR06AFrm/iAiKjZj56bly5dj0aJFiI2NRdOmTfHdd9+hZcuW+a7/7NkzfPTRR/jpp5/w5MkTeHl5YenSpejVq1eRj21u0tKkG/EqJ2J85x0gn+8/iYjKNL2Kqc6dO0Mmk0EIga+//lprolLKzs7OdxmVPlFRUvG0d680Rl7b8D07O4GGDeMQEOCMN96whI9PycdJROWPMXPT9u3bERISglWrVqFVq1ZYunQp/P39cf36dbi4uGisn5mZie7du8PFxQW7du2Ch4cH7t69q7qmt7SbNAn4+2/p53r1gDVrpOnQiYjKG72KqdDQ0AKTFJUdCgVw7lxOAfXff9rXc3IC3ngD6NsX6Nw5C7//fha9evWCtbVlyQZMROWWMXPT4sWLERgYqJoIadWqVThw4ADWrVuHmTNnaqy/bt06PHnyBKdPn1ZN/e7t7W2U2Era+vXA2rXSzw4OwO7d0gRCRETlkV7FFO8eX7alpwMREVLxtH8/EBurfb26daXiqW9foE0b4P/vkwm5vORiJSJSMlZuyszMxF9//YVZs2ap2iwsLNCtWzdERkZq3Wbfvn1o06YNgoKCsHfvXjg7O2Po0KGYMWOG6qbCeWVkZCAjI0P1XHkzY7lcDrkeb6zKbfTZNj9//w1MnGgF5XDu5cuz8PLLwuzf943RF+Ud+9Sw2J+GVdz+LMp2ehVTVPZkZ0vTl69bJ01fnnP9Uw6
ZTLq4WFlA+fqWfJxERCUtISEB2dnZqhlllVxdXXHt2jWt29y+fRtHjx7FsGHDcPDgQdy6dQsTJ06EXC5HWFiY1m0WLlyIuXPnarQfOXIEDg4OescfHh6u97a5paRYYdq0TkhPl8609ex5B1Wr/oODBw2y+xJhqL6gHOxTw2J/Gpa+/ZmamqrzuiymyrnYWKmA+uEHQNuMwnZ20r2f+vaVhvHl+SxBRERaKBQKuLi44IcffoClpSX8/PwQExODRYsW5VtMzZo1CyEhIarnycnJ8PT0RI8ePeDo6FjkGORyOcLDw9G9e3fVUEN9CQEMGmSJR4+k25K8+qoCO3bUhJ1dzWLtt6QYsi9Iwj41LPanYRW3P5UjA3ShVzFlYWGh07h0mUyGrKwsfQ5BRqRQSNOYf/89sGcPkPdP5OQE9OkjFVDdu0tj4omIzJ2xcpOTkxMsLS0RFxen1h4XFwc3Nzet29SoUQPW1tZqQ/rq16+P2NhYZGZmwkbLTfVsbW1hq2W6U2tr62J9uCru9gCweLGULwCgShVg1y4LVKpU+u73aIi+IHXsU8NifxqWvv1ZlG30KqY6duyoNWElJSXh5s2bSElJQdOmTcvMrEVlxePHwIYN0lmoW7fUl8lkgL8/MH480Ls3YMVzllQIuVxeKmfrlMvlsLKyQnp6eqmM3xzl7VNLS0uTfBgwVm6ysbGBn58fIiIi0K9fPwDSmaeIiIh874fYrl07bNmyBQqFQnWT+Rs3bqBGjRpaCylzduoUMH16zvONG4FatUwXD1FBmJsI0N6fxspNen1kLuimuampqZg5cyYOHTrEcZ9mQAjgxAnpLNTu3UBmpvpyV1dgzBhg7FgmR9JNcnIyEhIS1C6UL02EEHBzc8P9+/c5K6mBaOtTW1tbODk56TU8TV/GzE0hISEYOXIkmjdvjpYtW2Lp0qVISUlRze4XEBAADw8PLFy4EAAwYcIELFu2DO+//z4mTZqEmzdv4rPPPsPkyZP1+t1MJT4eGDRIuq4WAGbNkoZ8E5kb5ibKLb/+NEZuMvj5BwcHB3z77bdo0aIFPvzwQ6xfv97QhyAdPHkC/PijVERpuz66a1fpLNSbbwKl7EtSMqHk5GTExMSgYsWKcHJygrW1dal701coFHjx4gUqVqyoOmNAxZO7T2UyGeRyOZKSkhATEwMAJVpQ5ae4uWnQoEF4/PgxQkNDERsbi2bNmuHQoUOqSSnu3bun9nry9PTE4cOHMXXqVDRp0gQeHh54//33MWPGDIP+XsaUnQ0MHQo8fCg979wZmDfPpCERacXcRHnl7U8hhNFyk9EGc3Xo0AGbNm0y1u5JCyGAyEhg1Spg505pivPcnJyA0aOBwEBpWnOiokpISEDFihVRs2bNUpeolBQKBTIzM2FnZ8eEZSB5+9Te3h6VKlXCgwcPkJCQYBbFlFJxclNwcHC+w/q0nRVr06YNzpw5o9exzMHKldJtMgDAzQ3YupVDwMk8MTdRXtr601i5yWhvi48fP8aLFy+MtXvKRaEAdu0CPvss5470uXXqBLz3HtC/P6Dl2mYincjlcmRkZMDJyanUJisqOTKZDJUrV0ZMTAzkcrnZXFDN3KQbuRxYtCjn+bZtUkFFZG6Ym6gojJGbDF5MKRQKbN68Gdu3b0fz5s0NvXvKJTsb2LEDWLAA+O8/9WVVqwIjRwLjxgH165smPipblBdwmsuHYjJ/ytdKdna2yV83zE1Fs2sXcO+e9HOvXtKXckTmiLmJisrQuUmvYsrHx0dre1ZWFuLj41WVnvJCXDKsrCxg+3apiMp7PVTLlkBQEDBwIGBvb5r4qGzjN3+kq5J+rTA3GYYQ6melPvzQdLEQ6Yq5iXRl6NeKXsWUQqHQGoi1tTUaNWqEFi1aIDg4GA0bNix2gJQjKwvYskUqom7eVF/Wrh0QFgZ06yZNc05EVN4wNxnG0aPAxYvSz82b86wUEVFB9CqmoqOjDRwGFUQuBzZtAj79FIiKUl/WoYN
URHXpwiKKiMo35ibDyH1Wato05hYiooJwuhAzJpcDa9cC9eoB776rXkh17gwcOybdQ6prVyY7orJKJpOhc+fOpg6Dyol//gEOH5Z+9vYG3n7bpOEQkZlibsqhVzH14MED7Nu3D8+ePdO6/OnTp9i3b59qHncqmsxM4IcfpOnLx44F7tzJWda1K/D771IhxdcwUcmKjo6GTCYr8JHf+6Kx3L17F5aWlpDJZFiU+5RCOcTcVHxff53zc0gIp0InKg2Ym0xLr7fJBQsWYOfOnXiovJNfHg4ODnj33XcxePBgLFu2rFgBlicZGcC6dcDChcD9++rLuneXhvO1a2ea2IgoR+3atTF8+HCty+zs7Eo0lnXr1qmuFVq3bh0+LMezBTA3Fc+DB9J1uYA0I+y775o2HiIqGuYm09CrmDp69Ch69OgB23xuWmRra4sePXrgt99+K1Zw5UV2NrBmjTSxxIMH6st69gRCQ4E2bUwTGxFpqlOnDubMmWPqMKBQKLBhwwY4OTnhjTfewIYNG3D69Gm0bdvW1KGZBHNT8Xz7rTTREQBMnAhUqGDaeIioaJibTEOvYX4xMTHw9vYucB0vLy8OpdDBhQtA69bA+PHqhVTv3sDZs8Cvv7KQIiqN/vnnHwwePBg1atSAjY0NvLy8MGnSJCQmJmpdf82aNWjUqBHs7Ozg6emJ6dOnIz09vcBjhIeH4969exg8eDDGjBkDAFi7dq3aOvPnz4dMJsOPP/6odR8//fQTZDIZPvroI4325s2bw97eHq6urggMDMTTp0/h7e1d6Pu/qTA36S85Gfj+e+lnW1tg0iTTxkNExsHcZHh6FVM2NjZITk4ucJ3k5GTO+V+A58+BKVOAFi2A8+dz2vv0Ac6dA375RbpnFBGVPvv27UPLli2xb98+dO7cGVOmTEHjxo2xbNkytGnTBk+fPlVbf/78+QgMDERCQgICAwMxcOBAbN++HQMHDizwOMrkFBAQgPbt28PHxwc7duzAixcvVOsMHz4cMpkMmzZt0rqPjRs3AgBGjBihalu3bh3efvtt3Lx5EwEBARg5ciQiIyPRvXt3yOVyvfqkJDA36e+HH6SCCgACAgBXV9PGQ0SGx9xkJEIPHTp0EJ6eniI9PV3r8rS0NFGzZk3Rtm1bfXZf4pKSkgQAkZSUpNf2mZmZYs+ePSIzM7PQdRUKIXbvFsLDQwjp1ojSo2FDIU6e1OvwZqco/UGFM5f+TEtLE//9959IS0szaRzFlZ2dLZ4+fSqys7OLvO2dO3cEAFG7dm0RFham8YiMjBQJCQnC0dFReHh4iOjoaLXtt27dKgCI4OBgVdvNmzeFlZWV8PDwEHFxcar2pKQkUa9ePQFAdOrUSSOWhIQEYWNjI3x9fVVtoaGhAoBYs2aN2rrt27cXlpaW4uHDh2rtiYmJwsbGRjRv3lzV9vTpU1GxYkVRoUIFcePGDVW7XC4XXbp0EQCEl5eX2n7y61NdXzPFfQ9WYm5Sp+t7R0aGek66dk2vw5k1c3kfLUvMpU+Zm5ibtOWmgvpTl9dMUd5/9bpmavTo0RgzZgzefPNNrFy5Uu2u81FRUZg4cSIePnyIefPmFaPMK3uio6WhE7/8ktNmby9NLDF1KmBjY7LQiIqteXMgNtbUURTMzQ3488/i7ycqKgpz587VaK9SpQoiIyORnJyMZcuWwcvLS2354MGDsWjRImzbtg3fffcdAGDLli3IyspCSEgIXFxcVOs6Ojri448/VvtWLreNGzciMzNTbXlAQADmzZuHtWvXqoZWANI3e6dOncLWrVsREhKiat++fTsyMzPVLljeu3cvXrx4gcmTJ6Nu3bqqdisrKyxYsMCsx7wzN+ln+3ZAOfLxzTel23EQlRXMTcxNxqZ3MXXw4EHs3r0bvr6+qFWrFjw8PBATE4M7d+4gKysLgwYNwujRow0db6kklwNLlgBz5wKpqTntvXoBy5YBtWqZLjYiQ4mNzflAVtb5+/vj0KFDWpcNGjQ
IAHD27FlE5b3LNoD09HQkJCQgISEBTk5O+PvvvwEAHTp00FhXW5vS2rVrIZPJ1JJN7dq10bZtW5w+fRpXr15F/fr1AQDvvPMOJk+ejI0bN6olrE2bNsHKygpDhgxRtSnjad++vcYxW7VqBSszniubuanohFC/SW8ZnnCLyinmJglzk/HofeQdO3Zg+fLlWLFiBa5du4abN28CABo0aICgoCBMmDDBYEGWZqdPA++9B1y5ktPm7g588410M0QO3aeyws3N1BEUriRifPLkCQBg+fLlBa6XkpICJycnJCUlAYDaN39KrvlcuHL27FlcuXIFr732Gl566SW1ZQEBATh9+jTWrVunurdHlSpV8MYbb2D37t3477//0KBBA0RFReH06dPo1auX2rGV1xxpi8fCwgJOTk4F/l6mxtxUNEeOAJcvSz+3bs3bb1DZw9wkYW4yHr2LKZlMhuDgYAQHByMlJQVJSUmoXLkyKnAuVQDAkyfAzJnA6tU5bRYWQHAwMH8+4OhoutiIjCH3RCrmTKEw7v4d//8/9+XLl9GoUaNC169cuTIAID4+XmPoRVxcnNZtlBf3Hjt2LN/JFH788Ud89tlnsLa2BiANp9i9ezc2btyIhQsXqi76zTtUQxl/fHy8xj4VCgUSEhLg4eFR6O9lKsxNRZP7rNS0afyCj8oe5iYJc5Px6DWbX14VKlSAu7s7kxWkIRMbNwK+vuqFlJ+fNB72m29YSBGVZa1atQIAREZG6rR+06ZNAQAnT57UWKatLSUlBdu2bYODgwPGjBmj9dGkSRPEx8fjl1wXaPbq1QvVq1fHli1boFAosHnzZlSqVAl9+/bVGs8ff/yhcew///wTWcobEZUCzE0Fu3gRiIiQfq5TB+jXz6ThEJERMTcZUaFTVGhx6tQpMXXqVPHo0SOtyx8+fCimTp0qIiMj9dl9iTPUjEmXL2eKLl3UZ+mrVEmIb78VIivLwEGbMXOZ4aesMJf+5IxJOTMm+fv757tOfHy8qFSpknB2dhZXrlzRWJ6SkqL23njz5k1haWmp84xJ69evFwBEQEBAvjEcPnxYABC9e/dWa584caIAIBYuXCgAiFGjRmlsq5wxqWLFiuLWrVuqdrlcLrp162bWs/kxN6kr7L1j6NCcXLViRXEiNX/m8j5alphLnzI3MTdpy00lOZufXmemFi9ejP3798Mtn0GeNWrUwC+//IIlS5bos/tSJz0d2Lq1Hvz8rHD0aE77wIHA1avSDH6WlqaLj4hKjrOzM7Zu3YoXL16gadOmeOONNzBt2jRMmjQJffr0gZubm9od6uvUqYPQ0FDExMSgSZMmmDx5MkJCQtC4cWO1GYuUlMMoCppEoVu3bqhZsyYOHTqEhw8fqtqVwyZCQ0PVnudWpUoVLF68GC9evICfnx/Gjx+PGTNm4JVXXsHTp0/h7u4OCwuDDGowOOYm3d29K83iBwBOTsDIkaaNh4iMi7nJePQ66rlz57TOppFbx44dcebMGb2CKk1OnwZefdUK27f7IjNTGh/q7Q0cOADs2AGY8aUFRGQkvXv3xsWLFzFq1ChcuXIF3333HTZv3oy7d+9i9OjRmD9/vtr6oaGhWL16NapXr47vv/8eO3fuxDvvvIMdO3aorXf9+nWcOnUKtWrVQqdOnfI9voWFBUaOHIns7Gxs2LBB1d66dWvUrVsXcrkcNWvWROfOnbVuHxgYiJ07d8LHxwcbNmzAhg0b0Lp1axw5cgTJycmqsevmhrlJd0uXAtnZ0s9BQYCDg0nDIaISwNxkHHpNQBEfH1/oRV5ubm5aLxIra6ysAOUMk1ZWAh9+KMPHHzMxEZVF3t7eEELotG69evWwZs0anfc9duxYjB07VqM99/Hq1aun8/EXLFiABQsWaLTfuHFDp+0HDBiAAQMGqLXdunULL168QD0zvRERc5Nunj7NuabXzk4qpoio9GJuMm1u0uvMVJUqVXDv3r0C17l79y4qVqyoV1ClScuWwHvvKVC
/fiLOncvCZ5+xkCKi0u3p06fIyMhQa0tLS8PUqVMBAP3MdKYC5ibdfP89kJIi/Tx6NODsbNp4iIh0Ya65Sa9iqnXr1vj5559x//59rcvv3buHPXv2FOtuxMuXL4e3tzfs7OzQqlUr/FnAraFXr16NDh06oGrVqqhatSq6detW4PqG9uWXCnz66Sk0bFhihyQiMprff/8d7u7uGDJkCGbMmIExY8agQYMG+OWXX9ClSxfVzR/NTUnkptIuI0OaVRaQpkHPdZ9MIiKzZq65Sa9iKiQkBKmpqWjXrh1+/PFHPHr0CADw6NEj/O9//0O7du2QlpaGDz74QK+gtm/fjpCQEISFheHChQto2rQp/P398x2acfz4cQwZMgTHjh1DZGQkPD090aNHD8SU0C2v7eyke0gREZUFDRs2RPfu3fHHH3/g22+/xZYtW1CxYkXMnz8fBw4cMNsJKIydm8qCzZuB2Fjp57fekqZEJyIqDcw1N+l1zVTHjh2xePFifPDBB6pZO2QymWq8pIWFBb755ht07NhRr6AWL16MwMBA1b5XrVqFAwcOYN26dZg5c6bG+ps3b1Z7vmbNGuzevRsREREICAjQKwYiovKqbt262LZtm6nDKDJj56bSTqEAvvoq5/mHH5ouFiKiojLX3KRXMQUA77//Pl577TWsWrUK586dQ1JSEqpUqYKWLVti/PjxaNSoETIyMmBra1uk/WZmZuKvv/7CrFmzVG0WFhbo1q2bzjcaS01NhVwuR7Vq1bQuz8jIUBtzmZycDACQy+WQy+VFile5Xe5/yzv2h2GZS3/K5XIIIaBQKKAw9q3ajUj5wVr5u1Dx5denCoUCQgjI5XJYFnB/CEO+to2Vm8qCX3+VbtcBAO3bA61bmzYeIqKyQO9iCgCaNGmCFStWaLRfuHABQUFB2LZtGxITE4u0z4SEBGRnZ8PV1VWt3dXVFdeuXdNpHzNmzIC7uzu6deumdfnChQsxd+5cjfYjR47AoRizR4SHh+u9bVnE/jAsU/enlZUV3Nzc8OLFC2RmZpo0FkN4/vy5qUMoc/L2aWZmJtLS0nDixIkC706fmppq0DiMkZvKgkWLcn7mWSkiIsMoVjGV27Nnz7Bp0yasXbsW//zzD4QQsLe3N9Tudfb5559j27ZtOH78OOzs7LSuM2vWLITkuuo2OTlZdZ2VPnPUy+VyhIeHo3v37rC2ttY79rKC/WFY5tKf6enpuH//PipWrJjv/63SQAiB58+fo1KlSpDJZKYOp0zIr0/T09Nhb2+Pjh07FviaUY4OMAZD5qbly5dj0aJFiI2NRdOmTfHdd9+hZcuWhW63bds2DBkyBH379sWePXv0OnZxnTsH/P679HO9esAbb5gkDCKiMqfYxdRvv/2GtWvXYu/evcjIyIAQAm3atMHo0aP1mlXDyckJlpaWiIuLU2uPi4vL9672Sl999RU+//xz/Pbbb2jSpEm+69na2mod4mFtbV2sD6vF3b6sYX8Ylqn7Mzs7GzKZDBYWFmY7AYEulMPQlL8LFV9+fWphYQGZTFboa9cYr2tD5yblxEirVq1Cq1atsHTpUvj7++P69etwcXHJd7vo6GhMmzYNHTp0KM6vU2y5r5X64ANOmkREZCh6vZ3ev38f8+bNQ61ateDv74/t27ejevXqEEJg1KhR+OOPPzB27FhUqlSpyPu2sbGBn58fIiIiVG0KhQIRERFo06ZNvtt9+eWXmD9/Pg4dOoTmzZvr82sREVEpZszclHtipAYNGmDVqlVwcHDAunXr8t0mOzsbw4YNw9y5c+Hj41OcX61Ybt8Gdu2SfnZxAUaMMFkoRERljs5npuRyOfbs2YO1a9ciIiIC2dnZqFChAoYNG4aAgAB06dIFVlZWsLIq/sjBkJAQjBw5Es2bN0fLli2xdOlSpKSkqGZnCggIgIeHBxYuXAgA+OKLLxAaGootW7bA29sbsf8/72vFihXL/c0ZiYjKspLITfpOjDRv3jy4uLhgzJg
xOHnyZKHHMdbkSEuXSjP5AcDEidmwtFSgvM0PZC4T+ZQl5tKnnByJtCmoP3WZHKkor2uds4u7uzuePHkCmUyG1157DQEBAejfvz8qVKig88F0NWjQIDx+/BihoaGIjY1Fs2bNcOjQIdWkFPfu3VMbSrJy5UpkZmZiwIABavsJCwvDnDlzDB4fERGZh5LITfpMjHTq1CmsXbsWly5d0vk4xpgcKTnZGuvXS9ex2dpmwcfnCA4eLL8Fhakn8imLTN2nnByJCqKtP3WZHKkoEyPpXEwlJibCwsICU6dOxfTp0+Hs7KzzQfQRHByM4OBgrcuOHz+u9jw6OtqosRARkXkq6dyki+fPn2PEiBFYvXo1nJycdN7OGJMjBQbeRUaGlOrHjpVh8ODuRd5PWWAuE/mUJebSp5wcibQpqD91mRypKBMj6VxMjRo1Cjt37sTixYvx7bffwt/fHyNGjEDfvn1hY2Oj8wGJiIgMpSRyU1EnRoqKikJ0dDT69OmjalMOM7GyssL169dRu3Ztje0MPTlSejpw4EAtANKEEx98YAlr6/zv91UemHoin7LI1H3KyZFIm4L6U5fJkYrymtb5r7Vu3To8evQI33//PV599VX88ssvGDx4MFxdXfHee+/h1KlTOh+UiIh0I5PJ0LlzZ1OHYbZKIjcVdWIkX19fXL58GZcuXVI93nzzTbz22mu4dOkSPD09ix2TLjZtkiEpSfrWdcAAoFatEjksEZUDzE05ilT6VqxYEWPHjkVkZCT+/fdfTJkyBTY2Nli9ejU6deoEmUyG69ev4+7du8aKl4jKmrQ0465vQNHR0ZDJZAU+nj17ZvQ4Ro0apXFcR0dHtGjRAkuWLNG4cLawmC9dulToOnkf5qQkclNISAhWr16N//3vf7h69SomTJigMTGScoIKOzs7NGrUSO1RpUoVVKpUCY0aNSqR0RwKBbBkSc5ZKN6kl6iImJuKrLzmJr2nN6pfvz6+/vprfPHFF6qZlMLDw3Hy5EnUrl0bnTp1wqhRozCCc7ASUX5Wrwa+/BI4ehTQ5dv6+/eBLl2A6dOBwEDjx5eP2rVrY/jw4VqXleSY/TFjxqBmzZoQQuD+/fv46aefEBISgqNHj2L//v1q61avXj3f61Dd3NwQFham0b506VIkJSVpXWaujJWbijoxkqnt3w/cvCl9sOjUSYHmzc0nNiKzx9xULOUuNwkDun//vpg3b57w8fERMplMWFhYGHL3RpOUlCQAiKSkJL22z8zMFHv27BGZmZkGjqx0Yn8Ylrn0Z1pamvjvv/9EWlqaYXaYmipEnTpCAEL4+Ahx717B69+7J60HSNulpup12OzsbPH06VORnZ1d5G3v3LkjAAh/f3+9jq0PAKJTp05qbSNHjhQARGRkpFp7TEyMcHFxEQDEsWPH1PZRr169Ih/by8tL6JIm8utTXV8zxX0PLkx5zE3jx0v/VQAh9u6VGyG60sVc3kfLEnPpU+Ym5iZtCupPXV4zRXn/NehXVTVr1sQnn3yCqKgohIeHY/DgwYbcPRGVJfb20rd+Pj7SXUU7d5a+3dPm/n1p+e3b0vpHj0rbm7F//vkHgwcPRo0aNWBjYwMvLy9MmjQJiYmJWtdfs2YNGjVqBDs7O3h6emL69OlIT08v0jHd3d3Rv39/AMC5c+eK/TuUFeUxN61YAUREZOH11+/A31+YOhyi0oO5SQ1zU+GKf4fdfHTt2hVdu3Y11u6JqCzw9ASOH89JRp07S89zD6vIm6zyLjdD+/btwzvvvAMLCwv07dsXnp6e+O+//7Bs2TIcPnwYZ8+eRdWqVVXrz58/H6GhoXB1dUVgYCCsra2xfft2XL16Ve8YzO26JnNRXnKTTAZ06CDw/Pk/sLCoaepwiEoX5iYAzE26MloxRUSkk4KSlpkmq1u3bmm9IXjPnj1Rt25djBgxAk5OTvjjjz/g5eWlWr5t2zYMGTIEoaGh+O6771T7mjdvHjw8PHDhwgW4uLgAAObMmYO
WLVsWKa7Y2Fj8/PPPAKCxbUJCgtaYW7dujZ49exbpOEREZR5zE3OTjlhMEZHpaUtaGzcCI0aYXbICpPsIzZ07V6O9SpUqiIyMRHJyMpYtW6aWrABg8ODBWLRoEbZt26ZKWFu2bEFWVhZCQkJUyQoAHB0d8fHHHxc4UcKaNWtw6NAhCCHw4MED/PTTT3j27Bn69u2Ljh07qq2bmJioNeb333+/1CQsIqISxdzE3KQDFlNEZB7yJq127aR2M0tWAODv749Dhw5pXTZo0CAAwNmzZxEVFaWxPD09HQkJCUhISICTkxP+/vtvAECHDh001tXWltvatWtVP1esWBH169fHsGHDEBQUpLFuvXr1cO3atQL3R0REeTA3aazL3KSOxRQRmQ9PT+lbP2WyAqTnZpSsCvPkyRMAwPLlywtcLyUlBU5OTkhKSgIAtW/+lJTTbucnMjISrVu31jNSIiLSCXOTGuYmdbzxBBGZj/v3peETuY0Ykf9MSmbI0dERAHD58mUIIfJ9KIdZVK5cGQAQHx+vsa+4uLiSC5yIiLRjblLD3KSOxRQRmYe8F/T+8YduU9OamVatWgGQvpnTRdOmTQEAJ0+e1FimrY2IiEoQc5PGMuYmdSymiMj0tM2M1Lat9G8pS1qjR49GpUqV8NFHH+Hff//VWJ6amoozZ86ong8dOhSWlpZYvHix2jeAycnJWLBgQYnETEREWjA3MTfpgMUUEZlWQVPMKi/8LUVJy9nZGVu3bsWLFy/QtGlTvPHGG5g2bRomTZqEPn36wM3NTW0a2Dp16iA0NBQxMTFo0qQJJk+ejJCQEDRu3Bh169Y13S9CRFSeMTcxN+mIE1AQkenocq8OXW6eaGZ69+6NixcvYtGiRfjtt98QHh6OChUqoGbNmhg9ejSGDx+utn5oaCjc3d2xZMkSfP/993BxccHgwYMxb948ODg4mOi3ICIqp5ibADA36YrFFBGZRloa0KWLbvfqyJu0unQB/vkHsLcvwYABb29vCCF0WrdevXpYs2aNzvseO3Ysxo4dq9Gu7XgbNmzAhg0bdN63rjHnFR0drdd2RESlFnOTGuamwnGYHxGZhr09MH06UKeObt/mKZNWnTrSdiWcrIiIqBxgbqIi4pkpIjKdwEBg+HDdk4+np0m+9SMionKEuYmKgGemiMi0ipp8mKyIiMjYmJtIRyymiIiIiIiI9MBiioiIiIiISA8spoiIiIiIiPTAYoqIikTfqUyp/OFrhYhKCt9vSFeGfq2wmCIinVhaWgIA5HK5iSOh0kL5WlG+doiIDI25iYrK0LmJxRQR6cTa2hq2trZISkriN4BUKCEEkpKSYGtrC2tra1OHQ0RlFHMTFYUxchPvM0VEOnNyckJMTAwePHiAypUrw9raGjKZzNRhFYlCoUBmZibS09NhYcHvkwwhd5/KZDLI5XIkJSXhxYsX8PDwMHV4RFTGMTdRXnn7UwhhtNzEYoqIdObo6AgASEhIQExMjImj0Y8QAmlpabC3ty91ydZcaetTW1tbeHh4qF4zRETGwtxEeeXXn8bITSymiKhIHB0d4ejoCLlcjuzsbFOHU2RyuRwnTpxAx44dOfzMQPL2qaWlJfuWiEoUcxPlpq0/jZWbWEwRkV6sra1L5Ru+paUlsrKyYGdnVyrjN0fsUyIyF8xNBJRsf3JQJhERERERkR5YTBEREREREemBxRQREREREZEezLaYWr58Oby9vWFnZ4dWrVrhzz//LHD9nTt3wtfXF3Z2dmjcuDEOHjxYQpESEVF5UJS8tHr1anTo0AFVq1ZF1apV0a1bt0LzGBERlT5mWUxt374dISEhCAsLw4ULF9C0aVP4+/sjPj5e6/qnT5/GkCFDMGbMGFy8eBH9+vVDv379cOXKlRKOnIiIyqKi5qXjx49jyJAhOHbsGCIjI+Hp6YkePXqU2mmbiYhIO7MsphYvXozAwECMHj0aDRo0wKpVq+Dg4IB169ZpXf+bb75Bz5498eGHH6J
+/fqYP38+Xn31VSxbtqyEIyciorKoqHlp8+bNmDhxIpo1awZfX1+sWbMGCoUCERERJRw5EREZk9lNjZ6ZmYm//voLs2bNUrVZWFigW7duiIyM1LpNZGQkQkJC1Nr8/f2xZ88eretnZGQgIyND9TwpKQkA8OTJE8jl8iLHLJfLkZqaisTERE5nCfaHobE/DYv9aXjF7dPnz58DkG6yaI70yUt5paamQi6Xo1q1avmuw9xkPOwLw2OfGhb707BKMi+ZXTGVkJCA7OxsuLq6qrW7urri2rVrWreJjY3Vun5sbKzW9RcuXIi5c+dqtNeqVUvPqImIqLieP3+OypUrmzoMDfrkpbxmzJgBd3d3dOvWLd91mJuIiMyLLnnJ7IqpkjBr1iy1M1kKhQJPnjxB9erVIZPJiry/5ORkeHp64v79+3B0dDRkqKUS+8Ow2J+Gxf40vOL2qRACz58/h7u7uxGiM73PP/8c27Ztw/Hjx2FnZ5fvesxNxsO+MDz2qWGxPw2rJPOS2RVTTk5OsLS0RFxcnFp7XFwc3NzctG7j5uZWpPVtbW1ha2ur1lalShX9g/5/jo6O/A+QC/vDsNifhsX+NLzi9Kk5npFS0icvKX311Vf4/PPP8dtvv6FJkyYFrsvcZHzsC8NjnxoW+9OwSiIvmd0EFDY2NvDz81O7SFd50W6bNm20btOmTRuNi3rDw8PzXZ+IiEhX+uQlAPjyyy8xf/58HDp0CM2bNy+JUImIqISZ3ZkpAAgJCcHIkSPRvHlztGzZEkuXLkVKSgpGjx4NAAgICICHhwcWLlwIAHj//ffRqVMnfP311+jduze2bduG8+fP44cffjDlr0FERGVEUfPSF198gdDQUGzZsgXe3t6qa3grVqyIihUrmuz3ICIiwzLLYmrQoEF4/PgxQkNDERsbi2bNmuHQoUOqi3/v3bsHC4uck2pt27bFli1b8PHHH2P27NmoW7cu9uzZg0aNGpVIvLa2tggLC9MYnlFesT8Mi/1pWOxPwysPfVrUvLRy5UpkZmZiwIABavsJCwvDnDlzSiTm8vB30RX7wvDYp4bF/jSskuxPmTDXuWiJiIiIiIjMmNldM0VERERERFQasJgiIiIiIiLSA4spIiIiIiIiPbCYIiIiIiIi0gOLqWI4ceIE+vTpA3d3d8hkMuzZs8fUIZnMnDlzIJPJ1B6+vr6mDqtUKez1JIRAaGgoatSoAXt7e3Tr1g03b940TbClQGH9OWrUKI3XbM+ePU0TbCmwcOFCtGjRApUqVYKLiwv69euH69evq62Tnp6OoKAgVK9eHRUrVsTbb7+tcaNbMj7mphzMTcXDvGR4zE2GZQ65icVUMaSkpKBp06ZYvny5qUMxCw0bNsSjR49Uj1OnTpk6pFKlsNfTl19+iW+//RarVq3C2bNnUaFCBfj7+yM9Pb2EIy0ddPn/2bNnT7XX7NatW0swwtLl999/R1BQEM6cOYPw8HDI5XL06NEDKSkpqnWmTp2K/fv3Y+fOnfj999/x8OFD9O/f34RRl0/MTeqYm/THvGR4zE2GZRa5SZBBABA///yzqcMwmbCwMNG0aVNTh1Fm5H09KRQK4ebmJhYtWqRqe/bsmbC1tRVbt241QYSli7b/nyNHjhR9+/Y1STxlQXx8vAAgfv/9dyGE9Hq0trYWO3fuVK1z9epVAUBERkaaKsxyj7mJuclQmJcMj7nJ8EyRm3hmigzm5s2bcHd3h4+PD4YNG4Z79+6ZOqQy486dO4iNjUW3bt1UbZUrV0arVq0QGRlpwshKt+PHj8PFxQX16tXDhAkTkJiYaOqQSo2kpCQAQLVq1QAAf/31F+Ryudpr1NfXFy+99BJfo2RSzE3GwbxkPMxN+jNFbmIxRQbRqlUrbNiwAYcOHcLKlStx584ddOjQAc+fPzd1aGVCbGwsAMDV1VWt3dXVVbWMiqZnz5748ccfERERgS+++AK///47Xn/9dWRnZ5s6NLOnUCgwZcoUtGvXDo0
aNQIgvUZtbGxQpUoVtXX5GiVTYm4yHuYl42Bu0p+pcpOVQfZC5d7rr7+u+rlJkyZo1aoVvLy8sGPHDowZM8aEkRFpN3jwYNXPjRs3RpMmTVC7dm0cP34cXbt2NWFk5i8oKAhXrlzhtSdk9pibqLRhbtKfqXITz0yRUVSpUgUvv/wybt26ZepQygQ3NzcA0Jh9Ji4uTrWMisfHxwdOTk58zRYiODgYv/zyC44dO4aaNWuq2t3c3JCZmYlnz56prc/XKJkT5ibDYV4qGcxNujFlbmIxRUbx4sULREVFoUaNGqYOpUyoVasW3NzcEBERoWpLTk7G2bNn0aZNGxNGVnY8ePAAiYmJfM3mQwiB4OBg/Pzzzzh69Chq1aqlttzPzw/W1tZqr9Hr16/j3r17fI2S2WBuMhzmpZLB3FQwc8hNHOZXDC9evFD7puDOnTu4dOkSqlWrhpdeesmEkZW8adOmoU+fPvDy8sLDhw8RFhYGS0tLDBkyxNShlRqFvZ6mTJmCBQsWoG7duqhVqxY++eQTuLu7o1+/fqYL2owV1J/VqlXD3Llz8fbbb8PNzQ1RUVGYPn066tSpA39/fxNGbb6CgoKwZcsW7N27F5UqVVKNNa9cuTLs7e1RuXJljBkzBiEhIahWrRocHR0xadIktGnTBq1btzZx9OULc1MO5qbiYV4yPOYmwzKL3GSQOQHLqWPHjgkAGo+RI0eaOrQSN2jQIFGjRg1hY2MjPDw8xKBBg8StW7dMHVapUtjrSaFQiE8++US4uroKW1tb0bVrV3H9+nXTBm3GCurP1NRU0aNHD+Hs7Cysra2Fl5eXCAwMFLGxsaYO22xp60sAYv369ap10tLSxMSJE0XVqlWFg4ODeOutt8SjR49MF3Q5xdyUg7mpeJiXDI+5ybDMITfJ/j8QIiIiIiIiKgJeM0VERERERKQHFlNERERERER6YDFFRERERESkBxZTREREREREemAxRUREREREpAcWU0RERERERHpgMUVERERERKQHFlNERERERER6YDFFREUmk8nQuXNnU4dBREQEgHmJTIfFFJERREdHQyaTqT2sra3h4eGBd955B+fPnzd1iEREVI4wLxEZh5WpAyAqy2rXro3hw4cDAFJSUvDXX39h586d2LNnD3777Td07NjRxBESEVF5wrxEZFgspoiMqE6dOpgzZ45a2+eff45Zs2bhk08+we+//26awIiIqFxiXiIyLA7zIyphY8aMAQD89ddfau0JCQmYMmUKatWqBVtbW7i4uOCdd97BlStXNPbRuXNnyGQyrfsfNWoUZDIZoqOjVW0bNmyATCbDhg0bcOTIEbRt2xYODg6oXr06Ro4cicTERK37WrNmDRo1agQ7Ozt4enpi+vTpSE9P1/M3JyIic8S8RKQ/npkiMhErq5z/fo8fP0abNm0QFRWFzp07Y/Dgwbhz5w527dqFAwcO4PDhw2jfvn2xj7lv3z4cOHAAffr0Qdu2bXHixAn8+OOPiIqKwqlTp9TWnT9/PkJDQ+Hq6orAwEBYW1tj+/btuHr1arHjICIi88O8RFR0LKaIStiaNWsAQC0JzZgxA1FRUZg1axY+++wzVfvBgwfRu3dvjB49GtevX4eFRfFOJu/fvx/Hjx9Hu3btAADZ2dno1q0bjh8/jjNnzqB169YAgFu3bmHevHnw8PDAhQsX4OLiAgCYM2cOWrZsWawYiIjIvDAvEemPw/yIjOjWrVuYM2cO5syZgw8//BBdunTB7Nmz4erqikWLFgEAMjMzsXXrVlSvXh0ff/yx2va9evVC9+7dcevWLfzxxx/Fjmfo0KGqhAUAlpaWGDlyJADg3LlzqvYtW7YgKysLISEhqoQFAI6OjhoxEhFR6cG8RGRYPDNFZERRUVGYO3euWpubmxtOnjyJOnXqAACuXbuG9PR0vPbaa3BwcNDYx2uvvYbw8HBcunQJHTp0KFY8fn5+Gm01a9YEADx79kzV9vfffwOA1uMVNwYiIjI
d5iUiw+KZKSIj8vf3hxACQgjEx8dj0aJFiI+Px5tvvokXL14AAJKTkwEArq6uWvdRo0YNtfWKw9HRUaNNOUY+Oztb1ZaUlAQAat/+KeUXJxERmT/mJSLDYjFFVEKcnZ0xbdo0zJ49G1evXlUNS1Amkri4OK3bxcbGqq0HQDVGPSsrS2N9ZcIpjsqVKwMA4uPjNZblFycREZUuzEtExcdiiqiEzZ49G+7u7lixYgWio6Ph6+sLOzs7nDt3DqmpqRrrHz9+HADQrFkzVVvVqlUBADExMWrrKhQK1VCI4mjatCkA4OTJkxrLtLUREVHpxbxEpD8WU0QlzN7eHjNmzIBcLsf8+fNhY2ODIUOGICEhAQsXLlRb99ChQzh8+DDq1KmjdoFuixYtAEj36cht8eLFuHPnTrFjHDp0KCwtLbF48WK1bwGTk5OxYMGCYu+fiIjMB/MSkf5YTBGZwLhx4+Du7q66l8YXX3wBHx8fLFiwAF27dsXs2bMxdOhQ9OnTBw4ODli/fr3a9LOjR49G1apVMWfOHLz11luYNm0aOnfujM8//xydOnUqdnx16tRBaGgoYmJi0KRJE0yePBkhISFo3Lgx6tatW+z9ExGReWFeItIPiykiE7Czs8OsWbOQlZWFuXPnwtnZGWfPnsXkyZMRFRWFr776CuHh4ejXrx/Onj2rcWNEV1dXHDt2DF27dsWRI0ewevVqVKlSBWfOnIG3t7dBYgwNDcXq1atRvXp1fP/999i5cyfeeecd7NixwyD7JyIi88G8RKQfmRBCmDoIIiIiIiKi0oZnpoiIiIiIiPTAYoqIiIiIiEgPLKaIiIiIiIj0wGKKiIiIiIhIDyymiIiIiIiI9MBiioiIiIiISA8spoiIiIiIiPTAYoqIiIiIiEgPLKaIiIiIiIj0wGKKiIiIiIhIDyymiIiIiIiI9MBiioiIiIiISA//BxFKjUHVmVR3AAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "color1 = 'blue'\n", + "color2 = 'red'\n", + "\n", + "def viz():\n", + " fig, axs = plt.subplots(figsize=(10, 2), nrows=1, ncols=2)\n", + " \n", + " # cifar100\n", + " axs[0].plot([r for r, _ in fedavg_cifar], [a for _, a in fedavg_cifar], label='FedAvg', color=color1, linewidth=2.0)\n", + " axs[0].scatter([r for r, _ in fedpft_cifar], [a for _, a in fedpft_cifar], label='FedPFT', color=color2, marker='x', s=100)\n", + " axs[0].set_title('CIFAR100 - ResNet50')\n", + " axs[0].set_ylim(0, 0.7)\n", + " \n", + " # caltech101\n", + " axs[1].plot([r for r, _ in fedavg_caltech], [a for _, a in fedavg_caltech], label='FedAvg', color=color1, linewidth=2.0)\n", + " axs[1].scatter([r for r, _ in fedpft_caltech], [a for _, a in fedpft_caltech], label='FedPFT', color=color2, marker='x', s=100)\n", + " axs[1].set_title('Caltech101 - Clip/ViT-B')\n", + " axs[1].set_ylim(0.2, 1)\n", + " \n", + " for ax in axs:\n", + " ax.set_xticks([1, 5, 10 , 15, 20])\n", + " ax.grid()\n", + " ax.legend(fontsize=14, loc='lower right')\n", + " ax.set_xlabel(\"Round\", fontsize=14)\n", + " ax.set_ylabel(\"Accuracy\", fontsize=14)\n", + "\n", + " return fig\n", + "\n", + "f = viz()\n" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "92460065", + "metadata": {}, + "outputs": [], + "source": [ + "saveFig(\"FedPft.png\", f)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/baselines/fedpft/fedpft/__init__.py b/baselines/fedpft/fedpft/__init__.py new file mode 100644 index 000000000000..a5e567b59135 
"""Define your client class and a function to construct such clients.

Please overwrite `flwr.client.NumPyClient` or `flwr.client.Client` and create a function
to instantiate your client.
"""

from collections import OrderedDict
from typing import Callable, Dict, List, Tuple

import flwr as fl
import torch
from flwr.common.typing import NDArrays, Scalar
from hydra.utils import instantiate
from omegaconf import DictConfig
from torch import nn
from torch.utils.data import DataLoader

from fedpft.models import extract_features, test, train
from fedpft.utils import gmmparam_to_ndarrays, learn_gmm


class FedPFTClient(fl.client.NumPyClient):
    """Flower FedPFT client.

    Implementation based on https://arxiv.org/abs/2402.01862. Instead of
    exchanging model weights, the client fits Gaussian mixtures on locally
    extracted features and ships the GMM parameters to the server.
    """

    # pylint: disable=too-many-arguments
    def __init__(
        self,
        trainloader: DataLoader,
        testloader: DataLoader,
        feature_extractor: torch.nn.Module,
        num_classes: int,
        device: torch.device,
    ) -> None:
        """Create a FedPFT client.

        Parameters
        ----------
        trainloader : DataLoader
            Dataset used for learning GMMs.
        testloader : DataLoader
            Dataset used for evaluating the `classifier_head` sent from the server.
        feature_extractor : torch.nn.Module
            Frozen backbone used to embed images into feature vectors.
        num_classes : int
            Number of total classes in the dataset.
        device : torch.device
            Device used to extract features and evaluate the `classifier_head`.
        """
        self.trainloader = trainloader
        self.testloader = testloader
        self.feature_extractor = feature_extractor
        # Only this linear head is ever exchanged as NumPy arrays.
        self.classifier_head = nn.Linear(
            feature_extractor.hidden_dimension, num_classes
        )
        self.device = device

    def get_parameters(self, config) -> NDArrays:
        """Return the parameters of the `classifier_head` as NumPy arrays."""
        state = self.classifier_head.state_dict()
        return [tensor.cpu().numpy() for tensor in state.values()]

    def set_parameters(self, parameters: NDArrays) -> None:
        """Load `parameters` into the `classifier_head`."""
        keys = self.classifier_head.state_dict().keys()
        new_state = OrderedDict(
            (key, torch.tensor(array)) for key, array in zip(keys, parameters)
        )
        self.classifier_head.load_state_dict(new_state, strict=True)

    def fit(
        self, parameters: NDArrays, config: Dict[str, Scalar]
    ) -> Tuple[NDArrays, int, Dict]:
        """Fit GMMs on local features and return their parameters."""
        # Embed the local training set with the frozen backbone.
        features, labels = extract_features(
            dataloader=self.trainloader,
            feature_extractor=self.feature_extractor,
            device=self.device,
        )

        # Fit the Gaussian mixtures on the extracted features.
        gmm_list = learn_gmm(
            features=features,
            labels=labels,
            n_mixtures=int(config["n_mixtures"]),
            cov_type=str(config["cov_type"]),
            seed=int(config["seed"]),
            tol=float(config["tol"]),
            max_iter=int(config["max_iter"]),
        )

        # Flatten every GMM into a single list of NDArrays for transport.
        flattened: NDArrays = []
        for gmm in gmm_list:
            flattened.extend(gmmparam_to_ndarrays(gmm))
        return flattened, 0, {}

    def evaluate(
        self, parameters: NDArrays, config: Dict[str, Scalar]
    ) -> Tuple[float, int, Dict]:
        """Evaluate the server-sent `classifier_head` on the local test data."""
        self.set_parameters(parameters)
        loss, acc = test(
            classifier_head=self.classifier_head,
            dataloader=self.testloader,
            feature_extractor=self.feature_extractor,
            device=self.device,
        )
        return loss, len(self.testloader.dataset), {"accuracy": acc}


class FedAvgClient(FedPFTClient):
    """FedAvg baseline client: trains the classifier head locally."""

    def fit(
        self, parameters: NDArrays, config: Dict[str, Scalar]
    ) -> Tuple[NDArrays, int, Dict]:
        """Train the `classifier_head` on local data and return its weights."""
        self.set_parameters(parameters)

        # Optimize only the classifier head; the backbone stays frozen.
        optimizer = torch.optim.AdamW(
            params=self.classifier_head.parameters(), lr=float(config["lr"])
        )
        train(
            classifier_head=self.classifier_head,
            dataloader=self.trainloader,
            feature_extractor=self.feature_extractor,
            device=self.device,
            num_epochs=int(config["num_epochs"]),
            opt=optimizer,
        )
        return self.get_parameters(config={}), len(self.trainloader.dataset), {}


# pylint: disable=too-many-arguments
def generate_client_fn(
    client_cfg: DictConfig,
    trainloaders: List[DataLoader],
    testloaders: List[DataLoader],
    feature_extractor: torch.nn.Module,
    num_classes: int,
    device: torch.device,
) -> Callable[[str], fl.client.NumPyClient]:
    """Build the factory that Flower uses to spawn clients on demand.

    Parameters
    ----------
    client_cfg : DictConfig
        Hydra config selecting the client class to instantiate.
    trainloaders : List[DataLoader]
        One train dataloader per client.
    testloaders : List[DataLoader]
        One test dataloader per client.
    feature_extractor : torch.nn.Module
        Pre-trained model shared by all clients as the backbone.
    num_classes : int
        Number of classes in the dataset.
    device : torch.device
        Device to load the `feature_extractor` on.
    """

    def client_fn(cid: str) -> fl.client.NumPyClient:
        """Instantiate the client owning partition `cid`."""
        idx = int(cid)
        return instantiate(
            client_cfg,
            trainloader=trainloaders[idx],
            testloader=testloaders[idx],
            feature_extractor=feature_extractor,
            num_classes=num_classes,
            device=device,
        )

    return client_fn
# pylint: disable=too-many-instance-attributes
class Dataset:
    """Federated image dataset partitioned with a Dirichlet distribution."""

    # pylint: disable=too-many-locals, too-many-arguments
    def __init__(
        self,
        dataset: str,
        num_clients: int,
        batch_size: int,
        dirichlet_alpha: float,
        partition_by: str,
        image_column_name: str,
        transform: "transforms.Compose",
        image_input_size: int,
        seed: int = 0,
        split_size: float = 0.8,
        **kwargs,
    ) -> None:
        """Store the partitioning configuration; data is only loaded lazily.

        Parameters
        ----------
        dataset : str
            Name or path of the dataset to be downloaded from HuggingFace.
        num_clients : int
            Number of clients.
        batch_size : int
            Batch size of training and testing dataloaders of clients.
        dirichlet_alpha : float
            Alpha parameter of the Dirichlet distribution.
        partition_by : str
            Label name used for partitioning the dataset.
        image_column_name : str
            Column name of the image in the dataset.
        transform : transforms.Compose
            Transformation applied to each image of a batch.
        image_input_size : int
            Input size of the pre-trained model.
        seed : int, optional
            Seed for partitioning the dataset. Default is 0.
        split_size : float, optional
            The portion of the dataset used as training; the rest is test.
        """
        self.dataset = dataset
        self.num_clients = num_clients
        self.image_input_size = image_input_size
        self.transform = transform
        self.batch_size = batch_size
        self.dirichlet_alpha = dirichlet_alpha
        self.partition_by = partition_by
        self.seed = seed
        self.split_size = split_size
        self.image_column_name = image_column_name
        self.kwargs = kwargs

    def get_loaders(self):
        """Partition the dataset and return per-client train/test dataloaders."""
        partitioner = DirichletPartitioner(
            num_partitions=self.num_clients,
            partition_by=self.partition_by,
            alpha=self.dirichlet_alpha,
            min_partition_size=10,
            self_balancing=True,
        )

        fds = FederatedDataset(
            dataset=self.dataset, partitioners={"train": partitioner}
        )
        # Create a train/test split for each partition and wrap in DataLoaders.
        trainloaders, testloaders = [], []
        for partition_id in range(self.num_clients):
            partition = fds.load_partition(partition_id)
            partition = partition.with_transform(self.apply_batch_transforms())
            partition = partition.train_test_split(
                train_size=self.split_size, seed=self.seed
            )
            trainloaders.append(
                DataLoader(partition["train"], batch_size=self.batch_size)
            )
            testloaders.append(
                DataLoader(partition["test"], batch_size=self.batch_size)
            )

        return trainloaders, testloaders

    def apply_batch_transforms(self) -> Callable[[Dict], Dict]:
        """Return a callable that resizes and transforms a batch of images."""

        def batch_transform(batch):
            # Resize every image to the backbone's input size, then transform.
            batch_img = [
                self.transform(
                    img.resize((self.image_input_size, self.image_input_size))
                )
                for img in batch[self.image_column_name]
            ]
            batch_label = list(batch[self.partition_by])

            return {"img": batch_img, "label": batch_label}

        return batch_transform
# pylint: disable=too-many-locals
@hydra.main(config_path="conf", config_name="base", version_base=None)
def main(cfg: DictConfig) -> None:
    """Run federated learning with frozen, pre-trained models.

    Parameters
    ----------
    cfg : DictConfig
        An omegaconf object that stores the hydra config.
    """
    # Print config
    print(OmegaConf.to_yaml(cfg))

    # Device used for feature extraction, training and evaluation
    device = torch.device(cfg.device)

    # Partition the dataset and build per-client dataloaders
    trainloaders, testloaders = instantiate(
        cfg.dataset,
        transform=cfg.model.transform,
        image_input_size=cfg.model.image_input_size,
    ).get_loaders()

    # Factory the simulation engine uses to create clients on demand
    client_fn = generate_client_fn(
        client_cfg=cfg.client,
        trainloaders=trainloaders,
        testloaders=testloaders,
        feature_extractor=instantiate(cfg.model.feature_extractor),
        num_classes=cfg.dataset.num_classes,
        device=device,
    )

    # Setup strategy
    strategy = instantiate(cfg.strategy)

    # Start simulation
    history = fl.simulation.start_simulation(
        client_fn=client_fn,
        num_clients=cfg.num_clients,
        config=fl.server.ServerConfig(num_rounds=cfg.num_rounds),
        strategy=strategy,
        client_resources={"num_cpus": cfg.num_cpus, "num_gpus": cfg.num_gpus},
    )

    # Save results
    accuracy_per_round = history.metrics_distributed["accuracy"]
    print(accuracy_per_round)
    save_path = HydraConfig.get().runtime.output_dir

    strategy_name = strategy.__class__.__name__

    def format_variable(x):
        """Render bytes via repr so they are safe inside a file name."""
        return f"{x!r}" if isinstance(x, bytes) else x

    file_suffix: str = (
        f"_{format_variable(strategy_name)}"
        f"_{format_variable(cfg.dataset.name)}"
        f"_clients={format_variable(cfg.num_clients)}"
        f"_rounds={format_variable(cfg.num_rounds)}"
        f"_finalacc={format_variable(accuracy_per_round[-1][1]):.2f}"
    )
    filename = "results" + file_suffix + ".pkl"

    # Fix: the original f-string had no placeholder; report the actual file.
    print(f">>> Saving {filename}")
    results_path = Path(save_path) / filename
    results = {"history": history}

    with open(str(results_path), "wb") as hist_file:
        pickle.dump(results, hist_file, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
def resnet50() -> torch.nn.Module:
    """Return ResNet-50 (ImageNet weights) as a feature extractor.

    The classification layer is removed so the module maps images to
    2048-dimensional embeddings.
    """
    resnet50_model = models.resnet50(weights=models.ResNet50_Weights.DEFAULT)

    # Remove last layer and flatten outputs
    resnet50_model = torch.nn.Sequential(
        *(list(resnet50_model.children())[:-1]), torch.nn.Flatten()
    )

    # Expose the embedding size so callers can size classifier heads.
    resnet50_model.hidden_dimension = 2048

    return resnet50_model


def clip_vit(name: str) -> torch.nn.Module:
    """Return the CLIP-ViT vision tower as a feature extractor.

    Parameters
    ----------
    name : str
        Name of the CLIP model on the transformers library,
        e.g. `openai/clip-vit-base-patch32`.
    """

    class ClipVit(nn.Module):
        """Wrap outputs to return only the pooled (CLS token) output."""

        def __init__(self, vision_model):
            super().__init__()
            self.vision_model = vision_model
            self.hidden_dimension = vision_model.config.hidden_size

        def forward(self, x):
            """Return pooled output (CLS token)."""
            output = self.vision_model(x)
            # Index 1 is the pooled output of the vision transformer.
            return output[1]

    vision_model = CLIPModel.from_pretrained(name).vision_model

    return ClipVit(vision_model)


def transform(mean: List, std: List) -> "transforms.Compose":
    """Return a `transforms.Compose` function for normalizing images.

    Parameters
    ----------
    mean : List
        Sequence of means for each channel.
    std : List
        Sequence of standard deviations for each channel.

    Returns
    -------
    transforms.Compose
        Transform function for normalizing images.
    """
    transform_comp = transforms.Compose(
        [
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ]
    )
    return transform_comp


def extract_features(
    dataloader: DataLoader, feature_extractor: torch.nn.Module, device: torch.device
) -> Tuple[NDArray, NDArray]:
    """Extract features and labels from images using a feature extractor.

    Parameters
    ----------
    dataloader : DataLoader
        Dataloader yielding {'img': img, 'label': label} dicts.
    feature_extractor : torch.nn.Module
        Model for extracting features.
    device : torch.device
        Device for loading `feature_extractor`.

    Returns
    -------
    features : NDArray
        2D float64 array of features extracted from `feature_extractor`.
    labels : NDArray
        1D array containing the label of each feature row.
    """
    feature_extractor.to(device)

    features, labels = [], []
    for sample in dataloader:
        batch_samples = sample["img"].to(device)
        batch_label = sample["label"].to(device)
        # No gradients needed: the backbone is frozen during extraction.
        with torch.no_grad():
            feature = feature_extractor(batch_samples)
        features.append(feature.cpu().detach().numpy())
        labels.append(batch_label.cpu().detach().numpy())

    # Reshape features and labels into single numpy arrays
    features_np = np.concatenate(features, axis=0).astype("float64")
    labels_np = np.concatenate(labels)

    return features_np, labels_np
+ device : torch.device + Device for loading `feature_extractor`. + + Returns + ------- + loss : float + CrossEntropy Loss of `classifier_head` on the dataset. + accuracy : float + Accuracy of `classifier_head` on the dataset. + """ + classifier_head.eval() + feature_extractor.eval() + classifier_head.to(device) + feature_extractor.to(device) + + correct, total, loss = 0, 0, 0 + for sample in dataloader: + samples = sample["img"].to(device) + labels = sample["label"].to(device) + with torch.no_grad(): + feature = feature_extractor(samples) + output = classifier_head(feature) + pred = torch.max(output, 1)[1].data.squeeze() + correct += (pred == labels).sum().item() + total += samples.shape[0] + running_loss = nn.CrossEntropyLoss()(output, labels) + loss += running_loss.cpu().item() + + return loss, correct / total + + +# pylint: disable=too-many-locals, too-many-arguments +def train( + classifier_head: torch.nn.Linear, + dataloader: DataLoader, + opt: torch.optim.Optimizer, + num_epochs: int, + device: torch.device, + feature_extractor: Optional[torch.nn.Module] = None, + verbose: Optional[bool] = False, +) -> None: + """Trains the `classifier_head`. + + Parameters + ---------- + classifier_head : torch.nn.Linear + Classifier head model. + dataloader : DataLoader + Dataset used for evaluating `classifier_head` + containing {'img': img, 'label': label} dicts. + opt : torch.optim.Optimizer + Optimizer for the `classifier_head`. + num_epochs: int + Number of epochs to train the `classifier_head`. + device : torch.device + Device for loading `feature_extractor`. + feature_extractor : torch.nn.Module, Optional + Model used for extracting features from the `dataloader`, optional. + `verbose` : bool, Optional + Whether or not log the accuracy during the training. Defaults to False. 
+ """ + classifier_head.to(device) + if feature_extractor: + feature_extractor.eval() + feature_extractor.to(device) + + for epoch in range(num_epochs): + correct, total, loss = 0, 0, 0 + for _, batch in enumerate(dataloader): + classifier_head.zero_grad() + samples = batch["img"].to(device) + labels = batch["label"].to(device) + if feature_extractor: + with torch.no_grad(): + samples = feature_extractor(samples) + output = classifier_head(samples) + pred = torch.max(output, 1)[1].data.squeeze() + correct += (pred == labels).sum().item() + total += samples.shape[0] + running_loss = nn.CrossEntropyLoss()(output, labels) + loss += running_loss + running_loss.backward() + opt.step() + if verbose: + log(logging.INFO, "Epoch: %s --- Accuracy: %s", epoch + 1, correct / total) diff --git a/baselines/fedpft/fedpft/server.py b/baselines/fedpft/fedpft/server.py new file mode 100644 index 000000000000..9c6c605884d6 --- /dev/null +++ b/baselines/fedpft/fedpft/server.py @@ -0,0 +1,96 @@ +"""Create global evaluation function.""" + +from typing import Callable, Dict, List, Tuple + +from flwr.common import Metrics + + +def fedpft_get_on_fit_config_fn( + n_mixtures: int, cov_type: str, seed: int, tol: float, max_iter: int +) -> Callable[[int], Dict[str, str]]: + """Return a function which returns FedPFT training configurations. 
+
+    Parameters
+    ----------
+    n_mixtures : int
+        Number of mixtures for GMMs
+    cov_type : str
+        Type of covariance
+    seed : int
+        Seed for learning and sampling from the GMMs
+    tol : float
+        Error tolerance for learning GMMs
+    max_iter : int
+        Maximum number of iterations for EM algorithm
+
+    Returns
+    -------
+    Callable[[int], Dict[str, str]]
+        Function to return a config with the GMM fitting parameters
+        (`n_mixtures`, `cov_type`, `seed`, `tol` and `max_iter`)
+    """
+
+    # pylint: disable=unused-argument
+    def fit_config(server_round: int) -> Dict[str, str]:
+        """Return a configuration for training Gaussian Mixtures."""
+        config = {
+            "n_mixtures": str(n_mixtures),
+            "cov_type": cov_type,
+            "seed": str(seed),
+            "tol": str(tol),
+            "max_iter": str(max_iter),
+        }
+        return config
+
+    return fit_config
+
+
+def fedavg_get_on_fit_config_fn(
+    learning_rate: float,
+    num_epochs: int,
+) -> Callable[[int], Dict[str, str]]:
+    """Return a function which returns FedAvg training configurations.
+
+    Parameters
+    ----------
+    learning_rate : float
+        Client's learning rate
+    num_epochs : int
+        Number of epochs for local learning of clients
+
+    Returns
+    -------
+    Callable[[int], Dict[str, str]]
+        Function to return a config with the `learning_rate` and `num_epochs`
+    """
+
+    # pylint: disable=unused-argument
+    def fit_config(server_round: int) -> Dict[str, str]:
+        """Return a configuration with number of epochs and learning rate."""
+        config = {
+            "lr": str(learning_rate),
+            "num_epochs": str(num_epochs),
+        }
+        return config
+
+    return fit_config
+
+
+def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics:
+    """Aggregate with weighted average during evaluation.
+
+    Parameters
+    ----------
+    metrics : List[Tuple[int, Metrics]]
+        The list of metrics to aggregate.
+
+    Returns
+    -------
+    Metrics
+        The weighted average metric.
+ """ + # Multiply accuracy of each client by number of examples used + accuracies = [num_examples * float(m["accuracy"]) for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"accuracy": int(sum(accuracies)) / int(sum(examples))} diff --git a/baselines/fedpft/fedpft/strategy.py b/baselines/fedpft/fedpft/strategy.py new file mode 100644 index 000000000000..2e4302bde45c --- /dev/null +++ b/baselines/fedpft/fedpft/strategy.py @@ -0,0 +1,147 @@ +"""FedPFT strategy.""" + +from typing import Dict, List, Optional, Tuple, Union + +import torch +from flwr.common import ( + FitRes, + Parameters, + Scalar, + ndarrays_to_parameters, + parameters_to_ndarrays, +) +from flwr.server.client_proxy import ClientProxy +from flwr.server.strategy import FedAvg +from omegaconf import DictConfig +from sklearn.mixture import GaussianMixture as GMM +from torch.utils.data import DataLoader + +from fedpft.models import train +from fedpft.utils import chunks, ndarrays_to_gmmparam + + +class FedPFT(FedAvg): + """Implementation of FedPFT. + + https://arxiv.org/abs/2402.01862 + Authors: + Mahdi Beitollahi, Alex Bie, Sobhan Hemati, Leo Maxime Brunswic, + Xu Li, Xi Chen, Guojun Zhang. + """ + + def __init__( + self, + *args, + num_classes: int, + feature_dimension: int, + server_opt: DictConfig, + server_batch_size: int, + num_epochs: int, + device: torch.device, + **kwargs, + ) -> None: + """Create FedPFT strategy. + + Parameters + ---------- + num_classes : int + Number of classes in the dataset. + feature_dimension : int + Size of feature embeddings + server_opt : DictConfig + Configuration of server optimizer for training classifier head. + server_batch_size : int + Batch size of synthetic features. + num_epochs : int + Number of epochs to train the classifier head. + + Attributes + ---------- + device : torch.device() + Device to train the classifier head at the server. 
+ """ + super().__init__(*args, **kwargs) + self.num_classes = num_classes + self.feature_dimension = feature_dimension + self.server_opt = server_opt + self.server_batch_size = server_batch_size + self.num_epochs = num_epochs + self.device = device + + # pylint: disable=too-many-locals + def aggregate_fit( + self, + server_round: int, + results: List[Tuple[ClientProxy, FitRes]], + failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], + ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + """Learn a classifier head by generating samples from the GMMs.""" + # Do not aggregate if there are failures. + if not self.accept_failures and failures: + raise Exception("there are failures and failures are not accepted") + + assert self.on_fit_config_fn is not None + config = self.on_fit_config_fn(server_round) + + # Sample from the GMMs to create synthetic feature dataset + synthetic_features_dataset: List[Union[Dict, Tuple]] = [] + for _, fit_res in results: + # Convert byte parameters into ndarrays and GMMParameters + ndarray = parameters_to_ndarrays(fit_res.parameters) + all_gmm_parameters = [ + ndarrays_to_gmmparam(array) for array in chunks(ndarray, 5) + ] + + # Sample from GMM_label pairs to create synthetic features + for gmm_parameter in all_gmm_parameters: + gmm = GMM( + n_components=int(config["n_mixtures"]), + covariance_type=config["cov_type"], + random_state=int(config["seed"]), + tol=float(config["tol"]), + max_iter=int(config["max_iter"]), + ) + # Set values of the GMMs + gmm.means_ = gmm_parameter.means.astype("float32") + gmm.weights_ = gmm_parameter.weights.astype("float32") + gmm.covariances_ = gmm_parameter.covariances.astype("float32") + + # Sample features + syn_features, _ = gmm.sample(gmm_parameter.num_samples) + syn_features = torch.tensor(syn_features, dtype=torch.float32) + gmm_labels = torch.tensor( + [int(gmm_parameter.label)] * int(gmm_parameter.num_samples) + ) + + # Add to train data + synthetic_features_dataset += 
list(zip(syn_features, gmm_labels)) + + # Train a classifier head + synthetic_features_dataset = [ + {"img": img, "label": label} for img, label in synthetic_features_dataset + ] + synthetic_loader = DataLoader( + synthetic_features_dataset, + batch_size=self.server_batch_size, + shuffle=True, + ) + classifier_head = torch.nn.Linear(self.feature_dimension, self.num_classes) + opt = torch.optim.AdamW( + params=classifier_head.parameters(), lr=self.server_opt.lr + ) + + train( + classifier_head=classifier_head, + dataloader=synthetic_loader, + device=self.device, + num_epochs=self.num_epochs, + opt=opt, + verbose=True, + ) + + # Send the classifier head to clients + classifier_ndarray = [ + val.cpu().numpy() for _, val in classifier_head.state_dict().items() + ] + + return ndarrays_to_parameters(classifier_ndarray), {} diff --git a/baselines/fedpft/fedpft/utils.py b/baselines/fedpft/fedpft/utils.py new file mode 100644 index 000000000000..b7812d556d01 --- /dev/null +++ b/baselines/fedpft/fedpft/utils.py @@ -0,0 +1,103 @@ +"""Utility functions.""" + +from dataclasses import dataclass +from typing import List + +import numpy as np +from numpy.typing import NDArray +from sklearn.mixture import GaussianMixture + + +@dataclass +class GMMParameters: + """GMM parameters.""" + + label: NDArray + means: NDArray + weights: NDArray + covariances: NDArray + num_samples: NDArray + + +def gmmparam_to_ndarrays(gmm: GMMParameters) -> List[NDArray]: + """Convert gmm object to NumPy ndarrays.""" + return [gmm.label, gmm.means, gmm.weights, gmm.covariances, gmm.num_samples] + + +def ndarrays_to_gmmparam(ndarrays: NDArray) -> GMMParameters: + """Convert NumPy ndarray to GMM object.""" + return GMMParameters( + label=ndarrays[0], + means=ndarrays[1], + weights=ndarrays[2], + covariances=ndarrays[3], + num_samples=ndarrays[4], + ) + + +# pylint: disable=too-many-arguments +def learn_gmm( + features: NDArray, + labels: NDArray, + n_mixtures: int, + cov_type: str, + seed: int, + tol: float 
= 1e-12,
+    max_iter: int = 1000,
+) -> List[GMMParameters]:
+    """Learn a list of 16-bit GMMs for each label.
+
+    Parameters
+    ----------
+    features : NDArray
+        A 2-d array with size (n_samples, feature_dimension) containing
+        extracted features for all the samples.
+    labels : NDArray
+        An array with size (n_samples) containing labels associated with
+        each sample in `features`.
+    n_mixtures : int
+        Number of mixtures in each Gaussian Mixture.
+    cov_type : str
+        Covariance type of Gaussian Mixtures, e.g. spherical.
+    seed: int
+        Seed for learning and sampling from Gaussian Mixtures.
+    tol: float
+        Tolerance of Gaussian Mixtures.
+    max_iter: int
+        Number of maximum iterations to learn the Gaussian Mixtures.
+
+    Returns
+    -------
+    List[GMMParameters]
+        Returns a list containing the GMMParameters for each class.
+    """
+    gmm_list = []
+    for label in np.unique(labels):
+        cond_features = features[label == labels]
+        if (
+            len(cond_features) > n_mixtures
+        ):  # number of samples should be larger than `n_mixtures`.
+ gmm = GaussianMixture( + n_components=n_mixtures, + covariance_type=cov_type, + random_state=seed, + tol=tol, + max_iter=max_iter, + ) + gmm.fit(cond_features) + gmm_list.append( + GMMParameters( + label=np.array(label), + means=gmm.means_.astype("float16"), + weights=gmm.weights_.astype("float16"), + covariances=gmm.covariances_.astype("float16"), + num_samples=np.array(len(cond_features)), + ) + ) + return gmm_list + + +def chunks(lst, chunk_size): + """Yield successive chunk_size-sized chunks from lst.""" + for i in range(0, len(lst), chunk_size): + yield lst[i : i + chunk_size] diff --git a/baselines/fedpft/pyproject.toml b/baselines/fedpft/pyproject.toml new file mode 100644 index 000000000000..11bbddd0e17d --- /dev/null +++ b/baselines/fedpft/pyproject.toml @@ -0,0 +1,144 @@ +[build-system] +requires = ["poetry-core>=1.4.0"] +build-backend = "poetry.masonry.api" + +[tool.poetry] +name = "fedpft" # <----- Ensure it matches the name of your baseline directory containing all the source code +version = "1.0.0" +description = "Flower Baselines" +license = "Apache-2.0" +authors = ["The Flower Authors "] +readme = "README.md" +homepage = "https://flower.ai" +repository = "https://github.com/adap/flower" +documentation = "https://flower.ai" +classifiers = [ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Operating System :: MacOS :: MacOS X", + "Operating System :: POSIX :: Linux", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: Implementation :: CPython", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Artificial Intelligence", 
+ "Topic :: Scientific/Engineering :: Mathematics", + "Topic :: Software Development", + "Topic :: Software Development :: Libraries", + "Topic :: Software Development :: Libraries :: Python Modules", + "Typing :: Typed", +] + +[tool.poetry.dependencies] +python = ">=3.8.15, <3.12.0" # don't change this +flwr = { extras = ["simulation"], version = "1.5.0" } +hydra-core = "1.3.2" # don't change this +torch = {url = "https://download.pytorch.org/whl/cu117/torch-1.13.0%2Bcu117-cp310-cp310-linux_x86_64.whl"} +scikit-learn = "1.2.2" +flwr-datasets = "0.1.0" +torchvision = {url = "https://download.pytorch.org/whl/cu117/torchvision-0.14.0%2Bcu117-cp310-cp310-linux_x86_64.whl"} +transformers = "4.39.3" +datasets = "2.18.0" + +[tool.poetry.dev-dependencies] +isort = "==5.13.2" +black = "==24.2.0" +docformatter = "==1.7.5" +mypy = "==1.4.1" +pylint = "==2.8.2" +flake8 = "==3.9.2" +pytest = "==6.2.4" +pytest-watch = "==4.2.0" +ruff = "==0.0.272" +types-requests = "==2.27.7" +virtualenv = "==20.21.0" + +[tool.isort] +line_length = 88 +indent = " " +multi_line_output = 3 +include_trailing_comma = true +force_grid_wrap = 0 +use_parentheses = true + +[tool.black] +line-length = 88 +target-version = ["py38", "py39", "py310", "py311"] + +[tool.pytest.ini_options] +minversion = "6.2" +addopts = "-qq" +testpaths = [ + "flwr_baselines", +] + +[tool.mypy] +ignore_missing_imports = true +strict = false +plugins = "numpy.typing.mypy_plugin" + +[tool.pylint."MESSAGES CONTROL"] +disable = "bad-continuation,duplicate-code,too-few-public-methods,useless-import-alias" +good-names = "i,j,k,_,x,y,X,Y" +signature-mutators = "hydra.main.main" + +[tool.pylint.typecheck] +generated-members = "numpy.*, torch.*, tensorflow.*" + +[[tool.mypy.overrides]] +module = [ + "importlib.metadata.*", + "importlib_metadata.*", +] +follow_imports = "skip" +follow_imports_for_stubs = true +disallow_untyped_calls = false + +[[tool.mypy.overrides]] +module = "torch.*" +follow_imports = "skip" 
+follow_imports_for_stubs = true + +[tool.docformatter] +wrap-summaries = 88 +wrap-descriptions = 88 + +[tool.ruff] +target-version = "py38" +line-length = 88 +select = ["D", "E", "F", "W", "B", "ISC", "C4"] +fixable = ["D", "E", "F", "W", "B", "ISC", "C4"] +ignore = ["B024", "B027"] +exclude = [ + ".bzr", + ".direnv", + ".eggs", + ".git", + ".hg", + ".mypy_cache", + ".nox", + ".pants.d", + ".pytype", + ".ruff_cache", + ".svn", + ".tox", + ".venv", + "__pypackages__", + "_build", + "buck-out", + "build", + "dist", + "node_modules", + "venv", + "proto", +] + +[tool.ruff.pydocstyle] +convention = "numpy" diff --git a/baselines/flwr_baselines/pyproject.toml b/baselines/flwr_baselines/pyproject.toml index f0b2ac84e66e..add99938d2a3 100644 --- a/baselines/flwr_baselines/pyproject.toml +++ b/baselines/flwr_baselines/pyproject.toml @@ -51,6 +51,7 @@ wget = "^3.2" virtualenv = "^20.24.6" pandas = "^1.5.3" pyhamcrest = "^2.0.4" +pillow = "==10.2.0" [tool.poetry.dev-dependencies] isort = "==5.13.2" diff --git a/datasets/doc/source/.gitignore b/datasets/doc/source/.gitignore new file mode 100644 index 000000000000..e9341a1383b7 --- /dev/null +++ b/datasets/doc/source/.gitignore @@ -0,0 +1 @@ +ref-api/ diff --git a/datasets/doc/source/conf.py b/datasets/doc/source/conf.py index e5c61b5559cb..755147bc9e1d 100644 --- a/datasets/doc/source/conf.py +++ b/datasets/doc/source/conf.py @@ -38,7 +38,7 @@ author = "The Flower Authors" # The full version, including alpha/beta/rc tags -release = "0.0.2" +release = "0.1.0" # -- General configuration --------------------------------------------------- diff --git a/datasets/doc/source/how-to-use-with-local-data.rst b/datasets/doc/source/how-to-use-with-local-data.rst new file mode 100644 index 000000000000..276f6d6936ee --- /dev/null +++ b/datasets/doc/source/how-to-use-with-local-data.rst @@ -0,0 +1,257 @@ +Use with Local Data +=================== + +You can partition your local files and Python objects in +``Flower Datasets`` library using 
any available ``Partitioner``.
+
+This guide details how to create a `Hugging Face `_ `Dataset `_ which is the required type of input for Partitioners.
+We will cover:
+
+* local files: CSV, JSON, image, audio,
+* in-memory data: dictionary, list, pd.DataFrame, np.ndarray.
+
+
+General Overview
+----------------
+An all-in-one dataset preparation (downloading, preprocessing, partitioning) happens
+using `FederatedDataset `_. However, we
+will use only the `Partitioner` here since we use locally accessible data.
+
+The rest of this guide will explain how to create a
+`Dataset `_
+from local files and existing (in memory) Python objects.
+
+Local Files
+-----------
+CSV
+^^^
+.. code-block:: python
+
+    from datasets import load_dataset
+    from flwr_datasets.partitioner import ChosenPartitioner
+
+    # Single file
+    data_files = "path-to-my-file.csv"
+
+    # Multiple Files
+    data_files = [ "path-to-my-file-1.csv", "path-to-my-file-2.csv", ...]
+    dataset = load_dataset("csv", data_files=data_files)
+
+    # Divided Dataset
+    data_files = {
+        "train": single_train_file_or_list_of_files,
+        "test": single_test_file_or_list_of_files,
+        "can-have-more-splits": ...
+    }
+    dataset = load_dataset("csv", data_files=data_files)
+
+    partitioner = ChosenPartitioner(...)
+    partitioner.dataset = dataset
+    partition = partitioner.load_partition(partition_id=0)
+
+JSON
+^^^^
+
+.. code-block:: python
+
+    from datasets import load_dataset
+    from flwr_datasets.partitioner import ChosenPartitioner
+
+    # Single file
+    data_files = "path-to-my-file.json"
+
+    # Multiple Files
+    data_files = [ "path-to-my-file-1.json", "path-to-my-file-2.json", ...]
+    dataset = load_dataset("json", data_files=data_files)
+
+    # Divided Dataset
+    data_files = {
+        "train": single_train_file_or_list_of_files,
+        "test": single_test_file_or_list_of_files,
+        "can-have-more-splits": ...
+    }
+    dataset = load_dataset("json", data_files=data_files)
+
+    partitioner = ChosenPartitioner(...)
+ partitioner.dataset = dataset + partition = partitioner.load_partition(partition_id=0) + + +Image +^^^^^ +You can create an image dataset in two ways: + +1) give a path the directory + +The directory needs to be structured in the following way: dataset-name/split/class/name. For example: + +.. code-block:: + + mnist/train/1/unique_name.png + mnist/train/1/unique_name.png + mnist/train/2/unique_name.png + ... + mnist/test/1/unique_name.png + mnist/test/1/unique_name.png + mnist/test/2/unique_name.png + +Then, the path you can give is `./mnist`. + +.. code-block:: python + + from datasets import load_dataset + from flwr_datasets.partitioner import ChosenPartitioner + + # Directly from a directory + dataset = load_dataset("imagefolder", data_dir="/path/to/folder") + partitioner = ChosenPartitioner(...) + partitioner.dataset = dataset + partition = partitioner.load_partition(partition_id=0) + +2) create a dataset from a CSV/JSON file and cast the path column to Image. + +.. code-block:: python + + from datasets import Image, load_dataset + from flwr_datasets.partitioner import ChosenPartitioner + + dataset = load_dataset(...) + dataset = dataset.cast_column("path", Image()) + + partitioner = ChosenPartitioner(...) + partitioner.dataset = dataset + partition = partitioner.load_partition(partition_id=0) + + +Audio +^^^^^ +Analogously to the image datasets, there are two methods here: + +1) give a path to the directory + +.. code-block:: python + + from datasets import load_dataset + from flwr_datasets.partitioner import ChosenPartitioner + + dataset = load_dataset("audiofolder", data_dir="/path/to/folder") + + partitioner = ChosenPartitioner(...) + partitioner.dataset = dataset + partition = partitioner.load_partition(partition_id=0) + +2) create a dataset from a CSV/JSON file and cast the path column to Audio. + +.. code-block:: python + + from datasets import Audio, load_dataset + from flwr_datasets.partitioner import ChosenPartitioner + + dataset = load_dataset(...) 
+ dataset = dataset.cast_column("path", Audio()) + + partitioner = ChosenPartitioner(...) + partitioner.dataset = dataset + partition = partitioner.load_partition(partition_id=0) + +In-Memory +--------- + +From dictionary +^^^^^^^^^^^^^^^ +.. code-block:: python + + from datasets import Dataset + from flwr_datasets.partitioner import ChosenPartitioner + data = {"features": [1, 2, 3], "labels": [0, 0, 1]} + dataset = Dataset.from_dict(data) + + partitioner = ChosenPartitioner(...) + partitioner.dataset = dataset + partition = partitioner.load_partition(partition_id=0) + +From list +^^^^^^^^^ +.. code-block:: python + + from datasets import Dataset + from flwr_datasets.partitioner import ChosenPartitioner + + my_list = [ + {"features": 1, "labels": 0}, + {"features": 2, "labels": 0}, + {"features": 3, "labels": 1} + ] + dataset = Dataset.from_list(my_list) + + partitioner = ChosenPartitioner(...) + partitioner.dataset = dataset + partition = partitioner.load_partition(partition_id=0) + +From pd.DataFrame +^^^^^^^^^^^^^^^^^ +.. code-block:: python + + from datasets import Dataset + from flwr_datasets.partitioner import ChosenPartitioner + + data = {"features": [1, 2, 3], "labels": [0, 0, 1]} + df = pd.DataFrame(data) + dataset = Dataset.from_pandas(df) + + partitioner = ChosenPartitioner(...) + partitioner.dataset = dataset + partition = partitioner.load_partition(partition_id=0) + +From np.ndarray +^^^^^^^^^^^^^^^ +The np.ndarray will be first transformed to pd.DataFrame + +.. code-block:: python + + from datasets import Dataset + from flwr_datasets.partitioner import ChosenPartitioner + + data = np.array([[1, 2, 3], [0, 0, 1]]).T + # You can add the column names by passing columns=["features", "labels"] + df = pd.DataFrame(data) + dataset = Dataset.from_pandas(df) + + partitioner = ChosenPartitioner(...) 
+ partitioner.dataset = dataset + partition = partitioner.load_partition(partition_id=0) + +Partitioner Details +------------------- +Partitioning is triggered automatically during the first ``load_partition`` call. +You do not need to call any “do_partitioning” method. + +Partitioner abstraction is designed to allow for a single dataset assignment. + +.. code-block:: python + + partitioner.dataset = your_dataset + +If you need to do the same partitioning on a different dataset, create a new Partitioner +for that, e.g.: + +.. code-block:: python + + from flwr_datasets.partitioner import IidPartitioner + + iid_partitioner_for_mnist = IidPartitioner(num_partitions=10) + iid_partitioner_for_mnist.dataset = mnist_dataset + + iid_partitioner_for_cifar = IidPartitioner(num_partitions=10) + iid_partitioner_for_cifar.dataset = cifar_dataset + + +More Resources +-------------- +If you are looking for more details or you have not found the format you are looking for, please visit the `HuggingFace Datasets docs `_. +This guide is based on the following ones: + +* `General Information `_ +* `Tabular Data `_ +* `Image Data `_ +* `Audio Data `_ diff --git a/datasets/doc/source/how-to-use-with-pytorch.rst b/datasets/doc/source/how-to-use-with-pytorch.rst index 613f00a9a059..4228ead2a281 100644 --- a/datasets/doc/source/how-to-use-with-pytorch.rst +++ b/datasets/doc/source/how-to-use-with-pytorch.rst @@ -63,7 +63,7 @@ expected by a model with a convolutional layer. 
If you want to divide the dataset, you can use (at any point before passing the dataset to the DataLoader):: - partition_train_test = partition.train_test_split(test_size=0.2) + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) partition_train = partition_train_test["train"] partition_test = partition_train_test["test"] diff --git a/datasets/doc/source/index.rst b/datasets/doc/source/index.rst index fd226b308bd5..2144c527f8cd 100644 --- a/datasets/doc/source/index.rst +++ b/datasets/doc/source/index.rst @@ -31,6 +31,7 @@ Problem-oriented how-to guides show step-by-step how to achieve a specific goal. how-to-use-with-pytorch how-to-use-with-tensorflow how-to-use-with-numpy + how-to-use-with-local-data how-to-disable-enable-progress-bar References diff --git a/datasets/e2e/pytorch/pytorch_test.py b/datasets/e2e/pytorch/pytorch_test.py index 5bac8f770f23..1f5e4cbb3ad1 100644 --- a/datasets/e2e/pytorch/pytorch_test.py +++ b/datasets/e2e/pytorch/pytorch_test.py @@ -65,7 +65,7 @@ def _create_trainloader(self, batch_size: int) -> DataLoader: partition_id = 0 fds = FederatedDataset(dataset=self.dataset_name, partitioners={"train": 100}) partition = fds.load_partition(partition_id, "train") - partition_train_test = partition.train_test_split(test_size=0.2) + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) partition_train_test = partition_train_test.map( lambda img: {"img": self.transforms(img)}, input_columns="img" ) diff --git a/datasets/e2e/scikit-learn/sklearn_test.py b/datasets/e2e/scikit-learn/sklearn_test.py index e5e6d347ee37..7ce4659b6cd8 100644 --- a/datasets/e2e/scikit-learn/sklearn_test.py +++ b/datasets/e2e/scikit-learn/sklearn_test.py @@ -29,7 +29,7 @@ def _get_partition_data(self): fds = FederatedDataset(dataset=self.dataset_name, partitioners={"train": 10}) partition = fds.load_partition(partition_id, "train") partition.set_format("numpy") - partition_train_test = partition.train_test_split(test_size=0.2) + 
partition_train_test = partition.train_test_split(test_size=0.2, seed=42) X_train, y_train = partition_train_test["train"]["image"], partition_train_test[ "train"]["label"] X_test, y_test = partition_train_test["test"]["image"], partition_train_test[ diff --git a/datasets/flwr_datasets/federated_dataset.py b/datasets/flwr_datasets/federated_dataset.py index 55a7e597f6b4..6c41eaa3562f 100644 --- a/datasets/flwr_datasets/federated_dataset.py +++ b/datasets/flwr_datasets/federated_dataset.py @@ -59,7 +59,8 @@ class FederatedDataset: argument. Defaults to True. seed : Optional[int] Seed used for dataset shuffling. It has no effect if `shuffle` is False. The - seed cannot be set in the later stages. + seed cannot be set in the later stages. If `None`, then fresh, unpredictable entropy + will be pulled from the OS. Defaults to 42. Examples -------- diff --git a/datasets/flwr_datasets/federated_dataset_test.py b/datasets/flwr_datasets/federated_dataset_test.py index 7ca2b44570ca..5d5179122e3b 100644 --- a/datasets/flwr_datasets/federated_dataset_test.py +++ b/datasets/flwr_datasets/federated_dataset_test.py @@ -20,29 +20,70 @@ from typing import Dict, Union from unittest.mock import Mock, patch +import numpy as np import pytest from parameterized import parameterized, parameterized_class import datasets from datasets import Dataset, DatasetDict, concatenate_datasets from flwr_datasets.federated_dataset import FederatedDataset +from flwr_datasets.mock_utils_test import _load_mocked_dataset from flwr_datasets.partitioner import IidPartitioner, Partitioner +mocked_datasets = ["cifar100", "svhn", "sentiment140", "speech_commands"] + @parameterized_class( + ("dataset_name", "test_split", "subset"), [ - {"dataset_name": "mnist", "test_split": "test"}, - {"dataset_name": "cifar10", "test_split": "test"}, - {"dataset_name": "fashion_mnist", "test_split": "test"}, - {"dataset_name": "sasha/dog-food", "test_split": "test"}, - {"dataset_name": "zh-plus/tiny-imagenet", "test_split": 
"valid"}, - ] + # Downloaded + # #Image datasets + ("mnist", "test", ""), + ("cifar10", "test", ""), + ("fashion_mnist", "test", ""), + ("sasha/dog-food", "test", ""), + ("zh-plus/tiny-imagenet", "valid", ""), + # Text + ("scikit-learn/adult-census-income", None, ""), + # Mocked + # #Image + ("cifar100", "test", ""), + # Note: there's also the extra split and full_numbers subset + ("svhn", "test", "cropped_digits"), + # Text + ("sentiment140", "test", ""), # aka twitter + # Audio + ("speech_commands", "test", "v0.01"), + ], ) -class RealDatasetsFederatedDatasetsTrainTest(unittest.TestCase): - """Test Real Dataset (MNIST, CIFAR10) in FederatedDatasets.""" +class BaseFederatedDatasetsTest(unittest.TestCase): + """Test Real/Mocked Datasets used in FederatedDatasets. + + The setUp method mocks the dataset download via datasets.load_dataset if it is in + the `mocked_datasets` list. + """ dataset_name = "" test_split = "" + subset = "" + + def setUp(self) -> None: + """Mock the dataset download prior to each method if needed. + + If the `dataset_name` is in the `mocked_datasets` list, then the dataset + download is mocked. 
+ """ + if self.dataset_name in mocked_datasets: + self.patcher = patch("datasets.load_dataset") + self.mock_load_dataset = self.patcher.start() + self.mock_load_dataset.return_value = _load_mocked_dataset( + self.dataset_name, [200, 100], ["train", self.test_split], self.subset + ) + + def tearDown(self) -> None: + """Clean up after the dataset mocking.""" + if self.dataset_name in mocked_datasets: + patch.stopall() @parameterized.expand( # type: ignore [ @@ -61,14 +102,25 @@ def test_load_partition_size(self, _: str, train_num_partitions: int) -> None: dataset_fds = FederatedDataset( dataset=self.dataset_name, partitioners={"train": train_num_partitions} ) - dataset_partition0 = dataset_fds.load_partition(0, "train") + # Compute the actual partition sizes + partition_sizes = [] + for node_id in range(train_num_partitions): + partition_sizes.append(len(dataset_fds.load_partition(node_id, "train"))) + + # Create the expected sizes of partitions dataset = datasets.load_dataset(self.dataset_name) - self.assertEqual( - len(dataset_partition0), len(dataset["train"]) // train_num_partitions - ) + full_train_length = len(dataset["train"]) + expected_sizes = [] + default_partition_size = full_train_length // train_num_partitions + mod = full_train_length % train_num_partitions + for i in range(train_num_partitions): + expected_sizes.append(default_partition_size + (1 if i < mod else 0)) + self.assertEqual(partition_sizes, expected_sizes) def test_load_split(self) -> None: """Test if the load_split works with the correct split name.""" + if self.test_split is None: + return dataset_fds = FederatedDataset( dataset=self.dataset_name, partitioners={"train": 100} ) @@ -78,6 +130,8 @@ def test_load_split(self) -> None: def test_multiple_partitioners(self) -> None: """Test if the dataset works when multiple partitioners are specified.""" + if self.test_split is None: + return num_train_partitions = 100 num_test_partitions = 100 dataset_fds = FederatedDataset( @@ -97,7 +151,7 @@ 
def test_multiple_partitioners(self) -> None: def test_no_need_for_split_keyword_if_one_partitioner(self) -> None: """Test if partitions got with and without split args are the same.""" - fds = FederatedDataset(dataset="mnist", partitioners={"train": 10}) + fds = FederatedDataset(dataset=self.dataset_name, partitioners={"train": 10}) partition_loaded_with_no_split_arg = fds.load_partition(0) partition_loaded_with_verbose_split_arg = fds.load_partition(0, "train") self.assertTrue( @@ -109,6 +163,8 @@ def test_no_need_for_split_keyword_if_one_partitioner(self) -> None: def test_resplit_dataset_into_one(self) -> None: """Test resplit into a single dataset.""" + if self.test_split is None: + return dataset = datasets.load_dataset(self.dataset_name) dataset_length = sum([len(ds) for ds in dataset.values()]) fds = FederatedDataset( @@ -122,6 +178,8 @@ def test_resplit_dataset_into_one(self) -> None: # pylint: disable=protected-access def test_resplit_dataset_to_change_names(self) -> None: """Test resplitter to change the names of the partitions.""" + if self.test_split is None: + return fds = FederatedDataset( dataset=self.dataset_name, partitioners={"new_train": 100}, @@ -138,6 +196,8 @@ def test_resplit_dataset_to_change_names(self) -> None: def test_resplit_dataset_by_callable(self) -> None: """Test resplitter to change the names of the partitions.""" + if self.test_split is None: + return def resplit(dataset: DatasetDict) -> DatasetDict: return DatasetDict( @@ -157,8 +217,13 @@ def resplit(dataset: DatasetDict) -> DatasetDict: self.assertEqual(len(full), dataset_length) -class ArtificialDatasetTest(unittest.TestCase): - """Test using small artificial dataset, mocked load_dataset.""" +class ShufflingResplittingOnArtificialDatasetTest(unittest.TestCase): + """Test shuffling and resplitting using small artificial dataset. + + The purpose of this class is to ensure the order of samples remains as expected. 
+ + The load_dataset method is mocked and the artificial dataset is returned. + """ # pylint: disable=no-self-use def _dummy_setup(self, train_rows: int = 10, test_rows: int = 5) -> DatasetDict: @@ -360,9 +425,26 @@ def datasets_are_equal(ds1: Dataset, ds2: Dataset) -> bool: # Iterate over each row and check for equality for row1, row2 in zip(ds1, ds2): - if row1 != row2: + # Ensure all keys are the same in both rows + if set(row1.keys()) != set(row2.keys()): return False + # Compare values for each key + for key in row1: + if key == "audio": + # Special handling for 'audio' key + if not all( + [ + np.array_equal(row1[key]["array"], row2[key]["array"]), + row1[key]["path"] == row2[key]["path"], + row1[key]["sampling_rate"] == row2[key]["sampling_rate"], + ] + ): + return False + elif row1[key] != row2[key]: + # Direct comparison for other keys + return False + return True diff --git a/datasets/flwr_datasets/mock_utils_test.py b/datasets/flwr_datasets/mock_utils_test.py new file mode 100644 index 000000000000..78aff1f1cdd7 --- /dev/null +++ b/datasets/flwr_datasets/mock_utils_test.py @@ -0,0 +1,377 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Utils for mocking datasets.""" + + +import io +import random +import string +from datetime import datetime, timedelta +from typing import Any, Dict, List, Set, Tuple, Union + +import numpy as np +from PIL import Image + +import datasets +from datasets import ClassLabel, Dataset, DatasetDict, Features, Value + + +def _generate_artificial_strings( + num_rows: int, num_unique: int, string_length: int, seed: int = 42 +) -> List[str]: + """Create list of strings for categories or labels mocking. + + Note to keep the seed the same if you reuse this function for in creation of the + dataset for multiple splits. + + Parameters + ---------- + num_rows: int + Number of rows = number of elements in the list. + num_unique: int + Number of unique strings that will be initially created. + string_length: int + Length of each string. + seed: int + Seed to the random package. + + Returns + ------- + string_column : List[str] + List of generated strings. 
+ """ + random.seed(seed) + unique_strings: Set[str] = set() + while len(unique_strings) < num_unique: + random_str = "".join( + random.choices(string.ascii_letters + string.digits, k=string_length) + ) + unique_strings.add(random_str) + + unique_strings_list = list(unique_strings) + artificial_column = unique_strings_list.copy() + remaining_to_allocate = num_rows - num_unique + for _ in range(remaining_to_allocate): + artificial_column.append(random.choice(unique_strings_list)) + return artificial_column + + +def _generate_artificial_categories(num_rows: int, choices: List[Any]) -> List[str]: + """Create list of strings from given `choices` list.""" + artificial_column = choices.copy() + remaining_to_allocate = num_rows - len(choices) + for _ in range(remaining_to_allocate): + artificial_column.append(random.choice(choices)) + return artificial_column + + +def _generate_random_word(length: int) -> str: + """Generate a random word of the given length.""" + return "".join(random.choices(string.ascii_letters, k=length)) + + +def _generate_random_text_column(num_rows: int, length: int) -> List[str]: + """Generate a list of random text of specified length.""" + text_col = [] + for _ in range(num_rows): + text_col.append(_generate_random_word(length)) + return text_col + + +def _generate_random_sentence( + min_word_length: int, + max_word_length: int, + min_sentence_length: int, + max_sentence_length: int, +) -> str: + """Generate a random sentence with words of random lengths.""" + sentence_length = random.randint(min_sentence_length, max_sentence_length) + sentence: List[str] = [] + while len(" ".join(sentence)) < sentence_length: + word_length = random.randint(min_word_length, max_word_length) + word = _generate_random_word(word_length) + sentence.append(word) + return " ".join(sentence) + + +def _generate_random_sentences( + num_rows: int, + min_word_length: int, + max_word_length: int, + min_sentence_length: int, + max_sentence_length: int, +) -> List[str]: + 
"""Generate a list of random sentences.""" + text_col = [ + _generate_random_sentence( + min_word_length, max_word_length, min_sentence_length, max_sentence_length + ) + for _ in range(num_rows) + ] + return text_col + + +def _make_num_rows_none(column: List[Any], num_none: int) -> List[Any]: + """Assign none num_none times to the given list.""" + column_copy = column.copy() + none_positions = random.sample(range(len(column_copy)), num_none) + for pos in none_positions: + column_copy[pos] = None + return column_copy + + +def _generate_random_date( + start_date: datetime, + end_date: datetime, + date_format: str = "%a %b %d %H:%M:%S %Y", + as_string: bool = True, +) -> Union[str, datetime]: + """Generate a random date between start_date and end_date.""" + time_between_dates = end_date - start_date + random_seconds = random.randint(0, int(time_between_dates.total_seconds())) + random_date = start_date + timedelta(seconds=random_seconds) + + if as_string: + return random_date.strftime(date_format) + return random_date + + +def _generate_random_date_column( + num_rows: int, + start_date: datetime, + end_date: datetime, + date_format: str = "%a %b %d %H:%M:%S %Y", + as_string: bool = True, +) -> List[Union[str, datetime]]: + """Generate a list of random dates.""" + return [ + _generate_random_date(start_date, end_date, date_format, as_string) + for _ in range(num_rows) + ] + + +def _generate_random_int_column(num_rows: int, min_int: int, max_int: int) -> List[int]: + """Generate a list of ints.""" + return [random.randint(min_int, max_int) for _ in range(num_rows)] + + +def _generate_random_bool_column(num_rows: int) -> List[bool]: + """Generate a list of bools.""" + return [random.choice([True, False]) for _ in range(num_rows)] + + +def _generate_random_image_column( + num_rows: int, + image_size: Union[Tuple[int, int], Tuple[int, int, int]], + simulate_type: str, +) -> List[Any]: + """Simulate the images with the format that is found in HF Hub. 
+ + Directly using `Image.fromarray` does not work because it creates `PIL.Image.Image`. + """ + # Generate numpy images + np_images = [] + for _ in range(num_rows): + np_images.append(np.random.randint(0, 255, size=image_size, dtype=np.uint8)) + # Change the format to the PIL.PngImagePlugin.PngImageFile + # or the PIL.JpegImagePlugin.JpegImageFile format + pil_imgs = [] + for np_image in np_images: + # Convert the NumPy array to a PIL image + pil_img_beg = Image.fromarray(np_image) # type: ignore + + # Save the image to an in-memory bytes buffer + in_memory_file = io.BytesIO() + pil_img_beg.save(in_memory_file, format=simulate_type) + in_memory_file.seek(0) + + # Reload the image as a PngImageFile + pil_image_end = Image.open(in_memory_file) + pil_imgs.append(pil_image_end) + return pil_imgs + + +def generate_random_audio_column( + num_rows: int, + sampling_rate: int, + length_in_samples: int, +) -> List[Dict[str, Any]]: + """Simulate the audio column. + + Audio column in the datset is comprised from an array or floats, sample_rate and a + path. 
# Generate random audio arrays
datasets.Image(decode=True), + "fine_label": ClassLabel(names=unique_fine_labels), + "coarse_label": ClassLabel(names=unique_coarse_labels), + } + ) + dataset = datasets.Dataset.from_dict( + {"img": imgs, "coarse_label": coarse_label, "fine_label": fine_label}, + features=features, + ) + return dataset + + +def _mock_svhn_cropped_digits(num_rows: int) -> Dataset: + imgs = _generate_random_image_column(num_rows, (32, 32, 3), "PNG") + unique_labels = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"] + label = _generate_artificial_categories(num_rows, unique_labels) + features = Features( + { + "image": datasets.Image(decode=True), + "label": ClassLabel(names=unique_labels), + } + ) + dataset = datasets.Dataset.from_dict( + {"image": imgs, "label": label}, features=features + ) + return dataset + + +def _mock_speach_commands(num_rows: int) -> Dataset: + sampling_rate = 16_000 + length_in_samples = 16_000 + imgs = generate_random_audio_column( + num_rows=num_rows, + sampling_rate=sampling_rate, + length_in_samples=length_in_samples, + ) + unique_labels = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"] + label = _generate_artificial_categories(num_rows, unique_labels) + is_unknown = _generate_random_bool_column(num_rows) + utterance_id = _generate_random_int_column(num_rows, 0, 10) + unique_ids = _generate_random_text_column(num_rows // 10, 5) + speaker_id = _generate_artificial_categories(num_rows, unique_ids) + speaker_id = _make_num_rows_none(speaker_id, 10) + features = Features( + { + "audio": datasets.Audio( + sampling_rate=sampling_rate, mono=True, decode=True + ), + "is_unknown": Value(dtype="bool"), + "speaker_id": Value(dtype="string"), + "utterance_id": Value(dtype="int8"), + "label": ClassLabel(names=unique_labels), + } + ) + dataset = datasets.Dataset.from_dict( + { + "audio": imgs, + "is_unknown": is_unknown, + "speaker_id": speaker_id, + "utterance_id": utterance_id, + "label": label, + }, + features=features, + ) + return dataset + + 
+dataset_name_to_mock_function = { + "cifar100": _mock_cifar100, + "sentiment140": _mock_sentiment140, + "svhn_cropped_digits": _mock_svhn_cropped_digits, + "speech_commands_v0.01": _mock_speach_commands, +} + + +def _load_mocked_dataset( + dataset_name: str, + num_rows: List[int], + split_names: List[str], + subset: str = "", +) -> DatasetDict: + dataset_dict = {} + name = dataset_name if subset == "" else dataset_name + "_" + subset + dataset_creation_fnc = dataset_name_to_mock_function[name] + for params in zip(num_rows, split_names): + dataset_dict[params[1]] = dataset_creation_fnc(params[0]) + return datasets.DatasetDict(dataset_dict) diff --git a/datasets/flwr_datasets/utils.py b/datasets/flwr_datasets/utils.py index a6e4fa8d0f0b..c6f6900a99cd 100644 --- a/datasets/flwr_datasets/utils.py +++ b/datasets/flwr_datasets/utils.py @@ -29,6 +29,11 @@ "fashion_mnist", "sasha/dog-food", "zh-plus/tiny-imagenet", + "scikit-learn/adult-census-income", + "cifar100", + "svhn", + "sentiment140", + "speech_commands", ] @@ -133,6 +138,7 @@ def divide_dataset( >>> train_test = divide_dataset(dataset=partition, division=division) >>> train, test = train_test["train"], train_test["test"] """ + _check_division_config_correctness(division) dataset_length = len(dataset) ranges = _create_division_indices_ranges(dataset_length, division) if isinstance(division, (list, tuple)): @@ -162,7 +168,7 @@ def _create_division_indices_ranges( for fraction in division: end_idx += int(dataset_length * fraction) ranges.append(range(start_idx, end_idx)) - start_idx += end_idx + start_idx = end_idx elif isinstance(division, dict): ranges = [] start_idx = 0 @@ -170,7 +176,7 @@ def _create_division_indices_ranges( for fraction in division.values(): end_idx += int(dataset_length * fraction) ranges.append(range(start_idx, end_idx)) - start_idx += end_idx + start_idx = end_idx else: TypeError( f"The type of the `division` should be dict, " @@ -274,6 +280,7 @@ def concatenate_divisions( 
concatenated_divisions : Dataset A dataset created as concatenation of the divisions from all partitions. """ + _check_division_config_correctness(partition_division) divisions = [] zero_len_divisions = 0 for partition_id in range(partitioner.num_partitions): diff --git a/datasets/flwr_datasets/utils_test.py b/datasets/flwr_datasets/utils_test.py index 3bf5afddf978..4add9f88eeb5 100644 --- a/datasets/flwr_datasets/utils_test.py +++ b/datasets/flwr_datasets/utils_test.py @@ -31,13 +31,32 @@ "expected_concatenation_size", ), [ + # Create 1 division + ((1.0,), [40], 0, 40), + ({"train": 1.0}, [40], "train", 40), + # Create 2 divisions ((0.8, 0.2), [32, 8], 1, 8), - ([0.8, 0.2], [32, 8], 1, 8), ({"train": 0.8, "test": 0.2}, [32, 8], "test", 8), + # Create 3 divisions + ([0.6, 0.2, 0.2], [24, 8, 8], 1, 8), + ({"train": 0.6, "valid": 0.2, "test": 0.2}, [24, 8, 8], "test", 8), + # Create 4 divisions + ([0.4, 0.2, 0.2, 0.2], [16, 8, 8, 8], 1, 8), + ({"0": 0.4, "1": 0.2, "2": 0.2, "3": 0.2}, [16, 8, 8, 8], "1", 8), # Not full dataset + # Create 1 division + ([0.8], [32], 0, 32), + ({"train": 0.8}, [32], "train", 32), + # Create 2 divisions ([0.2, 0.1], [8, 4], 1, 4), ((0.2, 0.1), [8, 4], 0, 8), ({"train": 0.2, "test": 0.1}, [8, 4], "test", 4), + # Create 3 divisions + ([0.6, 0.2, 0.1], [24, 8, 4], 2, 4), + ({"train": 0.6, "valid": 0.2, "test": 0.1}, [24, 8, 4], "test", 4), + # Create 4 divisions + ([0.4, 0.2, 0.1, 0.2], [16, 8, 4, 8], 2, 4), + ({"0": 0.4, "1": 0.2, "2": 0.1, "3": 0.2}, [16, 8, 4, 8], "2", 4), ], ) class UtilsTests(unittest.TestCase): @@ -60,7 +79,7 @@ def test_correct_sizes(self) -> None: else: lengths = [len(split) for split in divided_dataset.values()] - self.assertEqual(lengths, self.sizes) + self.assertEqual(self.sizes, lengths) def test_correct_return_types(self) -> None: """Test correct types of the divided dataset based on the config.""" diff --git a/datasets/pyproject.toml b/datasets/pyproject.toml index 5800faf3f272..7dfa60138582 100644 --- 
a/datasets/pyproject.toml +++ b/datasets/pyproject.toml @@ -54,7 +54,7 @@ exclude = [ [tool.poetry.dependencies] python = "^3.8" numpy = "^1.21.0" -datasets = "^2.14.3" +datasets = "^2.14.6" pillow = { version = ">=6.2.1", optional = true } soundfile = { version = ">=0.12.1", optional = true } librosa = { version = ">=0.10.0.post2", optional = true } diff --git a/dev/aws-ami-bootstrap-tf.sh b/dev/aws-ami-bootstrap-tf.sh deleted file mode 100755 index 8799a254cbcc..000000000000 --- a/dev/aws-ami-bootstrap-tf.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# This script can be used to create an AWS EC2 AMI which contains the dependencies required -# to execute Flower TensorFlow based baselines. The AWS EC2 AMI might not always reflect all -# dependencies listed in `pyproject.toml`, but it should at least have most of them. 
- -# Prepare machine dependencies -sudo apt update -sudo apt-get install -y make build-essential libssl-dev zlib1g-dev libbz2-dev libreadline-dev \ - libsqlite3-dev wget curl llvm libncurses5-dev libncursesw5-dev xz-utils tk-dev libffi-dev \ - liblzma-dev python-openssl git -sudo apt install -y python3.7 python3-pip - -# Install project dependencies -python3.7 -m pip install -U pip==23.3.1 setuptools==68.2.2 -python3.7 -m pip install -U numpy==1.18.1 grpcio==1.27.2 google==2.0.3 protobuf==3.12.1 \ - boto3==1.12.36 boto3_type_annotations==0.3.1 paramiko==2.7.1 docker==4.2.0 matplotlib==3.2.1 \ - tensorflow-cpu==2.6.2 - -# Preload datasets -python3.7 -c "import tensorflow as tf; tf.keras.datasets.mnist.load_data()" -python3.7 -c "import tensorflow as tf; tf.keras.datasets.fashion_mnist.load_data()" -python3.7 -c "import tensorflow as tf; tf.keras.datasets.cifar10.load_data()" -python3.7 -c "import tensorflow as tf; tf.keras.datasets.cifar100.load_data()" diff --git a/dev/aws-ami-bootstrap-torch.sh b/dev/aws-ami-bootstrap-torch.sh deleted file mode 100755 index 835a3994c28a..000000000000 --- a/dev/aws-ami-bootstrap-torch.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -# This script can be used to create an AWS EC2 AMI which contains the dependencies required -# to execute Flower PyTorch based baselines. The AWS EC2 AMI might not always reflect all -# dependencies listed in `pyproject.toml`, but it should at least have most of them. - -# Prepare machine dependencies -sudo apt update -sudo apt-get install -y make build-essential libssl-dev zlib1g-dev libbz2-dev libreadline-dev \ - libsqlite3-dev wget curl llvm libncurses5-dev libncursesw5-dev xz-utils tk-dev libffi-dev \ - liblzma-dev python-openssl git -sudo apt install -y python3.7 python3-pip - -# Install project dependencies -python3.7 -m pip install -U pip==23.3.1 setuptools==68.2.2 -python3.7 -m pip install -U numpy==1.18.1 grpcio==1.27.2 google==2.0.3 protobuf==3.12.1 \ - boto3==1.12.36 boto3_type_annotations==0.3.1 paramiko==2.7.1 docker==4.2.0 matplotlib==3.2.1 \ - tqdm==4.48.2 torch==1.6.0 torchvision==0.7.0 - -# Preload datasets -python3.7 -m flwr_experimental.baseline.dataset.pytorch_cifar_partitioned diff --git a/dev/build-docs.sh b/dev/build-docs.sh index 45a4dfca0adf..f8d4f91508de 100755 --- a/dev/build-docs.sh +++ b/dev/build-docs.sh @@ -17,4 +17,9 @@ cd $ROOT cd $ROOT cd doc -./build-versioned-docs.sh + +if [ "$1" = true ]; then + ./build-versioned-docs.sh +else + make html +fi diff --git a/dev/build-swift-api-ref.sh b/dev/build-swift-api-ref.sh index d5ed0a872c16..5b88f9a68320 100755 --- a/dev/build-swift-api-ref.sh +++ b/dev/build-swift-api-ref.sh @@ -28,7 +28,7 @@ find ~/Library/Developer/Xcode/DerivedData -name "flwr.doccarchive" -exec rm -Rf # In case no XCode, please refer to: https://github.com/nodejs/node-gyp/issues/569. # Generate API reference for the Swift SDK by running `xcodebuild docbuild` in src directory. 
cd src/swift/flwr && \ -arch -x86_64 xcodebuild docbuild -scheme flwr -destination 'platform=iOS Simulator,name=iPhone 15 Pro Max,OS=17.0.1' +arch -x86_64 xcodebuild docbuild -scheme flwr -destination 'platform=iOS Simulator,name=iPhone 15 Pro Max,OS=17.2' # Find the generated `doccarchive` file in XCode's derived data folder and copy it to the SwiftDoc directory. cd ../../../ diff --git a/dev/publish-nightly.sh b/dev/publish-nightly.sh index f3e8d170f6c5..a42af1f17cfc 100755 --- a/dev/publish-nightly.sh +++ b/dev/publish-nightly.sh @@ -16,20 +16,24 @@ # ============================================================================== set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ +cd "$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"/../ # This script will build and publish a nightly release of Flower under the condition # that at least one commit was made in the last 24 hours. # It will rename the package name in the pyproject.toml to from "flwr" to "flwr-nightly". # The version name in the pyproject.toml will be appended with "-dev" and the current date. -# The result will be a release on PyPi of the package "flwr-nightly" of version e.g. +# The result will be a release on PyPi of the package "flwr-nightly" of version e.g. # "0.1.1.dev20200716" as seen at https://pypi.org/project/flwr-nightly/ +# If the script is called with the flag `--skip-publish`, the name and version are changed +# in the pyproject.toml but the package won't be published. 
if [[ $(git log --since="24 hours ago" --pretty=oneline) ]]; then sed -i -E "s/^name = \"(.+)\"/name = \"\1-nightly\"/" pyproject.toml - sed -i -E "s/^version = \"(.+)\"/version = \"\1-dev$(date '+%Y%m%d')\"/" pyproject.toml - python -m poetry build - python -m poetry publish -u __token__ -p $PYPI_TOKEN + sed -i -E "s/^version = \"(.+)\"/version = \"\1.dev$(date '+%Y%m%d')\"/" pyproject.toml + if [ "$1" != "--skip-publish" ]; then + python -m poetry build + python -m poetry publish -u __token__ -p $PYPI_TOKEN + fi else echo "There were no commits in the last 24 hours." fi diff --git a/dev/update-examples.sh b/dev/update-examples.sh index c802e21503b7..1076b4621984 100755 --- a/dev/update-examples.sh +++ b/dev/update-examples.sh @@ -3,10 +3,80 @@ set -e cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ ROOT=`pwd` -INDEX=$ROOT/examples/README.md +INDEX=$ROOT/examples/doc/source/index.md INSERT_LINE=6 +copy_markdown_files () { + for file in $1/*.md; do + # Copy the README into the source of the Example docs as the name of the example + if [[ $(basename "$file") = "README.md" ]]; then + cp $file $ROOT/examples/doc/source/$1.md 2>&1 >/dev/null + else + # If the example contains other markdown files, copy them to the source of the Example docs + cp $file $ROOT/examples/doc/source/$(basename "$file") 2>&1 >/dev/null + fi + done +} + +add_gh_button () { + gh_text="[\"View](https://github.com/adap/flower/blob/main/examples/$1)" + readme_file="$ROOT/examples/doc/source/$1.md" + + if ! 
grep -Fq "$gh_text" "$readme_file"; then + awk -v text="$gh_text" ' + /^# / && !found { + print $0 "\n" text; + found=1; + next; + } + { print } + ' "$readme_file" > tmpfile && mv tmpfile "$readme_file" + fi +} + +copy_images () { + if [ -d "$1/_static" ]; then + cp $1/_static/**.{jpg,png,jpeg} $ROOT/examples/doc/source/_static/ 2>/dev/null || true + fi +} + +add_to_index () { + (echo $INSERT_LINE; echo a; echo $1; echo .; echo wq) | ed $INDEX 2>&1 >/dev/null +} + +add_single_entry () { + # Copy markdown files to correct folder + copy_markdown_files $1 + + # Add button linked to GitHub + add_gh_button $1 + + # Copy all images of the _static folder into the examples + # docs static folder + copy_images $1 + + # Insert the name of the example into the index file + add_to_index $1 +} + +add_all_entries () { + cd $ROOT/examples + # Iterate through each folder in examples/ + for d in $(printf '%s\n' */ | sort -V); do + # Add entry based on the name of the folder + example=${d%/} + + if [[ $example != doc ]]; then + add_single_entry $example + fi + done +} + +# Clean up before starting +rm -f $ROOT/examples/doc/source/*.md rm -f $INDEX + +# Create empty index file touch $INDEX echo "# Flower Examples Documentation" >> $INDEX @@ -16,22 +86,6 @@ echo "---" >> $INDEX echo "maxdepth: 1" >> $INDEX echo "---" >> $INDEX -rm -f "examples/doc/source/*.md" - -cd examples/ -for d in $(printf '%s\n' */ | sort -V); do - example=${d%/} - # For each example, copy the README into the source of the Example docs - [[ $example != doc ]] && cp $example/README.md $ROOT/examples/doc/source/$example.md 2>&1 >/dev/null - # For each example, copy all images of the _static folder into the examples - # docs static folder - [[ $example != doc ]] && [ -d "$example/_static" ] && { - cp $example/_static/**.{jpg,png,jpeg} $ROOT/examples/doc/source/_static/ 2>/dev/null || true - } - # For each example, insert the name of the example into the index file - [[ $example != doc ]] && (echo $INSERT_LINE; echo 
a; echo $example; echo .; echo wq) | ed $INDEX 2>&1 >/dev/null -done +add_all_entries echo "\`\`\`" >> $INDEX - -cp $INDEX $ROOT/examples/doc/source/index.md diff --git a/doc/locales/fr/LC_MESSAGES/framework-docs.po b/doc/locales/fr/LC_MESSAGES/framework-docs.po index e7c7783c48ff..80222e5409d2 100644 --- a/doc/locales/fr/LC_MESSAGES/framework-docs.po +++ b/doc/locales/fr/LC_MESSAGES/framework-docs.po @@ -3,9 +3,9 @@ msgid "" msgstr "" "Project-Id-Version: Flower Docs\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2024-02-13 11:23+0100\n" +"POT-Creation-Date: 2024-03-15 14:23+0000\n" "PO-Revision-Date: 2023-09-05 17:54+0000\n" -"Last-Translator: Charles Beauville \n" +"Last-Translator: Charles Beauville \n" "Language: fr\n" "Language-Team: French \n" @@ -13,7 +13,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.13.1\n" +"Generated-By: Babel 2.14.0\n" #: ../../source/contributor-explanation-architecture.rst:2 msgid "Flower Architecture" @@ -27,9 +27,7 @@ msgstr "Moteur client Edge" msgid "" "`Flower `_ core framework architecture with Edge " "Client Engine" -msgstr "" -"`Flower `_ architecture de base avec Edge Client " -"Engine" +msgstr "`Flower `_ architecture de base avec Edge Client Engine" #: ../../source/contributor-explanation-architecture.rst:13 msgid "Virtual Client Engine" @@ -40,8 +38,8 @@ msgid "" "`Flower `_ core framework architecture with Virtual " "Client Engine" msgstr "" -"`Flower `_ architecture de base avec moteur de client" -" virtuel" +"`Flower `_ architecture de base avec moteur de client " +"virtuel" #: ../../source/contributor-explanation-architecture.rst:21 msgid "Virtual Client Engine and Edge Client Engine in the same workload" @@ -86,9 +84,8 @@ msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:19 msgid "" -"Please follow the first section on `Run Flower using Docker " -"`_ " -"which covers this step in more 
detail." +"Please follow the first section on :doc:`Run Flower using Docker ` which covers this step in more detail." msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:23 @@ -303,7 +300,7 @@ msgid "" "to help us in our effort to make Federated Learning accessible to as many" " people as possible by contributing to those translations! This might " "also be a great opportunity for those wanting to become open source " -"contributors with little prerequistes." +"contributors with little prerequisites." msgstr "" #: ../../source/contributor-how-to-contribute-translations.rst:13 @@ -355,7 +352,7 @@ msgstr "" #: ../../source/contributor-how-to-contribute-translations.rst:47 msgid "" -"You input your translation in the textbox at the top and then, once you " +"You input your translation in the text box at the top and then, once you " "are happy with it, you either press ``Save and continue`` (to save the " "translation and go to the next untranslated string), ``Save and stay`` " "(to save the translation and stay on the same page), ``Suggest`` (to add " @@ -393,8 +390,8 @@ msgstr "" #: ../../source/contributor-how-to-contribute-translations.rst:69 msgid "" "If you want to add a new language, you will first have to contact us, " -"either on `Slack `_, or by opening an " -"issue on our `GitHub repo `_." +"either on `Slack `_, or by opening an issue" +" on our `GitHub repo `_." msgstr "" #: ../../source/contributor-how-to-create-new-messages.rst:2 @@ -438,12 +435,13 @@ msgid "Message Types for Protocol Buffers" msgstr "Types de messages pour les tampons de protocole" #: ../../source/contributor-how-to-create-new-messages.rst:32 +#, fuzzy msgid "" "The first thing we need to do is to define a message type for the RPC " "system in :code:`transport.proto`. Note that we have to do it for both " "the request and response messages. For more details on the syntax of " -"proto3, please see the `official documentation " -"`_." 
+"proto3, please see the `official documentation `_." msgstr "" "La première chose à faire est de définir un type de message pour le " "système RPC dans :code:`transport.proto`. Notez que nous devons le faire " @@ -592,9 +590,10 @@ msgstr "" "conteneur." #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:11 +#, fuzzy msgid "" "Source: `Official VSCode documentation " -"`_" +"`_" msgstr "" "Source : `Documentation officielle de VSCode " "`_" @@ -648,9 +647,10 @@ msgstr "" "cas-là, consulte les sources suivantes :" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:23 +#, fuzzy msgid "" "`Developing inside a Container " -"`_" msgstr "" "`Développement à l'intérieur d'un conteneur " @@ -658,9 +658,10 @@ msgstr "" "requirements>`_" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:24 +#, fuzzy msgid "" "`Remote development in Containers " -"`_" +"`_" msgstr "" "`Développement à distance dans les conteneurs " "`_" @@ -961,8 +962,8 @@ msgstr "Ajoute une nouvelle section ``Unreleased`` dans ``changelog.md``." #: ../../source/contributor-how-to-release-flower.rst:25 msgid "" -"Merge the pull request on the same day (i.e., before a new nightly release" -" gets published to PyPI)." +"Merge the pull request on the same day (i.e., before a new nightly " +"release gets published to PyPI)." msgstr "" "Fusionne la pull request le jour même (c'est-à-dire avant qu'une nouvelle" " version nightly ne soit publiée sur PyPI)." @@ -977,11 +978,12 @@ msgstr "Nom de la pré-version" #: ../../source/contributor-how-to-release-flower.rst:33 msgid "" -"PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases " -"MUST use one of the following naming patterns:" +"PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases" +" MUST use one of the following naming patterns:" msgstr "" -"PyPI prend en charge les préversions (alpha, bêta, version candidate). 
Les" -" préversions DOIVENT utiliser l'un des modèles de dénomination suivants :" +"PyPI prend en charge les préversions (alpha, bêta, version candidate). " +"Les préversions DOIVENT utiliser l'un des modèles de dénomination " +"suivants :" #: ../../source/contributor-how-to-release-flower.rst:35 msgid "Alpha: ``MAJOR.MINOR.PATCHaN``" @@ -1318,21 +1320,23 @@ msgid "Request for Flower Baselines" msgstr "Demande pour une nouvelle Flower Baseline" #: ../../source/contributor-ref-good-first-contributions.rst:25 +#, fuzzy msgid "" "If you are not familiar with Flower Baselines, you should probably check-" -"out our `contributing guide for baselines `_." +"out our `contributing guide for baselines " +"`_." msgstr "" "Si tu n'es pas familier avec les Flower Baselines, tu devrais " "probablement consulter notre `guide de contribution pour les baselines " "`_." #: ../../source/contributor-ref-good-first-contributions.rst:27 +#, fuzzy msgid "" "You should then check out the open `issues " "`_" " for baseline requests. If you find a baseline that you'd like to work on" -" and that has no assignes, feel free to assign it to yourself and start " +" and that has no assignees, feel free to assign it to yourself and start " "working on it!" msgstr "" "Tu devrais ensuite consulter les `issues ouvertes " @@ -1444,9 +1448,8 @@ msgstr "" #, fuzzy msgid "" "If you're familiar with how contributing on GitHub works, you can " -"directly checkout our `getting started guide for contributors " -"`_." +"directly checkout our :doc:`getting started guide for contributors " +"`." msgstr "" "Si tu es familier avec le fonctionnement des contributions sur GitHub, tu" " peux directement consulter notre `guide de démarrage pour les " @@ -1454,21 +1457,22 @@ msgstr "" "contributors.html>`_ et des exemples de `bonnes premières contributions " "`_." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:11 +#: ../../source/contributor-tutorial-contribute-on-github.rst:10 msgid "Setting up the repository" msgstr "Mise en place du référentiel" -#: ../../source/contributor-tutorial-contribute-on-github.rst:22 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "**Create a GitHub account and setup Git**" msgstr "**Créer un compte GitHub et configurer Git**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:14 +#: ../../source/contributor-tutorial-contribute-on-github.rst:13 +#, fuzzy msgid "" "Git is a distributed version control tool. This allows for an entire " "codebase's history to be stored and every developer's machine. It is a " "software that will need to be installed on your local machine, you can " -"follow this `guide `_ to set it up." +"follow this `guide `_ to set it up." msgstr "" "Git est un outil de contrôle de version distribué. Il permet de stocker " "l'historique d'une base de code entière sur la machine de chaque " @@ -1476,7 +1480,7 @@ msgstr "" "locale, tu peux suivre ce `guide `_ pour le mettre en place." -#: ../../source/contributor-tutorial-contribute-on-github.rst:17 +#: ../../source/contributor-tutorial-contribute-on-github.rst:16 msgid "" "GitHub, itself, is a code hosting platform for version control and " "collaboration. It allows for everyone to collaborate and work from " @@ -1486,7 +1490,7 @@ msgstr "" "contrôle des versions et la collaboration. Il permet à chacun de " "collaborer et de travailler de n'importe où sur des dépôts à distance." -#: ../../source/contributor-tutorial-contribute-on-github.rst:19 +#: ../../source/contributor-tutorial-contribute-on-github.rst:18 msgid "" "If you haven't already, you will need to create an account on `GitHub " "`_." @@ -1494,7 +1498,7 @@ msgstr "" "Si ce n'est pas déjà fait, tu devras créer un compte sur `GitHub " "`_." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:21 +#: ../../source/contributor-tutorial-contribute-on-github.rst:20 msgid "" "The idea behind the generic Git and GitHub workflow boils down to this: " "you download code from a remote repository on GitHub, make changes " @@ -1506,14 +1510,15 @@ msgstr "" " des modifications localement et tu en gardes une trace à l'aide de Git, " "puis tu télécharges ton nouvel historique à nouveau sur GitHub." -#: ../../source/contributor-tutorial-contribute-on-github.rst:33 +#: ../../source/contributor-tutorial-contribute-on-github.rst:32 msgid "**Forking the Flower repository**" msgstr "**Fourche le dépôt de Flower**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:25 +#: ../../source/contributor-tutorial-contribute-on-github.rst:24 +#, fuzzy msgid "" "A fork is a personal copy of a GitHub repository. To create one for " -"Flower, you must navigate to https://github.com/adap/flower (while " +"Flower, you must navigate to ``_ (while " "connected to your GitHub account) and click the ``Fork`` button situated " "on the top right of the page." msgstr "" @@ -1522,7 +1527,7 @@ msgstr "" "étant connecté à ton compte GitHub) et cliquer sur le bouton ``Fork`` " "situé en haut à droite de la page." -#: ../../source/contributor-tutorial-contribute-on-github.rst:30 +#: ../../source/contributor-tutorial-contribute-on-github.rst:29 msgid "" "You can change the name if you want, but this is not necessary as this " "version of Flower will be yours and will sit inside your own account " @@ -1535,11 +1540,11 @@ msgstr "" " devrais voir dans le coin supérieur gauche que tu es en train de " "regarder ta propre version de Flower." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:48 +#: ../../source/contributor-tutorial-contribute-on-github.rst:47 msgid "**Cloning your forked repository**" msgstr "**Clonage de ton dépôt forké**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:36 +#: ../../source/contributor-tutorial-contribute-on-github.rst:35 msgid "" "The next step is to download the forked repository on your machine to be " "able to make changes to it. On your forked repository page, you should " @@ -1551,7 +1556,7 @@ msgstr "" "forké, tu dois d'abord cliquer sur le bouton ``Code`` à droite, ce qui te" " permettra de copier le lien HTTPS du dépôt." -#: ../../source/contributor-tutorial-contribute-on-github.rst:42 +#: ../../source/contributor-tutorial-contribute-on-github.rst:41 msgid "" "Once you copied the \\, you can open a terminal on your machine, " "navigate to the place you want to download the repository to and type:" @@ -1560,7 +1565,7 @@ msgstr "" "machine, naviguer jusqu'à l'endroit où tu veux télécharger le référentiel" " et taper :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:48 +#: ../../source/contributor-tutorial-contribute-on-github.rst:47 #, fuzzy msgid "" "This will create a ``flower/`` (or the name of your fork if you renamed " @@ -1569,15 +1574,15 @@ msgstr "" "Cela créera un dossier `flower/` (ou le nom de ta fourche si tu l'as " "renommée) dans le répertoire de travail actuel." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:67 +#: ../../source/contributor-tutorial-contribute-on-github.rst:66 msgid "**Add origin**" msgstr "**Ajouter l'origine**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:51 +#: ../../source/contributor-tutorial-contribute-on-github.rst:50 msgid "You can then go into the repository folder:" msgstr "Tu peux ensuite aller dans le dossier du référentiel :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:57 +#: ../../source/contributor-tutorial-contribute-on-github.rst:56 msgid "" "And here we will need to add an origin to our repository. The origin is " "the \\ of the remote fork repository. To obtain it, we can do as " @@ -1589,7 +1594,7 @@ msgstr "" "indiqué précédemment en allant sur notre dépôt fork sur notre compte " "GitHub et en copiant le lien." -#: ../../source/contributor-tutorial-contribute-on-github.rst:62 +#: ../../source/contributor-tutorial-contribute-on-github.rst:61 msgid "" "Once the \\ is copied, we can type the following command in our " "terminal:" @@ -1597,26 +1602,27 @@ msgstr "" "Une fois que le \\ est copié, nous pouvons taper la commande " "suivante dans notre terminal :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:91 +#: ../../source/contributor-tutorial-contribute-on-github.rst:90 msgid "**Add upstream**" msgstr "**Ajouter en amont**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:70 +#: ../../source/contributor-tutorial-contribute-on-github.rst:69 +#, fuzzy msgid "" "Now we will add an upstream address to our repository. Still in the same " -"directroy, we must run the following command:" +"directory, we must run the following command:" msgstr "" "Nous allons maintenant ajouter une adresse en amont à notre dépôt. 
" "Toujours dans le même directroy, nous devons exécuter la commande " "suivante :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:77 +#: ../../source/contributor-tutorial-contribute-on-github.rst:76 msgid "The following diagram visually explains what we did in the previous steps:" msgstr "" "Le schéma suivant explique visuellement ce que nous avons fait dans les " "étapes précédentes :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:81 +#: ../../source/contributor-tutorial-contribute-on-github.rst:80 msgid "" "The upstream is the GitHub remote address of the parent repository (in " "this case Flower), i.e. the one we eventually want to contribute to and " @@ -1630,7 +1636,7 @@ msgstr "" "simplement l'adresse distante GitHub du dépôt forké que nous avons créé, " "c'est-à-dire la copie (fork) dans notre propre compte." -#: ../../source/contributor-tutorial-contribute-on-github.rst:85 +#: ../../source/contributor-tutorial-contribute-on-github.rst:84 msgid "" "To make sure our local version of the fork is up-to-date with the latest " "changes from the Flower repository, we can execute the following command:" @@ -1639,27 +1645,28 @@ msgstr "" "dernières modifications du dépôt Flower, nous pouvons exécuter la " "commande suivante :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:94 +#: ../../source/contributor-tutorial-contribute-on-github.rst:93 msgid "Setting up the coding environment" msgstr "Mise en place de l'environnement de codage" -#: ../../source/contributor-tutorial-contribute-on-github.rst:96 +#: ../../source/contributor-tutorial-contribute-on-github.rst:95 +#, fuzzy msgid "" -"This can be achieved by following this `getting started guide for " -"contributors`_ (note that you won't need to clone the repository). Once " -"you are able to write code and test it, you can finally start making " -"changes!" 
+"This can be achieved by following this :doc:`getting started guide for " +"contributors ` (note " +"that you won't need to clone the repository). Once you are able to write " +"code and test it, you can finally start making changes!" msgstr "" "Pour ce faire, tu peux suivre ce `guide de démarrage pour les " "contributeurs`_ (note que tu n'auras pas besoin de cloner le dépôt). Une " "fois que tu es capable d'écrire du code et de le tester, tu peux enfin " "commencer à faire des changements !" -#: ../../source/contributor-tutorial-contribute-on-github.rst:101 +#: ../../source/contributor-tutorial-contribute-on-github.rst:100 msgid "Making changes" msgstr "Apporter des changements" -#: ../../source/contributor-tutorial-contribute-on-github.rst:103 +#: ../../source/contributor-tutorial-contribute-on-github.rst:102 msgid "" "Before making any changes make sure you are up-to-date with your " "repository:" @@ -1667,15 +1674,15 @@ msgstr "" "Avant de faire des changements, assure-toi que tu es à jour avec ton " "référentiel :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:109 +#: ../../source/contributor-tutorial-contribute-on-github.rst:108 msgid "And with Flower's repository:" msgstr "Et avec le référentiel de Flower :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:123 +#: ../../source/contributor-tutorial-contribute-on-github.rst:122 msgid "**Create a new branch**" msgstr "**Créer une nouvelle branche**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:116 +#: ../../source/contributor-tutorial-contribute-on-github.rst:115 msgid "" "To make the history cleaner and easier to work with, it is good practice " "to create a new branch for each feature/project that needs to be " @@ -1685,7 +1692,7 @@ msgstr "" "une bonne pratique de créer une nouvelle branche pour chaque " "fonctionnalité/projet qui doit être mis en œuvre." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:119 +#: ../../source/contributor-tutorial-contribute-on-github.rst:118 msgid "" "To do so, just run the following command inside the repository's " "directory:" @@ -1693,21 +1700,21 @@ msgstr "" "Pour ce faire, il suffit d'exécuter la commande suivante dans le " "répertoire du référentiel :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:126 +#: ../../source/contributor-tutorial-contribute-on-github.rst:125 msgid "**Make changes**" msgstr "**Apporter des modifications**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:126 +#: ../../source/contributor-tutorial-contribute-on-github.rst:125 msgid "Write great code and create wonderful changes using your favorite editor!" msgstr "" "Écris du bon code et crée de merveilleuses modifications à l'aide de ton " "éditeur préféré !" -#: ../../source/contributor-tutorial-contribute-on-github.rst:139 +#: ../../source/contributor-tutorial-contribute-on-github.rst:138 msgid "**Test and format your code**" msgstr "**Teste et mets en forme ton code**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:129 +#: ../../source/contributor-tutorial-contribute-on-github.rst:128 msgid "" "Don't forget to test and format your code! Otherwise your code won't be " "able to be merged into the Flower repository. This is done so the " @@ -1717,15 +1724,15 @@ msgstr "" "pourra pas être fusionné dans le dépôt Flower, et ce, afin que la base de" " code reste cohérente et facile à comprendre." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:132 +#: ../../source/contributor-tutorial-contribute-on-github.rst:131 msgid "To do so, we have written a few scripts that you can execute:" msgstr "Pour ce faire, nous avons écrit quelques scripts que tu peux exécuter :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:151 +#: ../../source/contributor-tutorial-contribute-on-github.rst:150 msgid "**Stage changes**" msgstr "**Changements de scène**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:142 +#: ../../source/contributor-tutorial-contribute-on-github.rst:141 msgid "" "Before creating a commit that will update your history, you must specify " "to Git which files it needs to take into account." @@ -1733,11 +1740,11 @@ msgstr "" "Avant de créer un commit qui mettra à jour ton historique, tu dois " "spécifier à Git les fichiers qu'il doit prendre en compte." -#: ../../source/contributor-tutorial-contribute-on-github.rst:144 +#: ../../source/contributor-tutorial-contribute-on-github.rst:143 msgid "This can be done with:" msgstr "Cela peut se faire avec :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:150 +#: ../../source/contributor-tutorial-contribute-on-github.rst:149 msgid "" "To check which files have been modified compared to the last version " "(last commit) and to see which files are staged for commit, you can use " @@ -1747,11 +1754,11 @@ msgstr "" "version (last commit) et pour voir quels fichiers sont mis à disposition " "pour le commit, tu peux utiliser la commande :code:`git status`." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:161 +#: ../../source/contributor-tutorial-contribute-on-github.rst:160 msgid "**Commit changes**" msgstr "**Commit changes**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:154 +#: ../../source/contributor-tutorial-contribute-on-github.rst:153 msgid "" "Once you have added all the files you wanted to commit using :code:`git " "add`, you can finally create your commit using this command:" @@ -1760,7 +1767,7 @@ msgstr "" "l'aide de :code:`git add`, tu peux enfin créer ta livraison à l'aide de " "cette commande :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:160 +#: ../../source/contributor-tutorial-contribute-on-github.rst:159 msgid "" "The \\ is there to explain to others what the commit " "does. It should be written in an imperative style and be concise. An " @@ -1770,11 +1777,11 @@ msgstr "" "commit. Il doit être écrit dans un style impératif et être concis. Un " "exemple serait :code:`git commit -m \"Ajouter des images au README\"`." -#: ../../source/contributor-tutorial-contribute-on-github.rst:172 +#: ../../source/contributor-tutorial-contribute-on-github.rst:171 msgid "**Push the changes to the fork**" msgstr "**Pousser les changements vers la fourche**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:164 +#: ../../source/contributor-tutorial-contribute-on-github.rst:163 msgid "" "Once we have committed our changes, we have effectively updated our local" " history, but GitHub has no way of knowing this unless we push our " @@ -1785,7 +1792,7 @@ msgstr "" "moyen de le savoir à moins que nous ne poussions nos modifications vers " "l'adresse distante de notre origine :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:171 +#: ../../source/contributor-tutorial-contribute-on-github.rst:170 msgid "" "Once this is done, you will see on the GitHub that your forked repo was " "updated with the changes you have made." 
@@ -1793,15 +1800,15 @@ msgstr "" "Une fois que c'est fait, tu verras sur GitHub que ton repo forké a été " "mis à jour avec les modifications que tu as apportées." -#: ../../source/contributor-tutorial-contribute-on-github.rst:175 +#: ../../source/contributor-tutorial-contribute-on-github.rst:174 msgid "Creating and merging a pull request (PR)" msgstr "Créer et fusionner une pull request (PR)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +#: ../../source/contributor-tutorial-contribute-on-github.rst:205 msgid "**Create the PR**" msgstr "**Créer le PR**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:178 +#: ../../source/contributor-tutorial-contribute-on-github.rst:177 msgid "" "Once you have pushed changes, on the GitHub webpage of your repository " "you should see the following message:" @@ -1809,12 +1816,12 @@ msgstr "" "Une fois que tu as poussé les modifications, sur la page web GitHub de " "ton dépôt, tu devrais voir le message suivant :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:182 +#: ../../source/contributor-tutorial-contribute-on-github.rst:181 #, fuzzy msgid "Otherwise you can always find this option in the ``Branches`` page." msgstr "Sinon, tu peux toujours trouver cette option dans la page `Branches`." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:184 +#: ../../source/contributor-tutorial-contribute-on-github.rst:183 #, fuzzy msgid "" "Once you click the ``Compare & pull request`` button, you should see " @@ -1823,13 +1830,13 @@ msgstr "" "Une fois que tu as cliqué sur le bouton `Compare & pull request`, tu " "devrais voir quelque chose de similaire à ceci :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:188 +#: ../../source/contributor-tutorial-contribute-on-github.rst:187 msgid "At the top you have an explanation of which branch will be merged where:" msgstr "" "En haut, tu as une explication de quelle branche sera fusionnée à quel " "endroit :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:192 +#: ../../source/contributor-tutorial-contribute-on-github.rst:191 msgid "" "In this example you can see that the request is to merge the branch " "``doc-fixes`` from my forked repository to branch ``main`` from the " @@ -1839,7 +1846,7 @@ msgstr "" "branche ``doc-fixes`` de mon dépôt forké à la branche ``main`` du dépôt " "Flower." -#: ../../source/contributor-tutorial-contribute-on-github.rst:194 +#: ../../source/contributor-tutorial-contribute-on-github.rst:193 msgid "" "The input box in the middle is there for you to describe what your PR " "does and to link it to existing issues. We have placed comments (that " @@ -1851,7 +1858,7 @@ msgstr "" "commentaires (qui ne seront pas rendus une fois le PR ouvert) pour te " "guider tout au long du processus." -#: ../../source/contributor-tutorial-contribute-on-github.rst:197 +#: ../../source/contributor-tutorial-contribute-on-github.rst:196 msgid "" "It is important to follow the instructions described in comments. For " "instance, in order to not break how our changelog system works, you " @@ -1860,7 +1867,7 @@ msgid "" ":ref:`changelogentry` appendix." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:201 +#: ../../source/contributor-tutorial-contribute-on-github.rst:200 msgid "" "At the bottom you will find the button to open the PR. This will notify " "reviewers that a new PR has been opened and that they should look over it" @@ -1870,7 +1877,7 @@ msgstr "" "qui informera les réviseurs qu'un nouveau PR a été ouvert et qu'ils " "doivent le consulter pour le fusionner ou demander des modifications." -#: ../../source/contributor-tutorial-contribute-on-github.rst:204 +#: ../../source/contributor-tutorial-contribute-on-github.rst:203 msgid "" "If your PR is not yet ready for review, and you don't want to notify " "anyone, you have the option to create a draft pull request:" @@ -1879,11 +1886,11 @@ msgstr "" " personne, tu as la possibilité de créer un brouillon de demande de " "traction :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:208 msgid "**Making new changes**" msgstr "**Faire de nouveaux changements**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:208 msgid "" "Once the PR has been opened (as draft or not), you can still push new " "commits to it the same way we did before, by making changes to the branch" @@ -1893,11 +1900,11 @@ msgstr "" "toujours y pousser de nouveaux commits de la même manière qu'auparavant, " "en apportant des modifications à la branche associée au PR." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:231 +#: ../../source/contributor-tutorial-contribute-on-github.rst:230 msgid "**Review the PR**" msgstr "**Review the PR**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:212 +#: ../../source/contributor-tutorial-contribute-on-github.rst:211 msgid "" "Once the PR has been opened or once the draft PR has been marked as " "ready, a review from code owners will be automatically requested:" @@ -1906,7 +1913,7 @@ msgstr "" " étant prêt, une révision des propriétaires de code sera automatiquement " "demandée :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:216 +#: ../../source/contributor-tutorial-contribute-on-github.rst:215 msgid "" "Code owners will then look into the code, ask questions, request changes " "or validate the PR." @@ -1914,11 +1921,11 @@ msgstr "" "Les propriétaires du code vont alors se pencher sur le code, poser des " "questions, demander des modifications ou valider le RP." -#: ../../source/contributor-tutorial-contribute-on-github.rst:218 +#: ../../source/contributor-tutorial-contribute-on-github.rst:217 msgid "Merging will be blocked if there are ongoing requested changes." msgstr "La fusion sera bloquée s'il y a des changements demandés en cours." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:222 +#: ../../source/contributor-tutorial-contribute-on-github.rst:221 msgid "" "To resolve them, just push the necessary changes to the branch associated" " with the PR:" @@ -1926,11 +1933,11 @@ msgstr "" "Pour les résoudre, il suffit de pousser les changements nécessaires vers " "la branche associée au PR :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:226 +#: ../../source/contributor-tutorial-contribute-on-github.rst:225 msgid "And resolve the conversation:" msgstr "Et résous la conversation :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:230 +#: ../../source/contributor-tutorial-contribute-on-github.rst:229 msgid "" "Once all the conversations have been resolved, you can re-request a " "review." @@ -1938,11 +1945,11 @@ msgstr "" "Une fois que toutes les conversations ont été résolues, tu peux " "redemander un examen." -#: ../../source/contributor-tutorial-contribute-on-github.rst:251 +#: ../../source/contributor-tutorial-contribute-on-github.rst:250 msgid "**Once the PR is merged**" msgstr "**Une fois que le PR est fusionné**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:234 +#: ../../source/contributor-tutorial-contribute-on-github.rst:233 msgid "" "If all the automatic tests have passed and reviewers have no more changes" " to request, they can approve the PR and merge it." @@ -1951,7 +1958,7 @@ msgstr "" " de modifications à demander, ils peuvent approuver le PR et le " "fusionner." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:238 +#: ../../source/contributor-tutorial-contribute-on-github.rst:237 msgid "" "Once it is merged, you can delete the branch on GitHub (a button should " "appear to do so) and also delete it locally by doing:" @@ -1960,36 +1967,38 @@ msgstr "" "(un bouton devrait apparaître pour le faire) et aussi la supprimer " "localement en faisant :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:245 +#: ../../source/contributor-tutorial-contribute-on-github.rst:244 msgid "Then you should update your forked repository by doing:" msgstr "Ensuite, tu dois mettre à jour ton dépôt forké en faisant :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:254 +#: ../../source/contributor-tutorial-contribute-on-github.rst:253 msgid "Example of first contribution" msgstr "Exemple de première contribution" -#: ../../source/contributor-tutorial-contribute-on-github.rst:257 +#: ../../source/contributor-tutorial-contribute-on-github.rst:256 msgid "Problem" msgstr "Problème" -#: ../../source/contributor-tutorial-contribute-on-github.rst:259 +#: ../../source/contributor-tutorial-contribute-on-github.rst:258 +#, fuzzy msgid "" -"For our documentation, we’ve started to use the `Diàtaxis framework " +"For our documentation, we've started to use the `Diàtaxis framework " "`_." msgstr "" "Pour notre documentation, nous avons commencé à utiliser le cadre " "`Diàtaxis `_." -#: ../../source/contributor-tutorial-contribute-on-github.rst:261 +#: ../../source/contributor-tutorial-contribute-on-github.rst:260 +#, fuzzy msgid "" -"Our “How to” guides should have titles that continue the sencence “How to" -" …”, for example, “How to upgrade to Flower 1.0”." +"Our \"How to\" guides should have titles that continue the sentence \"How" +" to …\", for example, \"How to upgrade to Flower 1.0\"." 
msgstr "" "Nos guides \"Comment faire\" devraient avoir des titres qui poursuivent " "la phrase \"Comment faire pour...\", par exemple, \"Comment passer à " "Flower 1.0\"." -#: ../../source/contributor-tutorial-contribute-on-github.rst:263 +#: ../../source/contributor-tutorial-contribute-on-github.rst:262 msgid "" "Most of our guides do not follow this new format yet, and changing their " "title is (unfortunately) more involved than one might think." @@ -1998,50 +2007,55 @@ msgstr "" "changer leur titre est (malheureusement) plus compliqué qu'on ne le " "pense." -#: ../../source/contributor-tutorial-contribute-on-github.rst:265 +#: ../../source/contributor-tutorial-contribute-on-github.rst:264 +#, fuzzy msgid "" -"This issue is about changing the title of a doc from present continious " +"This issue is about changing the title of a doc from present continuous " "to present simple." msgstr "" "Cette question porte sur le changement du titre d'un document du présent " "continu au présent simple." -#: ../../source/contributor-tutorial-contribute-on-github.rst:267 +#: ../../source/contributor-tutorial-contribute-on-github.rst:266 +#, fuzzy msgid "" -"Let's take the example of “Saving Progress” which we changed to “Save " -"Progress”. Does this pass our check?" +"Let's take the example of \"Saving Progress\" which we changed to \"Save " +"Progress\". Does this pass our check?" msgstr "" "Prenons l'exemple de \"Sauvegarder la progression\" que nous avons " "remplacé par \"Sauvegarder la progression\". Est-ce que cela passe notre " "contrôle ?" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:269 -msgid "Before: ”How to saving progress” ❌" +#: ../../source/contributor-tutorial-contribute-on-github.rst:268 +#, fuzzy +msgid "Before: \"How to saving progress\" ❌" msgstr "Avant : \"Comment sauvegarder les progrès\" ❌" -#: ../../source/contributor-tutorial-contribute-on-github.rst:271 -msgid "After: ”How to save progress” ✅" +#: ../../source/contributor-tutorial-contribute-on-github.rst:270 +#, fuzzy +msgid "After: \"How to save progress\" ✅" msgstr "Après : \"Comment sauvegarder la progression\" ✅" -#: ../../source/contributor-tutorial-contribute-on-github.rst:274 +#: ../../source/contributor-tutorial-contribute-on-github.rst:273 msgid "Solution" msgstr "Solution" -#: ../../source/contributor-tutorial-contribute-on-github.rst:276 +#: ../../source/contributor-tutorial-contribute-on-github.rst:275 +#, fuzzy msgid "" -"This is a tiny change, but it’ll allow us to test your end-to-end setup. " -"After cloning and setting up the Flower repo, here’s what you should do:" +"This is a tiny change, but it'll allow us to test your end-to-end setup. " +"After cloning and setting up the Flower repo, here's what you should do:" msgstr "" "C'est un tout petit changement, mais il nous permettra de tester ta " "configuration de bout en bout. 
Après avoir cloné et configuré le repo " "Flower, voici ce que tu dois faire :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:278 +#: ../../source/contributor-tutorial-contribute-on-github.rst:277 #, fuzzy msgid "Find the source file in ``doc/source``" msgstr "Trouve le fichier source dans `doc/source`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:279 +#: ../../source/contributor-tutorial-contribute-on-github.rst:278 #, fuzzy msgid "" "Make the change in the ``.rst`` file (beware, the dashes under the title " @@ -2050,20 +2064,20 @@ msgstr "" "Effectue la modification dans le fichier `.rst` (attention, les tirets " "sous le titre doivent être de la même longueur que le titre lui-même)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:280 +#: ../../source/contributor-tutorial-contribute-on-github.rst:279 +#, fuzzy msgid "" -"Build the docs and check the result: ``_" msgstr "" -"Construis les documents et vérifie le résultat : " -"``_" +"Construis les documents et vérifie le résultat : ``_" -#: ../../source/contributor-tutorial-contribute-on-github.rst:283 +#: ../../source/contributor-tutorial-contribute-on-github.rst:282 msgid "Rename file" msgstr "Renommer le fichier" -#: ../../source/contributor-tutorial-contribute-on-github.rst:285 +#: ../../source/contributor-tutorial-contribute-on-github.rst:284 msgid "" "You might have noticed that the file name still reflects the old wording." " If we just change the file, then we break all existing links to it - it " @@ -2076,21 +2090,22 @@ msgstr "" "important** d'éviter cela, car briser des liens peut nuire à notre " "classement dans les moteurs de recherche." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:288 -msgid "Here’s how to change the file name:" +#: ../../source/contributor-tutorial-contribute-on-github.rst:287 +#, fuzzy +msgid "Here's how to change the file name:" msgstr "Voici comment changer le nom du fichier :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:290 +#: ../../source/contributor-tutorial-contribute-on-github.rst:289 #, fuzzy msgid "Change the file name to ``save-progress.rst``" msgstr "Change le nom du fichier en `save-progress.rst`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:291 +#: ../../source/contributor-tutorial-contribute-on-github.rst:290 #, fuzzy msgid "Add a redirect rule to ``doc/source/conf.py``" msgstr "Ajouter une règle de redirection à `doc/source/conf.py`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:293 +#: ../../source/contributor-tutorial-contribute-on-github.rst:292 #, fuzzy msgid "" "This will cause a redirect from ``saving-progress.html`` to ``save-" @@ -2099,11 +2114,11 @@ msgstr "" "Cela entraînera une redirection de `saving-progress.html` vers `save-" "progress.html`, les anciens liens continueront à fonctionner." -#: ../../source/contributor-tutorial-contribute-on-github.rst:296 +#: ../../source/contributor-tutorial-contribute-on-github.rst:295 msgid "Apply changes in the index file" msgstr "Applique les changements dans le fichier d'index" -#: ../../source/contributor-tutorial-contribute-on-github.rst:298 +#: ../../source/contributor-tutorial-contribute-on-github.rst:297 #, fuzzy msgid "" "For the lateral navigation bar to work properly, it is very important to " @@ -2114,46 +2129,47 @@ msgstr "" "très important de mettre également à jour le fichier `index.rst`. C'est " "là que nous définissons toute l'arborescence de la barre de navigation." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:301 +#: ../../source/contributor-tutorial-contribute-on-github.rst:300 #, fuzzy msgid "Find and modify the file name in ``index.rst``" msgstr "Trouve et modifie le nom du fichier dans `index.rst`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:304 +#: ../../source/contributor-tutorial-contribute-on-github.rst:303 msgid "Open PR" msgstr "Open PR" -#: ../../source/contributor-tutorial-contribute-on-github.rst:306 +#: ../../source/contributor-tutorial-contribute-on-github.rst:305 +#, fuzzy msgid "" -"Commit the changes (commit messages are always imperative: “Do " -"something”, in this case “Change …”)" +"Commit the changes (commit messages are always imperative: \"Do " +"something\", in this case \"Change …\")" msgstr "" "Valide les modifications (les messages de validation sont toujours " "impératifs : \"Fais quelque chose\", dans ce cas \"Modifie...\")" -#: ../../source/contributor-tutorial-contribute-on-github.rst:307 +#: ../../source/contributor-tutorial-contribute-on-github.rst:306 msgid "Push the changes to your fork" msgstr "Transmets les changements à ta fourchette" -#: ../../source/contributor-tutorial-contribute-on-github.rst:308 +#: ../../source/contributor-tutorial-contribute-on-github.rst:307 msgid "Open a PR (as shown above)" msgstr "Ouvre un RP (comme indiqué ci-dessus)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:309 +#: ../../source/contributor-tutorial-contribute-on-github.rst:308 msgid "Wait for it to be approved!" msgstr "Attends qu'elle soit approuvée !" -#: ../../source/contributor-tutorial-contribute-on-github.rst:310 +#: ../../source/contributor-tutorial-contribute-on-github.rst:309 msgid "Congrats! 🥳 You're now officially a Flower contributor!" msgstr "" "Félicitations 🥳 Tu es désormais officiellement une contributrice de " "Flower !" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:314 +#: ../../source/contributor-tutorial-contribute-on-github.rst:313 msgid "How to write a good PR title" msgstr "Comment écrire un bon titre de PR" -#: ../../source/contributor-tutorial-contribute-on-github.rst:316 +#: ../../source/contributor-tutorial-contribute-on-github.rst:315 msgid "" "A well-crafted PR title helps team members quickly understand the purpose" " and scope of the changes being proposed. Here's a guide to help you " @@ -2163,7 +2179,7 @@ msgstr "" "comprendre l'intérêt et le scope des changements proposés. Voici un guide" " pour vous aider à écrire des bons titres de PR :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:318 +#: ../../source/contributor-tutorial-contribute-on-github.rst:317 msgid "" "1. Be Clear and Concise: Provide a clear summary of the changes in a " "concise manner. 1. Use Actionable Verbs: Start with verbs like \"Add,\" " @@ -2181,7 +2197,7 @@ msgstr "" "capitalisation et une ponctuation : Suivre les règles de grammaire pour " "la clarté." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:324 +#: ../../source/contributor-tutorial-contribute-on-github.rst:323 msgid "" "Let's start with a few examples for titles that should be avoided because" " they do not provide meaningful information:" @@ -2189,27 +2205,27 @@ msgstr "" "Commençons par quelques exemples de titres qui devraient être évités " "parce qu'ils ne fournissent pas d'information significative :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:326 +#: ../../source/contributor-tutorial-contribute-on-github.rst:325 msgid "Implement Algorithm" msgstr "Implement Algorithm" -#: ../../source/contributor-tutorial-contribute-on-github.rst:327 +#: ../../source/contributor-tutorial-contribute-on-github.rst:326 msgid "Database" msgstr "Database" -#: ../../source/contributor-tutorial-contribute-on-github.rst:328 +#: ../../source/contributor-tutorial-contribute-on-github.rst:327 msgid "Add my_new_file.py to codebase" msgstr "Add my_new_file.py to codebase" -#: ../../source/contributor-tutorial-contribute-on-github.rst:329 +#: ../../source/contributor-tutorial-contribute-on-github.rst:328 msgid "Improve code in module" msgstr "Improve code in module" -#: ../../source/contributor-tutorial-contribute-on-github.rst:330 +#: ../../source/contributor-tutorial-contribute-on-github.rst:329 msgid "Change SomeModule" msgstr "Change SomeModule" -#: ../../source/contributor-tutorial-contribute-on-github.rst:332 +#: ../../source/contributor-tutorial-contribute-on-github.rst:331 msgid "" "Here are a few positive examples which provide helpful information " "without repeating how they do it, as that is already visible in the " @@ -2219,24 +2235,24 @@ msgstr "" "répéter comment ils le font, comme cela est déjà visible dans la section " "\"Files changed\" de la PR :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:334 +#: ../../source/contributor-tutorial-contribute-on-github.rst:333 msgid "Update docs banner to mention Flower 
Summit 2023" msgstr "Update docs banner to mention Flower Summit 2023" -#: ../../source/contributor-tutorial-contribute-on-github.rst:335 +#: ../../source/contributor-tutorial-contribute-on-github.rst:334 msgid "Remove unnecessary XGBoost dependency" msgstr "Remove unnecessary XGBoost dependency" -#: ../../source/contributor-tutorial-contribute-on-github.rst:336 +#: ../../source/contributor-tutorial-contribute-on-github.rst:335 msgid "Remove redundant attributes in strategies subclassing FedAvg" msgstr "Remove redundant attributes in strategies subclassing FedAvg" -#: ../../source/contributor-tutorial-contribute-on-github.rst:337 +#: ../../source/contributor-tutorial-contribute-on-github.rst:336 #, fuzzy msgid "Add CI job to deploy the staging system when the ``main`` branch changes" msgstr "Add CI job to deploy the staging system when the `main` branch changes" -#: ../../source/contributor-tutorial-contribute-on-github.rst:338 +#: ../../source/contributor-tutorial-contribute-on-github.rst:337 msgid "" "Add new amazing library which will be used to improve the simulation " "engine" @@ -2244,7 +2260,7 @@ msgstr "" "Add new amazing library which will be used to improve the simulation " "engine" -#: ../../source/contributor-tutorial-contribute-on-github.rst:342 +#: ../../source/contributor-tutorial-contribute-on-github.rst:341 #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:548 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:946 #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:727 @@ -2253,7 +2269,7 @@ msgstr "" msgid "Next steps" msgstr "Prochaines étapes" -#: ../../source/contributor-tutorial-contribute-on-github.rst:344 +#: ../../source/contributor-tutorial-contribute-on-github.rst:343 msgid "" "Once you have made your first PR, and want to contribute more, be sure to" " check out the following :" @@ -2261,148 +2277,149 @@ msgstr "" "Une fois que tu auras fait ton premier RP, et que tu voudras 
contribuer " "davantage, ne manque pas de consulter les sites suivants :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:346 +#: ../../source/contributor-tutorial-contribute-on-github.rst:345 +#, fuzzy msgid "" -"`Good first contributions `_, where you should particularly look " -"into the :code:`baselines` contributions." +":doc:`Good first contributions `, where you should particularly look into the " +":code:`baselines` contributions." msgstr "" "`Bonnes premières contributions `_, où vous devriez " "particulièrement regarder les contributions :code:`baselines`." -#: ../../source/contributor-tutorial-contribute-on-github.rst:350 +#: ../../source/contributor-tutorial-contribute-on-github.rst:349 #: ../../source/fed/0000-20200102-fed-template.md:60 msgid "Appendix" msgstr "Annexe" -#: ../../source/contributor-tutorial-contribute-on-github.rst:355 +#: ../../source/contributor-tutorial-contribute-on-github.rst:354 #, fuzzy msgid "Changelog entry" msgstr "Changelog" -#: ../../source/contributor-tutorial-contribute-on-github.rst:357 +#: ../../source/contributor-tutorial-contribute-on-github.rst:356 msgid "" "When opening a new PR, inside its description, there should be a " "``Changelog entry`` header." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:359 +#: ../../source/contributor-tutorial-contribute-on-github.rst:358 msgid "" "Above this header you should see the following comment that explains how " "to write your changelog entry:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:361 +#: ../../source/contributor-tutorial-contribute-on-github.rst:360 msgid "" "Inside the following 'Changelog entry' section, you should put the " "description of your changes that will be added to the changelog alongside" " your PR title." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:364 +#: ../../source/contributor-tutorial-contribute-on-github.rst:363 msgid "" -"If the section is completely empty (without any token) or non-existant, " +"If the section is completely empty (without any token) or non-existent, " "the changelog will just contain the title of the PR for the changelog " "entry, without any description." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:367 +#: ../../source/contributor-tutorial-contribute-on-github.rst:366 msgid "" "If the section contains some text other than tokens, it will use it to " "add a description to the change." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:369 +#: ../../source/contributor-tutorial-contribute-on-github.rst:368 msgid "" "If the section contains one of the following tokens it will ignore any " "other text and put the PR under the corresponding section of the " "changelog:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:371 +#: ../../source/contributor-tutorial-contribute-on-github.rst:370 msgid " is for classifying a PR as a general improvement." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:373 +#: ../../source/contributor-tutorial-contribute-on-github.rst:372 msgid " is to not add the PR to the changelog" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:375 +#: ../../source/contributor-tutorial-contribute-on-github.rst:374 msgid " is to add a general baselines change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:377 +#: ../../source/contributor-tutorial-contribute-on-github.rst:376 msgid " is to add a general examples change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:379 +#: ../../source/contributor-tutorial-contribute-on-github.rst:378 msgid " is to add a general sdk change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:381 +#: ../../source/contributor-tutorial-contribute-on-github.rst:380 msgid " is to add a general simulations change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:383 +#: ../../source/contributor-tutorial-contribute-on-github.rst:382 msgid "Note that only one token should be used." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:385 +#: ../../source/contributor-tutorial-contribute-on-github.rst:384 msgid "" "Its content must have a specific format. 
We will break down what each " "possibility does:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:387 +#: ../../source/contributor-tutorial-contribute-on-github.rst:386 msgid "" "If the ``### Changelog entry`` section contains nothing or doesn't exist," " the following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:391 +#: ../../source/contributor-tutorial-contribute-on-github.rst:390 msgid "" "If the ``### Changelog entry`` section contains a description (and no " "token), the following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:397 +#: ../../source/contributor-tutorial-contribute-on-github.rst:396 msgid "" "If the ``### Changelog entry`` section contains ````, nothing will " "change in the changelog." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:399 +#: ../../source/contributor-tutorial-contribute-on-github.rst:398 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:403 +#: ../../source/contributor-tutorial-contribute-on-github.rst:402 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:407 +#: ../../source/contributor-tutorial-contribute-on-github.rst:406 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:411 +#: ../../source/contributor-tutorial-contribute-on-github.rst:410 msgid "" "If the ``### Changelog entry`` section contains ````, the following " "text will be added to the changelog::" msgstr "" -#: 
../../source/contributor-tutorial-contribute-on-github.rst:415 +#: ../../source/contributor-tutorial-contribute-on-github.rst:414 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:419 +#: ../../source/contributor-tutorial-contribute-on-github.rst:418 msgid "" "Note that only one token must be provided, otherwise, only the first " "action (in the order listed above), will be performed." @@ -2436,10 +2453,11 @@ msgstr "" "virtualenv>`_" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:12 +#, fuzzy msgid "" "Flower uses :code:`pyproject.toml` to manage dependencies and configure " "development tools (the ones which support it). Poetry is a build tool " -"which supports `PEP 517 `_." +"which supports `PEP 517 `_." msgstr "" "Flower utilise un fichier :code:`pyproject.toml` pour gérer les " "dependences et configurer les outils de développement (du moins ceux qui " @@ -2645,9 +2663,9 @@ msgid "" "`_, a federated training strategy " "designed for non-iid data. We are using PyTorch to train a Convolutional " "Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " -"When applying FedBN, only few changes needed compared to `Example: " -"PyTorch - From Centralized To Federated `_." +"When applying FedBN, only few changes needed compared to :doc:`Example: " +"PyTorch - From Centralized To Federated `." msgstr "" "Ce tutoriel te montrera comment utiliser Flower pour construire une " "version fédérée d'une charge de travail d'apprentissage automatique " @@ -2668,10 +2686,10 @@ msgstr "Formation centralisée" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 #, fuzzy msgid "" -"All files are revised based on `Example: PyTorch - From Centralized To " -"Federated `_. 
The only thing to do is modifying the file called " -":code:`cifar.py`, revised part is shown below:" +"All files are revised based on :doc:`Example: PyTorch - From Centralized " +"To Federated `. The only " +"thing to do is modifying the file called :code:`cifar.py`, revised part " +"is shown below:" msgstr "" "Tous les fichiers sont révisés sur la base de `Exemple : PyTorch - From " "Centralized To Federated `_, the following parts are easy to follow, onyl " -":code:`get_parameters` and :code:`set_parameters` function in " -":code:`client.py` needed to revise. If not, please read the `Example: " -"PyTorch - From Centralized To Federated `_. first." +"If you have read :doc:`Example: PyTorch - From Centralized To Federated " +"`, the following parts are" +" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " +"function in :code:`client.py` needed to revise. If not, please read the " +":doc:`Example: PyTorch - From Centralized To Federated `. first." msgstr "" "Si vous avez lu `Exemple : PyTorch - From Centralized To Federated " "`_. This " -"will allow you see how easy it is to wrap your code with Flower and begin" -" training in a federated way. We provide you with two helper scripts, " -"namely *run-server.sh*, and *run-clients.sh*. Don't be afraid to look " -"inside, they are simple enough =)." -msgstr "" -"Maintenant que nous avons installé toutes nos dépendances, lançons un " -"simple entraînement distribué avec deux clients et un serveur. Notre " -"procédure d'entraînement et l'architecture de notre réseau sont basées " -"sur l'exemple MNIST de base de PyTorch " -"`_. Cela te " -"permettra de voir à quel point il est facile d'envelopper ton code avec " -"Flower et de commencer l'entraînement de manière fédérée. Nous te " -"fournissons deux scripts d'aide, à savoir *run-server.sh*, et *run-" -"clients.sh*. N'aie pas peur de regarder à l'intérieur, ils sont assez " -"simples =)." 
- -#: ../../source/example-walkthrough-pytorch-mnist.rst:31 -msgid "" -"Go ahead and launch on a terminal the *run-server.sh* script first as " -"follows:" -msgstr "Lance sur un terminal le script *run-server.sh* d'abord comme suit :" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:38 -msgid "Now that the server is up and running, go ahead and launch the clients." -msgstr "Maintenant que le serveur est opérationnel, vas-y et lance les clients." - -#: ../../source/example-walkthrough-pytorch-mnist.rst:45 -msgid "" -"Et voilà! You should be seeing the training procedure and, after a few " -"iterations, the test accuracy for each client." -msgstr "" -"Et voilà ! Tu devrais voir la procédure d'entraînement et, après quelques" -" itérations, la précision du test pour chaque client." - -#: ../../source/example-walkthrough-pytorch-mnist.rst:66 -msgid "Now, let's see what is really happening inside." -msgstr "Maintenant, voyons ce qui se passe réellement à l'intérieur." - -#: ../../source/example-walkthrough-pytorch-mnist.rst:69 -#: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-mxnet.rst:226 -#: ../../source/tutorial-quickstart-pytorch.rst:203 -#: ../../source/tutorial-quickstart-scikitlearn.rst:157 -#: ../../source/tutorial-quickstart-tensorflow.rst:98 -#: ../../source/tutorial-quickstart-xgboost.rst:309 -msgid "Flower Server" -msgstr "Serveur de Flower" +#: ../../source/explanation-differential-privacy.rst:2 +#: ../../source/explanation-differential-privacy.rst:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 +#, fuzzy +msgid "Differential Privacy" +msgstr "Confidentialité différentielle" -#: ../../source/example-walkthrough-pytorch-mnist.rst:71 +#: ../../source/explanation-differential-privacy.rst:3 msgid "" -"Inside the server helper script *run-server.sh* you will find the " -"following code that basically runs the :code:`server.py`" +"The information in datasets like healthcare, financial 
transactions, user" +" preferences, etc., is valuable and has the potential for scientific " +"breakthroughs and provides important business insights. However, such " +"data is also sensitive and there is a risk of compromising individual " +"privacy." msgstr "" -"Dans le script d'aide au serveur *run-server.sh*, tu trouveras le code " -"suivant qui exécute le fichier :code:`server.py`" -#: ../../source/example-walkthrough-pytorch-mnist.rst:78 +#: ../../source/explanation-differential-privacy.rst:6 msgid "" -"We can go a bit deeper and see that :code:`server.py` simply launches a " -"server that will coordinate three rounds of training. Flower Servers are " -"very customizable, but for simple workloads, we can start a server using " -"the :ref:`start_server ` function and " -"leave all the configuration possibilities at their default values, as " -"seen below." +"Traditional methods like anonymization alone would not work because of " +"attacks like Re-identification and Data Linkage. That's where " +"differential privacy comes in. It provides the possibility of analyzing " +"data while ensuring the privacy of individuals." msgstr "" -"Nous pouvons aller un peu plus loin et voir que :code:`server.py` lance " -"simplement un serveur qui coordonnera trois tours de formation. Flower " -"Les serveurs sont très personnalisables, mais pour les charges de travail" -" simples, nous pouvons démarrer un serveur à l'aide de la fonction " -":ref:`start_server ` et laisser toutes " -"les possibilités de configuration à leurs valeurs par défaut, comme on " -"peut le voir ci-dessous." 
- -#: ../../source/example-walkthrough-pytorch-mnist.rst:89 -#: ../../source/tutorial-quickstart-ios.rst:34 -#: ../../source/tutorial-quickstart-mxnet.rst:36 -#: ../../source/tutorial-quickstart-pytorch.rst:37 -#: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-tensorflow.rst:29 -#: ../../source/tutorial-quickstart-xgboost.rst:55 -msgid "Flower Client" -msgstr "Client de la fleur" -#: ../../source/example-walkthrough-pytorch-mnist.rst:91 +#: ../../source/explanation-differential-privacy.rst:12 msgid "" -"Next, let's take a look at the *run-clients.sh* file. You will see that " -"it contains the main loop that starts a set of *clients*." +"Imagine two datasets that are identical except for a single record (for " +"instance, Alice's data). Differential Privacy (DP) guarantees that any " +"analysis (M), like calculating the average income, will produce nearly " +"identical results for both datasets (O and O' would be similar). This " +"preserves group patterns while obscuring individual details, ensuring the" +" individual's information remains hidden in the crowd." msgstr "" -"Ensuite, jetons un coup d'œil au fichier *run-clients.sh*. Tu verras " -"qu'il contient la boucle principale qui démarre un ensemble de *clients*." -#: ../../source/example-walkthrough-pytorch-mnist.rst:100 -msgid "" -"**cid**: is the client ID. It is an integer that uniquely identifies " -"client identifier." +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "DP Intro" msgstr "" -"**cid** : c'est l'identifiant du client. C'est un nombre entier qui " -"identifie de façon unique l'identifiant du client." -#: ../../source/example-walkthrough-pytorch-mnist.rst:101 -msgid "**sever_address**: String that identifies IP and port of the server." -msgstr "**sever_address** : Chaîne qui identifie l'IP et le port du serveur." 
- -#: ../../source/example-walkthrough-pytorch-mnist.rst:102 +#: ../../source/explanation-differential-privacy.rst:22 msgid "" -"**nb_clients**: This defines the number of clients being created. This " -"piece of information is not required by the client, but it helps us " -"partition the original MNIST dataset to make sure that every client is " -"working on unique subsets of both *training* and *test* sets." +"One of the most commonly used mechanisms to achieve DP is adding enough " +"noise to the output of the analysis to mask the contribution of each " +"individual in the data while preserving the overall accuracy of the " +"analysis." msgstr "" -"**Cette information n'est pas requise par le client, mais elle nous aide " -"à partitionner l'ensemble de données MNIST original pour nous assurer que" -" chaque client travaille sur des sous-ensembles uniques des ensembles " -"*formation* et *test*." -#: ../../source/example-walkthrough-pytorch-mnist.rst:104 +#: ../../source/explanation-differential-privacy.rst:25 #, fuzzy -msgid "" -"Again, we can go deeper and look inside :code:`flwr_example/quickstart-" -"pytorch/client.py`. After going through the argument parsing code at the " -"beginning of our :code:`main` function, you will find a call to " -":code:`mnist.load_data`. This function is responsible for partitioning " -"the original MNIST datasets (*training* and *test*) and returning a " -":code:`torch.utils.data.DataLoader` s for each of them. We then " -"instantiate a :code:`PytorchMNISTClient` object with our client ID, our " -"DataLoaders, the number of epochs in each round, and which device we want" -" to use for training (CPU or GPU)." -msgstr "" -"Encore une fois, nous pouvons aller plus loin et regarder dans " -":code:`flwr_example/quickstart-pytorch/client.py`. Après avoir parcouru " -"le code d'analyse des arguments au début de notre fonction :code:`main`, " -"tu trouveras un appel à :code:`mnist.load_data`. 
Cette fonction est " -"responsable du partitionnement des ensembles de données MNIST originaux " -"(*training* et *test*) et renvoie un :code:`torch.utils.data.DataLoader` " -"s pour chacun d'entre eux. Nous instancions ensuite un objet " -":code:`PytorchMNISTClient` avec notre ID client, nos DataLoaders, le " -"nombre d'époques dans chaque tour et le périphérique que nous voulons " -"utiliser pour l'entraînement (CPU ou GPU)." +msgid "Formal Definition" +msgstr "Compiler les définitions ProtoBuf" -#: ../../source/example-walkthrough-pytorch-mnist.rst:119 +#: ../../source/explanation-differential-privacy.rst:26 msgid "" -"The :code:`PytorchMNISTClient` object when finally passed to " -":code:`fl.client.start_client` along with the server's address as the " -"training process begins." +"Differential Privacy (DP) provides statistical guarantees against the " +"information an adversary can infer through the output of a randomized " +"algorithm. It provides an unconditional upper bound on the influence of a" +" single individual on the output of the algorithm by adding noise [1]. A " +"randomized mechanism M provides (:math:`\\epsilon`, " +":math:`\\delta`)-differential privacy if for any two neighboring " +"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " +"record, and for all possible outputs S ⊆ Range(A):" msgstr "" -"L'objet :code:`PytorchMNISTClient` est finalement transmis à " -":code:`fl.client.start_client` avec l'adresse du serveur lorsque le " -"processus de formation commence." 
-#: ../../source/example-walkthrough-pytorch-mnist.rst:123 -msgid "A Closer Look" -msgstr "Regarder de plus près" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:125 -#, fuzzy +#: ../../source/explanation-differential-privacy.rst:32 msgid "" -"Now, let's look closely into the :code:`PytorchMNISTClient` inside " -":code:`flwr_example.quickstart-pytorch.mnist` and see what it is doing:" +"\\small\n" +"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" msgstr "" -"Maintenant, examinons de près le :code:`PytorchMNISTClient` à l'intérieur" -" du :code:`flwr_example.quickstart-pytorch.mnist` et voyons ce qu'il fait" -" :" -#: ../../source/example-walkthrough-pytorch-mnist.rst:226 +#: ../../source/explanation-differential-privacy.rst:38 msgid "" -"The first thing to notice is that :code:`PytorchMNISTClient` instantiates" -" a CNN model inside its constructor" +"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " +"metric of privacy loss. It also controls the privacy-utility trade-off; " +"lower :math:`\\epsilon` values indicate higher levels of privacy but are " +"likely to reduce utility as well. The :math:`\\delta` parameter accounts " +"for a small probability on which the upper bound :math:`\\epsilon` does " +"not hold. The amount of noise needed to achieve differential privacy is " +"proportional to the sensitivity of the output, which measures the maximum" +" change in the output due to the inclusion or removal of a single record." msgstr "" -"La première chose à remarquer est que :code:`PytorchMNISTClient` " -"instancie un modèle CNN dans son constructeur" -#: ../../source/example-walkthrough-pytorch-mnist.rst:244 +#: ../../source/explanation-differential-privacy.rst:45 #, fuzzy -msgid "" -"The code for the CNN is available under :code:`quickstart-pytorch.mnist` " -"and it is reproduced below. It is the same network found in `Basic MNIST " -"Example `_." 
-msgstr "" -"Le code du CNN est disponible sous :code:`quickstart-pytorch.mnist` et il" -" est reproduit ci-dessous. Il s'agit du même réseau que celui que l'on " -"trouve dans `Exemple basique de MNIST " -"`_." - -#: ../../source/example-walkthrough-pytorch-mnist.rst:290 -msgid "" -"The second thing to notice is that :code:`PytorchMNISTClient` class " -"inherits from the :code:`fl.client.Client`, and hence it must implement " -"the following methods:" -msgstr "" -"La deuxième chose à noter est que la classe :code:`PytorchMNISTClient` " -"hérite de :code:`fl.client.Client`, et qu'elle doit donc implémenter les " -"méthodes suivantes :" +msgid "Differential Privacy in Machine Learning" +msgstr "Confidentialité différentielle" -#: ../../source/example-walkthrough-pytorch-mnist.rst:315 +#: ../../source/explanation-differential-privacy.rst:46 msgid "" -"When comparing the abstract class to its derived class " -":code:`PytorchMNISTClient` you will notice that :code:`fit` calls a " -":code:`train` function and that :code:`evaluate` calls a :code:`test`: " -"function." +"DP can be utilized in machine learning to preserve the privacy of the " +"training data. Differentially private machine learning algorithms are " +"designed in a way to prevent the algorithm to learn any specific " +"information about any individual data points and subsequently prevent the" +" model from revealing sensitive information. Depending on the stage at " +"which noise is introduced, various methods exist for applying DP to " +"machine learning algorithms. One approach involves adding noise to the " +"training data (either to the features or labels), while another method " +"entails injecting noise into the gradients of the loss function during " +"model training. Additionally, such noise can be incorporated into the " +"model's output." 
msgstr "" -"En comparant la classe abstraite à sa classe dérivée " -":code:`PytorchMNISTClient`, tu remarqueras que :code:`fit` appelle une " -"fonction :code:`train` et que :code:`evaluate` appelle une fonction " -":code:`test` :." -#: ../../source/example-walkthrough-pytorch-mnist.rst:317 +#: ../../source/explanation-differential-privacy.rst:53 #, fuzzy -msgid "" -"These functions can both be found inside the same :code:`quickstart-" -"pytorch.mnist` module:" -msgstr "" -"Ces fonctions se trouvent toutes deux dans le même module :code" -":`quickstart-pytorch.mnist` :" +msgid "Differential Privacy in Federated Learning" +msgstr "Mise à l'échelle de l'apprentissage fédéré" -#: ../../source/example-walkthrough-pytorch-mnist.rst:437 +#: ../../source/explanation-differential-privacy.rst:54 msgid "" -"Observe that these functions encapsulate regular training and test loops " -"and provide :code:`fit` and :code:`evaluate` with final statistics for " -"each round. You could substitute them with your custom train and test " -"loops and change the network architecture, and the entire example would " -"still work flawlessly. As a matter of fact, why not try and modify the " -"code to an example of your liking?" +"Federated learning is a data minimization approach that allows multiple " +"parties to collaboratively train a model without sharing their raw data. " +"However, federated learning also introduces new privacy challenges. The " +"model updates between parties and the central server can leak information" +" about the local data. These leaks can be exploited by attacks such as " +"membership inference and property inference attacks, or model inversion " +"attacks." msgstr "" -"Observe que ces fonctions encapsulent les boucles d'entraînement et de " -"test habituelles et fournissent à :code:`fit` et :code:`evaluate` les " -"statistiques finales pour chaque tour. 
Tu pourrais les remplacer par tes " -"boucles d'entraînement et de test personnalisées et changer " -"l'architecture du réseau, et l'ensemble de l'exemple fonctionnerait " -"toujours parfaitement. En fait, pourquoi ne pas essayer de modifier le " -"code pour en faire un exemple qui te plairait ?" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:444 -msgid "Give It a Try" -msgstr "Fais un essai" -#: ../../source/example-walkthrough-pytorch-mnist.rst:445 +#: ../../source/explanation-differential-privacy.rst:58 msgid "" -"Looking through the quickstart code description above will have given a " -"good understanding of how *clients* and *servers* work in Flower, how to " -"run a simple experiment, and the internals of a client wrapper. Here are " -"a few things you could try on your own and get more experience with " -"Flower:" +"DP can play a crucial role in federated learning to provide privacy for " +"the clients' data." msgstr "" -"En parcourant la description du code de démarrage rapide ci-dessus, tu " -"auras acquis une bonne compréhension du fonctionnement des *clients* et " -"des *serveurs* dans Flower, de l'exécution d'une expérience simple et de " -"la structure interne d'un wrapper client. Voici quelques exemples que tu " -"peux essayer par toi-même pour acquérir plus d'expérience avec Flower :" -#: ../../source/example-walkthrough-pytorch-mnist.rst:448 +#: ../../source/explanation-differential-privacy.rst:60 msgid "" -"Try and change :code:`PytorchMNISTClient` so it can accept different " -"architectures." +"Depending on the granularity of privacy provision or the location of " +"noise addition, different forms of DP exist in federated learning. In " +"this explainer, we focus on two approaches of DP utilization in federated" +" learning based on where the noise is added: at the server (also known as" +" the center) or at the client (also known as the local)." 
msgstr "" -"Essaie de modifier :code:`PytorchMNISTClient` pour qu'il puisse accepter " -"différentes architectures." -#: ../../source/example-walkthrough-pytorch-mnist.rst:449 -msgid "Modify the :code:`train` function so that it accepts different optimizers" -msgstr "" -"Modifie la fonction :code:`train` pour qu'elle accepte différents " -"optimiseurs" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:450 +#: ../../source/explanation-differential-privacy.rst:63 msgid "" -"Modify the :code:`test` function so that it proves not only the top-1 " -"(regular accuracy) but also the top-5 accuracy?" +"**Central Differential Privacy**: DP is applied by the server and the " +"goal is to prevent the aggregated model from leaking information about " +"each client's data." msgstr "" -"Modifie la fonction :code:`test` pour qu'elle prouve non seulement le " -"top-1 (précision normale) mais aussi le top-5 ?" -#: ../../source/example-walkthrough-pytorch-mnist.rst:451 +#: ../../source/explanation-differential-privacy.rst:65 msgid "" -"Go larger! Try to adapt the code to larger images and datasets. Why not " -"try training on ImageNet with a ResNet-50?" +"**Local Differential Privacy**: DP is applied on the client side before " +"sending any information to the server and the goal is to prevent the " +"updates that are sent to the server from leaking any information about " +"the client's data." msgstr "" -"Essaie d'adapter le code à des images et à des ensembles de données plus " -"grands. Pourquoi ne pas essayer de s'entraîner sur ImageNet avec un " -"ResNet-50 ?" -#: ../../source/example-walkthrough-pytorch-mnist.rst:453 -msgid "You are ready now. Enjoy learning in a federated way!" -msgstr "Tu es prêt maintenant. Profite de l'apprentissage de manière fédérée !" 
- -#: ../../source/explanation-differential-privacy.rst:2 +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:68 +#: ../../source/how-to-use-differential-privacy.rst:11 #, fuzzy -msgid "Differential privacy" +msgid "Central Differential Privacy" msgstr "Confidentialité différentielle" -#: ../../source/explanation-differential-privacy.rst:4 -msgid "" -"Flower provides differential privacy (DP) wrapper classes for the easy " -"integration of the central DP guarantees provided by DP-FedAvg into " -"training pipelines defined in any of the various ML frameworks that " -"Flower is compatible with." -msgstr "" -"Flower fournit des classes d'enveloppe de confidentialité différentielle " -"(DP) pour l'intégration facile des garanties centrales de DP fournies par" -" DP-FedAvg dans les pipelines de formation définis dans n'importe lequel " -"des divers cadres de ML avec lesquels Flower est compatible." - -#: ../../source/explanation-differential-privacy.rst:7 -#, fuzzy -msgid "" -"Please note that these components are still experimental; the correct " -"configuration of DP for a specific task is still an unsolved problem." -msgstr "" -"Note que ces composants sont encore expérimentaux, la configuration " -"correcte du DP pour une tâche spécifique est encore un problème non " -"résolu." - -#: ../../source/explanation-differential-privacy.rst:10 +#: ../../source/explanation-differential-privacy.rst:69 msgid "" -"The name DP-FedAvg is misleading since it can be applied on top of any FL" -" algorithm that conforms to the general structure prescribed by the " -"FedOpt family of algorithms." +"In this approach, which is also known as user-level DP, the central " +"server is responsible for adding noise to the globally aggregated " +"parameters. It should be noted that trust in the server is required." 
msgstr "" -"Le nom DP-FedAvg est trompeur car il peut être appliqué à n'importe quel " -"algorithme FL qui se conforme à la structure générale prescrite par la " -"famille d'algorithmes FedOpt." -#: ../../source/explanation-differential-privacy.rst:13 -msgid "DP-FedAvg" -msgstr "DP-FedAvg" - -#: ../../source/explanation-differential-privacy.rst:15 +#: ../../source/explanation-differential-privacy.rst:76 msgid "" -"DP-FedAvg, originally proposed by McMahan et al. [mcmahan]_ and extended " -"by Andrew et al. [andrew]_, is essentially FedAvg with the following " -"modifications." +"While there are various ways to implement central DP in federated " +"learning, we concentrate on the algorithms proposed by [2] and [3]. The " +"overall approach is to clip the model updates sent by the clients and add" +" some amount of noise to the aggregated model. In each iteration, a " +"random set of clients is chosen with a specific probability for training." +" Each client performs local training on its own data. The update of each " +"client is then clipped by some value `S` (sensitivity `S`). This would " +"limit the impact of any individual client which is crucial for privacy " +"and often beneficial for robustness. A common approach to achieve this is" +" by restricting the `L2` norm of the clients' model updates, ensuring " +"that larger updates are scaled down to fit within the norm `S`." msgstr "" -"DP-FedAvg, proposé à l'origine par McMahan et al. [mcmahan]_ et étendu " -"par Andrew et al. [andrew]_, est essentiellement FedAvg avec les " -"modifications suivantes." -#: ../../source/explanation-differential-privacy.rst:17 -msgid "" -"**Clipping** : The influence of each client's update is bounded by " -"clipping it. This is achieved by enforcing a cap on the L2 norm of the " -"update, scaling it down if needed." 
+#: ../../source/explanation-differential-privacy.rst:-1 +msgid "clipping" msgstr "" -"**Clipping** : L'influence de la mise à jour de chaque client est limitée" -" en l'écrêtant. Ceci est réalisé en imposant un plafond à la norme L2 de " -"la mise à jour, en la réduisant si nécessaire." -#: ../../source/explanation-differential-privacy.rst:18 +#: ../../source/explanation-differential-privacy.rst:89 msgid "" -"**Noising** : Gaussian noise, calibrated to the clipping threshold, is " -"added to the average computed at the server." +"Afterwards, the Gaussian mechanism is used to add noise in order to " +"distort the sum of all clients' updates. The amount of noise is scaled to" +" the sensitivity value to obtain a privacy guarantee. The Gaussian " +"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " +"noise_scale * S ) / (number of sampled clients)`." msgstr "" -"**Bruit** : un bruit gaussien, calibré sur le seuil d'écrêtage, est " -"ajouté à la moyenne calculée au niveau du serveur." -#: ../../source/explanation-differential-privacy.rst:20 -#, fuzzy -msgid "" -"The distribution of the update norm has been shown to vary from task-to-" -"task and to evolve as training progresses. This variability is crucial in" -" understanding its impact on differential privacy guarantees, emphasizing" -" the need for an adaptive approach [andrew]_ that continuously adjusts " -"the clipping threshold to track a prespecified quantile of the update " -"norm distribution." +#: ../../source/explanation-differential-privacy.rst:94 +msgid "Clipping" msgstr "" -"Il a été démontré que la distribution de la norme de mise à jour varie " -"d'une tâche à l'autre et évolue au fur et à mesure de la formation. C'est" -" pourquoi nous utilisons une approche adaptative [andrew]_ qui ajuste " -"continuellement le seuil d'écrêtage pour suivre un quantile prédéfini de " -"la distribution de la norme de mise à jour." 
-#: ../../source/explanation-differential-privacy.rst:23 -msgid "Simplifying Assumptions" -msgstr "Simplifier les hypothèses" - -#: ../../source/explanation-differential-privacy.rst:25 -#, fuzzy +#: ../../source/explanation-differential-privacy.rst:96 msgid "" -"We make (and attempt to enforce) a number of assumptions that must be " -"satisfied to ensure that the training process actually realizes the " -":math:`(\\epsilon, \\delta)` guarantees the user has in mind when " -"configuring the setup." +"There are two forms of clipping commonly used in Central DP: Fixed " +"Clipping and Adaptive Clipping." msgstr "" -"Nous formulons (et tentons d'appliquer) un certain nombre d'hypothèses " -"qui doivent être satisfaites pour que le processus de formation réalise " -"réellement les garanties :math:`(\\epsilon, \\delta)` que l'utilisateur a" -" à l'esprit lorsqu'il configure l'installation." -#: ../../source/explanation-differential-privacy.rst:27 +#: ../../source/explanation-differential-privacy.rst:98 msgid "" -"**Fixed-size subsampling** :Fixed-size subsamples of the clients must be " -"taken at each round, as opposed to variable-sized Poisson subsamples." +"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " +"of clients' updates. Any update exceeding this threshold is clipped back " +"to the threshold value." msgstr "" -"**Sous-échantillonnage de taille fixe** :Des sous-échantillons de taille " -"fixe des clients doivent être prélevés à chaque tour, par opposition aux " -"sous-échantillons de Poisson de taille variable." -#: ../../source/explanation-differential-privacy.rst:28 +#: ../../source/explanation-differential-privacy.rst:100 msgid "" -"**Unweighted averaging** : The contributions from all the clients must " -"weighted equally in the aggregate to eliminate the requirement for the " -"server to know in advance the sum of the weights of all clients available" -" for selection." 
+"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " +"on the observed update distribution [4]. It means that the clipping value" +" is tuned during the rounds with respect to the quantile of the update " +"norm distribution." msgstr "" -"**Moyenne non pondérée** : Les contributions de tous les clients doivent " -"être pondérées de façon égale dans l'ensemble afin que le serveur n'ait " -"pas à connaître à l'avance la somme des poids de tous les clients " -"disponibles pour la sélection." -#: ../../source/explanation-differential-privacy.rst:29 +#: ../../source/explanation-differential-privacy.rst:102 msgid "" -"**No client failures** : The set of available clients must stay constant " -"across all rounds of training. In other words, clients cannot drop out or" -" fail." +"The choice between fixed and adaptive clipping depends on various factors" +" such as privacy requirements, data distribution, model complexity, and " +"others." msgstr "" -"**Aucune défaillance de client** : L'ensemble des clients disponibles " -"doit rester constant pendant toutes les séries de formation. En d'autres " -"termes, les clients ne peuvent pas abandonner ou échouer." -#: ../../source/explanation-differential-privacy.rst:31 +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:105 +#: ../../source/how-to-use-differential-privacy.rst:96 #, fuzzy -msgid "" -"The first two are useful for eliminating a multitude of complications " -"associated with calibrating the noise to the clipping threshold, while " -"the third one is required to comply with the assumptions of the privacy " -"analysis." -msgstr "" -"Les deux premiers sont utiles pour éliminer une multitude de " -"complications liées au calibrage du bruit en fonction du seuil " -"d'écrêtage, tandis que le troisième est nécessaire pour se conformer aux " -"hypothèses de l'analyse de la vie privée." 
+msgid "Local Differential Privacy" +msgstr "Confidentialité différentielle" -#: ../../source/explanation-differential-privacy.rst:34 +#: ../../source/explanation-differential-privacy.rst:107 msgid "" -"These restrictions are in line with constraints imposed by Andrew et al. " -"[andrew]_." +"In this approach, each client is responsible for performing DP. Local DP " +"avoids the need for a fully trusted aggregator, but it should be noted " +"that local DP leads to a decrease in accuracy but better privacy in " +"comparison to central DP." msgstr "" -"Ces restrictions sont conformes aux contraintes imposées par Andrew et " -"al. [andrew]_." - -#: ../../source/explanation-differential-privacy.rst:37 -msgid "Customizable Responsibility for Noise injection" -msgstr "Responsabilité personnalisable pour l'injection de bruit" - -#: ../../source/explanation-differential-privacy.rst:38 -msgid "" -"In contrast to other implementations where the addition of noise is " -"performed at the server, you can configure the site of noise injection to" -" better match your threat model. We provide users with the flexibility to" -" set up the training such that each client independently adds a small " -"amount of noise to the clipped update, with the result that simply " -"aggregating the noisy updates is equivalent to the explicit addition of " -"noise to the non-noisy aggregate at the server." -msgstr "" -"Contrairement à d'autres implémentations où l'ajout de bruit est effectué" -" au niveau du serveur, tu peux configurer le site d'injection de bruit " -"pour qu'il corresponde mieux à ton modèle de menace. Nous offrons aux " -"utilisateurs la possibilité de configurer l'entraînement de telle sorte " -"que chaque client ajoute indépendamment une petite quantité de bruit à la" -" mise à jour écrêtée, ce qui fait que le simple fait d'agréger les mises " -"à jour bruyantes équivaut à l'ajout explicite de bruit à l'agrégat non " -"bruyant au niveau du serveur." 
- -#: ../../source/explanation-differential-privacy.rst:41 -msgid "" -"To be precise, if we let :math:`m` be the number of clients sampled each " -"round and :math:`\\sigma_\\Delta` be the scale of the total Gaussian " -"noise that needs to be added to the sum of the model updates, we can use " -"simple maths to show that this is equivalent to each client adding noise " -"with scale :math:`\\sigma_\\Delta/\\sqrt{m}`." -msgstr "" -"Pour être précis, si nous laissons :math:`m` être le nombre de clients " -"échantillonnés à chaque tour et :math:\\sigma_\\Delta` être l'échelle du " -"bruit gaussien total qui doit être ajouté à la somme des mises à jour du " -"modèle, nous pouvons utiliser des mathématiques simples pour montrer que " -"cela équivaut à ce que chaque client ajoute du bruit avec l'échelle " -":math:\\sigma_\\Delta/\\sqrt{m}`." - -#: ../../source/explanation-differential-privacy.rst:44 -msgid "Wrapper-based approach" -msgstr "Approche basée sur l'enveloppe" - -#: ../../source/explanation-differential-privacy.rst:46 -msgid "" -"Introducing DP to an existing workload can be thought of as adding an " -"extra layer of security around it. This inspired us to provide the " -"additional server and client-side logic needed to make the training " -"process differentially private as wrappers for instances of the " -":code:`Strategy` and :code:`NumPyClient` abstract classes respectively. " -"This wrapper-based approach has the advantage of being easily composable " -"with other wrappers that someone might contribute to the Flower library " -"in the future, e.g., for secure aggregation. Using Inheritance instead " -"can be tedious because that would require the creation of new sub- " -"classes every time a new class implementing :code:`Strategy` or " -":code:`NumPyClient` is defined." -msgstr "" -"L'introduction du DP dans une charge de travail existante peut être " -"considérée comme l'ajout d'une couche de sécurité supplémentaire autour " -"d'elle. 
Cela nous a incités à fournir la logique supplémentaire côté " -"serveur et côté client nécessaire pour rendre le processus de formation " -"différentiellement privé en tant qu'enveloppes pour les instances des " -"classes abstraites :code:`Strategy` et :code:`NumPyClient` " -"respectivement. Cette approche basée sur l'enveloppe a l'avantage d'être " -"facilement composable avec d'autres enveloppes que quelqu'un pourrait " -"contribuer à la bibliothèque Flower à l'avenir, par exemple, pour " -"l'agrégation sécurisée. L'utilisation de l'héritage à la place peut être " -"fastidieuse car cela nécessiterait la création de nouvelles sous-classes " -"chaque fois qu'une nouvelle classe mettant en œuvre :code:`Strategy` ou " -":code:`NumPyClient` est définie." - -#: ../../source/explanation-differential-privacy.rst:49 -msgid "Server-side logic" -msgstr "Logique côté serveur" -#: ../../source/explanation-differential-privacy.rst:51 -#, fuzzy -msgid "" -"The first version of our solution was to define a decorator whose " -"constructor accepted, among other things, a boolean-valued variable " -"indicating whether adaptive clipping was to be enabled or not. We quickly" -" realized that this would clutter its :code:`__init__()` function with " -"variables corresponding to hyperparameters of adaptive clipping that " -"would remain unused when it was disabled. A cleaner implementation could " -"be achieved by splitting the functionality into two decorators, " -":code:`DPFedAvgFixed` and :code:`DPFedAvgAdaptive`, with the latter sub- " -"classing the former. The constructors for both classes accept a boolean " -"parameter :code:`server_side_noising`, which, as the name suggests, " -"determines where noising is to be performed." -msgstr "" -"La première version de notre solution consistait à définir un décorateur " -"dont le constructeur acceptait, entre autres, une variable à valeur " -"booléenne indiquant si l'écrêtage adaptatif devait être activé ou non. 
" -"Nous nous sommes rapidement rendu compte que cela encombrerait sa " -"fonction :code:`__init__()` avec des variables correspondant aux " -"hyperparamètres de l'écrêtage adaptatif qui resteraient inutilisées " -"lorsque celui-ci était désactivé. Une implémentation plus propre pourrait" -" être obtenue en divisant la fonctionnalité en deux décorateurs, " -":code:`DPFedAvgFixed` et :code:`DPFedAvgAdaptive`, le second sous-" -"classant le premier. Les constructeurs des deux classes acceptent un " -"paramètre booléen :code:`server_side_noising` qui, comme son nom " -"l'indique, détermine l'endroit où le noising doit être effectué." - -#: ../../source/explanation-differential-privacy.rst:54 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 -msgid "DPFedAvgFixed" -msgstr "DPFedAvgFixed" - -#: ../../source/explanation-differential-privacy.rst:56 -msgid "" -"The server-side capabilities required for the original version of DP-" -"FedAvg, i.e., the one which performed fixed clipping, can be completely " -"captured with the help of wrapper logic for just the following two " -"methods of the :code:`Strategy` abstract class." +#: ../../source/explanation-differential-privacy.rst:116 +msgid "In this explainer, we focus on two forms of achieving Local DP:" msgstr "" -"Les capacités côté serveur requises pour la version originale de DP-" -"FedAvg, c'est-à-dire celle qui effectue un écrêtage fixe, peuvent être " -"entièrement capturées à l'aide d'une logique d'enveloppement pour les " -"deux méthodes suivantes de la classe abstraite :code:`Strategy`." 
- -#: ../../source/explanation-differential-privacy.rst:58 -msgid "" -":code:`configure_fit()` : The config dictionary being sent by the wrapped" -" :code:`Strategy` to each client needs to be augmented with an additional" -" value equal to the clipping threshold (keyed under " -":code:`dpfedavg_clip_norm`) and, if :code:`server_side_noising=true`, " -"another one equal to the scale of the Gaussian noise that needs to be " -"added at the client (keyed under :code:`dpfedavg_noise_stddev`). This " -"entails *post*-processing of the results returned by the wrappee's " -"implementation of :code:`configure_fit()`." -msgstr "" -":code:`configure_fit()` : Le dictionnaire de configuration envoyé par la " -":code:`Strategy` enveloppée à chaque client doit être augmenté d'une " -"valeur supplémentaire égale au seuil d'écrêtage (indiqué sous " -":code:`dpfedavg_clip_norm`) et, si :code:`server_side_noising=true`, " -"d'une autre égale à l'échelle du bruit gaussien qui doit être ajouté au " -"client (indiqué sous :code:`dpfedavg_noise_stddev`)." - -#: ../../source/explanation-differential-privacy.rst:59 -#, fuzzy -msgid "" -":code:`aggregate_fit()`: We check whether any of the sampled clients " -"dropped out or failed to upload an update before the round timed out. In " -"that case, we need to abort the current round, discarding any successful " -"updates that were received, and move on to the next one. On the other " -"hand, if all clients responded successfully, we must force the averaging " -"of the updates to happen in an unweighted manner by intercepting the " -":code:`parameters` field of :code:`FitRes` for each received update and " -"setting it to 1. Furthermore, if :code:`server_side_noising=true`, each " -"update is perturbed with an amount of noise equal to what it would have " -"been subjected to had client-side noising being enabled. 
This entails " -"*pre*-processing of the arguments to this method before passing them on " -"to the wrappee's implementation of :code:`aggregate_fit()`." -msgstr "" -":code:`aggregate_fit()`: We check whether any of the sampled clients " -"dropped out or failed to upload an update before the round timed out. In " -"that case, we need to abort the current round, discarding any successful " -"updates that were received, and move on to the next one. On the other " -"hand, if all clients responded successfully, we must force the averaging " -"of the updates to happen in an unweighted manner by intercepting the " -":code:`parameters` field of :code:`FitRes` for each received update and " -"setting it to 1. Furthermore, if :code:`server_side_noising=true`, each " -"update is perturbed with an amount of noise equal to what it would have " -"been subjected to had client-side noising being enabled. This entails " -"*pre*-processing of the arguments to this method before passing them on " -"to the wrappee's implementation of :code:`aggregate_fit()`." - -#: ../../source/explanation-differential-privacy.rst:62 -msgid "" -"We can't directly change the aggregation function of the wrapped strategy" -" to force it to add noise to the aggregate, hence we simulate client-side" -" noising to implement server-side noising." -msgstr "" -"Nous ne pouvons pas modifier directement la fonction d'agrégation de la " -"stratégie enveloppée pour la forcer à ajouter du bruit à l'agrégat, c'est" -" pourquoi nous simulons le bruit côté client pour mettre en œuvre le " -"bruit côté serveur." - -#: ../../source/explanation-differential-privacy.rst:64 -msgid "" -"These changes have been put together into a class called " -":code:`DPFedAvgFixed`, whose constructor accepts the strategy being " -"decorated, the clipping threshold and the number of clients sampled every" -" round as compulsory arguments. 
The user is expected to specify the " -"clipping threshold since the order of magnitude of the update norms is " -"highly dependent on the model being trained and providing a default value" -" would be misleading. The number of clients sampled at every round is " -"required to calculate the amount of noise that must be added to each " -"individual update, either by the server or the clients." -msgstr "" -"Ces modifications ont été regroupées dans une classe appelée " -":code:`DPFedAvgFixed`, dont le constructeur accepte la stratégie décorée," -" le seuil d'écrêtage et le nombre de clients échantillonnés à chaque tour" -" comme arguments obligatoires. L'utilisateur est censé spécifier le seuil" -" d'écrêtage car l'ordre de grandeur des normes de mise à jour dépend " -"fortement du modèle formé et fournir une valeur par défaut serait " -"trompeur. Le nombre de clients échantillonnés à chaque tour est " -"nécessaire pour calculer la quantité de bruit qui doit être ajoutée à " -"chaque mise à jour individuelle, que ce soit par le serveur ou par les " -"clients." - -#: ../../source/explanation-differential-privacy.rst:67 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 -msgid "DPFedAvgAdaptive" -msgstr "DPFedAvgAdaptive" -#: ../../source/explanation-differential-privacy.rst:69 +#: ../../source/explanation-differential-privacy.rst:118 msgid "" -"The additional functionality required to facilitate adaptive clipping has" -" been provided in :code:`DPFedAvgAdaptive`, a subclass of " -":code:`DPFedAvgFixed`. It overrides the above-mentioned methods to do the" -" following." -msgstr "" -"La fonctionnalité supplémentaire nécessaire pour faciliter l'écrêtage " -"adaptatif a été fournie dans :code:`DPFedAvgAdaptive`, une sous-classe de" -" :code:`DPFedAvgFixed`. Elle remplace les méthodes mentionnées ci-dessus " -"pour effectuer les opérations suivantes." 
- -#: ../../source/explanation-differential-privacy.rst:71 -msgid "" -":code:`configure_fit()` : It intercepts the config dict returned by " -":code:`super.configure_fit()` to add the key-value pair " -":code:`dpfedavg_adaptive_clip_enabled:True` to it, which the client " -"interprets as an instruction to include an indicator bit (1 if update " -"norm <= clipping threshold, 0 otherwise) in the results returned by it." -msgstr "" -":code:`configure_fit()` : Il intercepte le dict de configuration renvoyé " -"par :code:`super.configure_fit()` pour y ajouter la paire clé-valeur " -":code:`dpfedavg_adaptive_clip_enabled:True`, que le client interprète " -"comme une instruction d'inclure un bit indicateur (1 si la norme de mise " -"à jour <= seuil d'écrêtage, 0 sinon) dans les résultats qu'il renvoie." - -#: ../../source/explanation-differential-privacy.rst:73 -msgid "" -":code:`aggregate_fit()` : It follows a call to " -":code:`super.aggregate_fit()` with one to :code:`__update_clip_norm__()`," -" a procedure which adjusts the clipping threshold on the basis of the " -"indicator bits received from the sampled clients." -msgstr "" -":code:`aggregate_fit()` : Il fait suivre un appel à " -":code:`super.aggregate_fit()` d'un appel à " -":code:`__update_clip_norm__()`, une procédure qui ajuste le seuil " -"d'écrêtage sur la base des bits indicateurs reçus des clients " -"échantillonnés." - -#: ../../source/explanation-differential-privacy.rst:77 -msgid "Client-side logic" -msgstr "Logique côté client" +"Each client adds noise to the local updates before sending them to the " +"server. 
To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " +"the sensitivity of the local model to be ∆, Gaussian noise is applied " +"with a noise scale of σ where:" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:79 +#: ../../source/explanation-differential-privacy.rst:120 msgid "" -"The client-side capabilities required can be completely captured through " -"wrapper logic for just the :code:`fit()` method of the " -":code:`NumPyClient` abstract class. To be precise, we need to *post-" -"process* the update computed by the wrapped client to clip it, if " -"necessary, to the threshold value supplied by the server as part of the " -"config dictionary. In addition to this, it may need to perform some extra" -" work if either (or both) of the following keys are also present in the " -"dict." +"\\small\n" +"\\frac{∆ \\times \\sqrt{2 \\times " +"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" +"\n" msgstr "" -"Les capacités requises côté client peuvent être entièrement capturées par" -" une logique de wrapper pour la seule méthode :code:`fit()` de la classe " -"abstraite :code:`NumPyClient`. Pour être précis, nous devons *post-" -"traiter* la mise à jour calculée par le client wrapped pour l'écrêter, si" -" nécessaire, à la valeur seuil fournie par le serveur dans le cadre du " -"dictionnaire de configuration. En plus de cela, il peut avoir besoin " -"d'effectuer un travail supplémentaire si l'une des clés suivantes (ou les" -" deux) est également présente dans le dict." -#: ../../source/explanation-differential-privacy.rst:81 +#: ../../source/explanation-differential-privacy.rst:125 msgid "" -":code:`dpfedavg_noise_stddev` : Generate and add the specified amount of " -"noise to the clipped update." +"Each client adds noise to the gradients of the model during the local " +"training (DP-SGD). More specifically, in this approach, gradients are " +"clipped and an amount of calibrated noise is injected into the gradients." 
msgstr "" -":code:`dpfedavg_noise_stddev` : Génère et ajoute la quantité de bruit " -"spécifiée à la mise à jour de l'écrêtage." -#: ../../source/explanation-differential-privacy.rst:82 +#: ../../source/explanation-differential-privacy.rst:128 msgid "" -":code:`dpfedavg_adaptive_clip_enabled` : Augment the metrics dict in the " -":code:`FitRes` object being returned to the server with an indicator bit," -" calculated as described earlier." +"Please note that these two approaches are providing privacy at different " +"levels." msgstr "" -":code:`dpfedavg_adaptive_clip_enabled` : Complète les métriques dict dans" -" l'objet :code:`FitRes` renvoyé au serveur avec un bit indicateur, " -"calculé comme décrit précédemment." -#: ../../source/explanation-differential-privacy.rst:86 -msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" -msgstr "Effectuer l'analyse :math:`(\\epsilon, \\delta)`" +#: ../../source/explanation-differential-privacy.rst:131 +#, fuzzy +msgid "**References:**" +msgstr "Référence" -#: ../../source/explanation-differential-privacy.rst:88 -msgid "" -"Assume you have trained for :math:`n` rounds with sampling fraction " -":math:`q` and noise multiplier :math:`z`. In order to calculate the " -":math:`\\epsilon` value this would result in for a particular " -":math:`\\delta`, the following script may be used." +#: ../../source/explanation-differential-privacy.rst:133 +msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." msgstr "" -"Supposons que tu te sois entraîné pendant :math:`n` tours avec la " -"fraction d'échantillonnage :math:`q` et le multiplicateur de bruit " -":math:`z`. Afin de calculer la valeur :math:`epsilon` qui en résulterait " -"pour un :math:`\\delta` particulier, le script suivant peut être utilisé." -#: ../../source/explanation-differential-privacy.rst:98 +#: ../../source/explanation-differential-privacy.rst:135 #, fuzzy msgid "" -"McMahan et al. 
\"Learning Differentially Private Recurrent Language " -"Models.\" International Conference on Learning Representations (ICLR), " -"2017." +"[2] McMahan et al. Learning Differentially Private Recurrent Language " +"Models." msgstr "" "McMahan, H. Brendan, et al. \"Learning differentially private recurrent " "language models\", arXiv preprint arXiv:1710.06963 (2017)." -#: ../../source/explanation-differential-privacy.rst:100 -#, fuzzy +#: ../../source/explanation-differential-privacy.rst:137 msgid "" -"Andrew, Galen, et al. \"Differentially Private Learning with Adaptive " -"Clipping.\" Advances in Neural Information Processing Systems (NeurIPS), " -"2021." +"[3] Geyer et al. Differentially Private Federated Learning: A Client " +"Level Perspective." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:139 +#, fuzzy +msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." msgstr "" "Andrew, Galen, et al. \"Differentially private learning with adaptive " "clipping\" Advances in Neural Information Processing Systems 34 (2021) : " @@ -5161,6 +4653,7 @@ msgid "As a reference, this document follows the above structure." msgstr "À titre de référence, ce document suit la structure ci-dessus." #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 +#: ../../source/ref-api/flwr.common.Metadata.rst:2 msgid "Metadata" msgstr "Métadonnées" @@ -5598,13 +5091,12 @@ msgstr "" #, fuzzy msgid "" "This can be achieved by customizing an existing strategy or by " -"`implementing a custom strategy from scratch " -"`_. " -"Here's a nonsensical example that customizes :code:`FedAvg` by adding a " -"custom ``\"hello\": \"world\"`` configuration key/value pair to the " -"config dict of a *single client* (only the first client in the list, the " -"other clients in this round to not receive this \"special\" config " -"value):" +":doc:`implementing a custom strategy from scratch `. 
Here's a nonsensical example that customizes :code:`FedAvg`" +" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" +" to the config dict of a *single client* (only the first client in the " +"list, the other clients in this round to not receive this \"special\" " +"config value):" msgstr "" "Ceci peut être réalisé en personnalisant une stratégie existante ou en " "`mettant en œuvre une stratégie personnalisée à partir de zéro " @@ -6048,11 +5540,12 @@ msgstr "" "modèle global actuel :code:`parameters` et :code:`config` dict" #: ../../source/how-to-implement-strategies.rst:236 +#, fuzzy msgid "" "More sophisticated implementations can use :code:`configure_fit` to " "implement custom client selection logic. A client will only participate " "in a round if the corresponding :code:`ClientProxy` is included in the " -"the list returned from :code:`configure_fit`." +"list returned from :code:`configure_fit`." msgstr "" "Les implémentations plus sophistiquées peuvent utiliser " ":code:`configure_fit` pour mettre en œuvre une logique de sélection des " @@ -6154,11 +5647,12 @@ msgstr "" "le modèle global actuel :code:`parameters` et :code:`config` dict" #: ../../source/how-to-implement-strategies.rst:283 +#, fuzzy msgid "" "More sophisticated implementations can use :code:`configure_evaluate` to " "implement custom client selection logic. A client will only participate " "in a round if the corresponding :code:`ClientProxy` is included in the " -"the list returned from :code:`configure_evaluate`." +"list returned from :code:`configure_evaluate`." 
msgstr "" "Les implémentations plus sophistiquées peuvent utiliser " ":code:`configure_evaluate` pour mettre en œuvre une logique de sélection " @@ -6334,9 +5828,7 @@ msgid "Install via Docker" msgstr "Installer Flower" #: ../../source/how-to-install-flower.rst:60 -msgid "" -"`How to run Flower using Docker `_" +msgid ":doc:`How to run Flower using Docker `" msgstr "" #: ../../source/how-to-install-flower.rst:63 @@ -6689,17 +6181,17 @@ msgid "Resources" msgstr "Ressources" #: ../../source/how-to-monitor-simulation.rst:234 +#, fuzzy msgid "" -"Ray Dashboard: ``_" +"Ray Dashboard: ``_" msgstr "" "Tableau de bord Ray : ``_" #: ../../source/how-to-monitor-simulation.rst:236 -msgid "" -"Ray Metrics: ``_" +#, fuzzy +msgid "Ray Metrics: ``_" msgstr "" "Ray Metrics : ``_" @@ -7695,7 +7187,8 @@ msgstr "" msgid "" "Remove \"placeholder\" methods from subclasses of ``Client`` or " "``NumPyClient``. If you, for example, use server-side evaluation, then " -"empty placeholder implementations of ``evaluate`` are no longer necessary." +"empty placeholder implementations of ``evaluate`` are no longer " +"necessary." msgstr "" "Supprime les méthodes \"placeholder\" des sous-classes de ``Client`` ou " "de ``NumPyClient``. Si tu utilises, par exemple, l'évaluation côté " @@ -7848,23 +7341,173 @@ msgid "" msgstr "" #: ../../source/how-to-use-built-in-mods.rst:89 -msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" +msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" msgstr "" -#: ../../source/how-to-use-strategies.rst:2 +#: ../../source/how-to-use-differential-privacy.rst:2 #, fuzzy -msgid "Use strategies" -msgstr "Stratégies personnalisées" +msgid "Use Differential Privacy" +msgstr "Confidentialité différentielle" -#: ../../source/how-to-use-strategies.rst:4 +#: ../../source/how-to-use-differential-privacy.rst:3 msgid "" -"Flower allows full customization of the learning process through the " -":code:`Strategy` abstraction. 
A number of built-in strategies are " -"provided in the core framework." +"This guide explains how you can utilize differential privacy in the " +"Flower framework. If you are not yet familiar with differential privacy, " +"you can refer to :doc:`explanation-differential-privacy`." msgstr "" -"Flower permet une personnalisation complète du processus d'apprentissage " -"grâce à l'abstraction :code:`Stratégie`. Un certain nombre de stratégies " -"intégrées sont fournies dans le cadre principal." + +#: ../../source/how-to-use-differential-privacy.rst:7 +msgid "" +"Differential Privacy in Flower is in a preview phase. If you plan to use " +"these features in a production environment with sensitive data, feel free" +" contact us to discuss your requirements and to receive guidance on how " +"to best use these features." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:12 +msgid "" +"This approach consists of two seprate phases: clipping of the updates and" +" adding noise to the aggregated model. For the clipping phase, Flower " +"framework has made it possible to decide whether to perform clipping on " +"the server side or the client side." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:15 +msgid "" +"**Server-side Clipping**: This approach has the advantage of the server " +"enforcing uniform clipping across all clients' updates and reducing the " +"communication overhead for clipping values. However, it also has the " +"disadvantage of increasing the computational load on the server due to " +"the need to perform the clipping operation for all clients." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:16 +msgid "" +"**Client-side Clipping**: This approach has the advantage of reducing the" +" computational overhead on the server. However, it also has the " +"disadvantage of lacking centralized control, as the server has less " +"control over the clipping process." 
+msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:21 +#, fuzzy +msgid "Server-side Clipping" +msgstr "Logique côté serveur" + +#: ../../source/how-to-use-differential-privacy.rst:22 +msgid "" +"For central DP with server-side clipping, there are two :code:`Strategy` " +"classes that act as wrappers around the actual :code:`Strategy` instance " +"(for example, :code:`FedAvg`). The two wrapper classes are " +":code:`DifferentialPrivacyServerSideFixedClipping` and " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " +"adaptive clipping." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:-1 +#, fuzzy +msgid "server side clipping" +msgstr "Logique côté serveur" + +#: ../../source/how-to-use-differential-privacy.rst:31 +msgid "" +"The code sample below enables the :code:`FedAvg` strategy to use server-" +"side fixed clipping using the " +":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " +"same approach can be used with " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " +"corresponding input parameters." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:52 +#, fuzzy +msgid "Client-side Clipping" +msgstr "Logique côté client" + +#: ../../source/how-to-use-differential-privacy.rst:53 +msgid "" +"For central DP with client-side clipping, the server sends the clipping " +"value to selected clients on each round. Clients can use existing Flower " +":code:`Mods` to perform the clipping. Two mods are available for fixed " +"and adaptive client-side clipping: :code:`fixedclipping_mod` and " +":code:`adaptiveclipping_mod` with corresponding server-side wrappers " +":code:`DifferentialPrivacyClientSideFixedClipping` and " +":code:`DifferentialPrivacyClientSideAdaptiveClipping`." 
+msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:-1 +#, fuzzy +msgid "client side clipping" +msgstr "Logique côté client" + +#: ../../source/how-to-use-differential-privacy.rst:63 +msgid "" +"The code sample below enables the :code:`FedAvg` strategy to use " +"differential privacy with client-side fixed clipping using both the " +":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " +"the client, :code:`fixedclipping_mod`:" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:80 +msgid "" +"In addition to the server-side strategy wrapper, the :code:`ClientApp` " +"needs to configure the matching :code:`fixedclipping_mod` to perform the " +"client-side clipping:" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:97 +msgid "" +"To utilize local differential privacy (DP) and add noise to the client " +"model parameters before transmitting them to the server in Flower, you " +"can use the `LocalDpMod`. The following hyperparameters need to be set: " +"clipping norm value, sensitivity, epsilon, and delta." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "local DP mod" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:104 +msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:122 +msgid "" +"Please note that the order of mods, especially those that modify " +"parameters, is important when using multiple modifiers. Typically, " +"differential privacy (DP) modifiers should be the last to operate on " +"parameters." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:125 +msgid "Local Training using Privacy Engines" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:126 +msgid "" +"For ensuring data instance-level privacy during local model training on " +"the client side, consider leveraging privacy engines such as Opacus and " +"TensorFlow Privacy. 
For examples of using Flower with these engines, " +"please refer to the Flower examples directory (`Opacus " +"`_, `Tensorflow" +" Privacy `_)." +msgstr "" + +#: ../../source/how-to-use-strategies.rst:2 +#, fuzzy +msgid "Use strategies" +msgstr "Stratégies personnalisées" + +#: ../../source/how-to-use-strategies.rst:4 +msgid "" +"Flower allows full customization of the learning process through the " +":code:`Strategy` abstraction. A number of built-in strategies are " +"provided in the core framework." +msgstr "" +"Flower permet une personnalisation complète du processus d'apprentissage " +"grâce à l'abstraction :code:`Stratégie`. Un certain nombre de stratégies " +"intégrées sont fournies dans le cadre principal." #: ../../source/how-to-use-strategies.rst:6 msgid "" @@ -8004,11 +7647,11 @@ msgstr "Quickstart tutorials" msgid "How-to guides" msgstr "Guides" -#: ../../source/index.rst:97 +#: ../../source/index.rst:98 msgid "Legacy example guides" msgstr "" -#: ../../source/index.rst:108 ../../source/index.rst:112 +#: ../../source/index.rst:109 ../../source/index.rst:113 msgid "Explanations" msgstr "Explications" @@ -8016,26 +7659,26 @@ msgstr "Explications" msgid "API reference" msgstr "Référence pour l'API" -#: ../../source/index.rst:137 +#: ../../source/index.rst:138 msgid "Reference docs" msgstr "Référence pour la documentation" -#: ../../source/index.rst:153 +#: ../../source/index.rst:154 #, fuzzy msgid "Contributor tutorials" msgstr "Configuration du contributeur" -#: ../../source/index.rst:160 +#: ../../source/index.rst:161 #, fuzzy msgid "Contributor how-to guides" msgstr "Guide pour les contributeurs" -#: ../../source/index.rst:173 +#: ../../source/index.rst:174 #, fuzzy msgid "Contributor explanations" msgstr "Explications" -#: ../../source/index.rst:179 +#: ../../source/index.rst:180 #, fuzzy msgid "Contributor references" msgstr "Configuration du contributeur" @@ -8144,7 +7787,7 @@ msgstr "" "Guides orientés sur la résolutions étapes par étapes de 
problèmes ou " "objectifs specifiques." -#: ../../source/index.rst:110 +#: ../../source/index.rst:111 msgid "" "Understanding-oriented concept guides explain and discuss key topics and " "underlying ideas behind Flower and collaborative AI." @@ -8152,29 +7795,29 @@ msgstr "" "Guides orientés sur la compréhension et l'explication des sujets et idées" " de fonds sur lesquels sont construits Flower et l'IA collaborative." -#: ../../source/index.rst:120 +#: ../../source/index.rst:121 #, fuzzy msgid "References" msgstr "Référence" -#: ../../source/index.rst:122 +#: ../../source/index.rst:123 msgid "Information-oriented API reference and other reference material." msgstr "Référence de l'API orientée sur l'information pure." -#: ../../source/index.rst:131::1 +#: ../../source/index.rst:132::1 msgid ":py:obj:`flwr `\\" msgstr "" -#: ../../source/index.rst:131::1 flwr:1 of +#: ../../source/index.rst:132::1 flwr:1 of msgid "Flower main package." msgstr "" -#: ../../source/index.rst:148 +#: ../../source/index.rst:149 #, fuzzy msgid "Contributor docs" msgstr "Configuration du contributeur" -#: ../../source/index.rst:150 +#: ../../source/index.rst:151 #, fuzzy msgid "" "The Flower community welcomes contributions. The following docs are " @@ -8201,12 +7844,22 @@ msgstr "flower-driver-api" msgid "flower-fleet-api" msgstr "flower-fleet-api" +#: ../../source/ref-api-cli.rst:37 +#, fuzzy +msgid "flower-client-app" +msgstr "Flower ClientApp." 
+ +#: ../../source/ref-api-cli.rst:47 +#, fuzzy +msgid "flower-server-app" +msgstr "flower-driver-api" + #: ../../source/ref-api/flwr.rst:2 #, fuzzy msgid "flwr" msgstr "Fleur" -#: ../../source/ref-api/flwr.rst:25 ../../source/ref-api/flwr.server.rst:48 +#: ../../source/ref-api/flwr.rst:25 ../../source/ref-api/flwr.server.rst:52 msgid "Modules" msgstr "" @@ -8232,7 +7885,7 @@ msgid ":py:obj:`flwr.server `\\" msgstr "" #: ../../source/ref-api/flwr.rst:35::1 -#: ../../source/ref-api/flwr.server.rst:37::1 flwr.server:1 +#: ../../source/ref-api/flwr.server.rst:41::1 flwr.server:1 #: flwr.server.server.Server:1 of #, fuzzy msgid "Flower server." @@ -8253,7 +7906,6 @@ msgstr "client" #: ../../source/ref-api/flwr.client.rst:13 #: ../../source/ref-api/flwr.common.rst:13 -#: ../../source/ref-api/flwr.server.driver.rst:13 #: ../../source/ref-api/flwr.server.rst:13 #: ../../source/ref-api/flwr.simulation.rst:13 #, fuzzy @@ -8293,10 +7945,10 @@ msgid "Start a Flower NumPyClient which connects to a gRPC server." 
msgstr "" #: ../../source/ref-api/flwr.client.rst:26 -#: ../../source/ref-api/flwr.common.rst:31 -#: ../../source/ref-api/flwr.server.driver.rst:24 -#: ../../source/ref-api/flwr.server.rst:28 +#: ../../source/ref-api/flwr.common.rst:32 +#: ../../source/ref-api/flwr.server.rst:29 #: ../../source/ref-api/flwr.server.strategy.rst:17 +#: ../../source/ref-api/flwr.server.workflow.rst:17 msgid "Classes" msgstr "" @@ -8311,7 +7963,7 @@ msgstr "" #: ../../source/ref-api/flwr.client.rst:33::1 msgid "" -":py:obj:`ClientApp `\\ \\(client\\_fn\\[\\, " +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " "mods\\]\\)" msgstr "" @@ -8339,8 +7991,12 @@ msgstr "" #: ../../source/ref-api/flwr.client.Client.rst:15 #: ../../source/ref-api/flwr.client.ClientApp.rst:15 #: ../../source/ref-api/flwr.client.NumPyClient.rst:15 +#: ../../source/ref-api/flwr.common.Array.rst:15 #: ../../source/ref-api/flwr.common.ClientMessage.rst:15 +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Context.rst:15 #: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 +#: ../../source/ref-api/flwr.common.Error.rst:15 #: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 #: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 #: ../../source/ref-api/flwr.common.FitIns.rst:15 @@ -8349,20 +8005,32 @@ msgstr "" #: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 #: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 #: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 +#: ../../source/ref-api/flwr.common.Message.rst:15 +#: ../../source/ref-api/flwr.common.MessageType.rst:15 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 +#: ../../source/ref-api/flwr.common.Metadata.rst:15 +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 #: ../../source/ref-api/flwr.common.Parameters.rst:15 +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 #: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 +#: 
../../source/ref-api/flwr.common.RecordSet.rst:15 #: ../../source/ref-api/flwr.common.ServerMessage.rst:15 #: ../../source/ref-api/flwr.common.Status.rst:15 #: ../../source/ref-api/flwr.server.ClientManager.rst:15 +#: ../../source/ref-api/flwr.server.Driver.rst:15 #: ../../source/ref-api/flwr.server.History.rst:15 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 #: ../../source/ref-api/flwr.server.Server.rst:15 +#: ../../source/ref-api/flwr.server.ServerApp.rst:15 #: ../../source/ref-api/flwr.server.ServerConfig.rst:15 #: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 -#: ../../source/ref-api/flwr.server.driver.Driver.rst:15 -#: ../../source/ref-api/flwr.server.driver.GrpcDriver.rst:15 #: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 #: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 #: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 #: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 #: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 #: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 @@ -8380,6 +8048,9 @@ msgstr "" #: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 #: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 #: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 msgid "Methods" msgstr "" @@ -8459,9 +8130,12 @@ msgstr "" #: 
../../source/ref-api/flwr.client.Client.rst:46 #: ../../source/ref-api/flwr.client.NumPyClient.rst:46 +#: ../../source/ref-api/flwr.common.Array.rst:28 #: ../../source/ref-api/flwr.common.ClientMessage.rst:25 #: ../../source/ref-api/flwr.common.Code.rst:19 +#: ../../source/ref-api/flwr.common.Context.rst:25 #: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 +#: ../../source/ref-api/flwr.common.Error.rst:25 #: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 #: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 #: ../../source/ref-api/flwr.common.EventType.rst:19 @@ -8471,10 +8145,16 @@ msgstr "" #: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 #: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 #: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 +#: ../../source/ref-api/flwr.common.Message.rst:37 +#: ../../source/ref-api/flwr.common.MessageType.rst:25 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 +#: ../../source/ref-api/flwr.common.Metadata.rst:25 #: ../../source/ref-api/flwr.common.Parameters.rst:25 #: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 +#: ../../source/ref-api/flwr.common.RecordSet.rst:25 #: ../../source/ref-api/flwr.common.ServerMessage.rst:25 #: ../../source/ref-api/flwr.common.Status.rst:25 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 #: ../../source/ref-api/flwr.server.ServerConfig.rst:25 msgid "Attributes" msgstr "" @@ -8492,14 +8172,25 @@ msgstr "" #: flwr.client.numpy_client.NumPyClient.fit #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.server.app.start_server +#: flwr.common.context.Context flwr.common.message.Error +#: flwr.common.message.Message flwr.common.message.Message.create_error_reply +#: flwr.common.message.Message.create_reply flwr.common.message.Metadata +#: flwr.common.record.parametersrecord.Array flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.register #: 
flwr.server.client_manager.ClientManager.unregister #: flwr.server.client_manager.SimpleClientManager.register #: flwr.server.client_manager.SimpleClientManager.unregister #: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.app.start_driver flwr.server.driver.driver.Driver +#: flwr.server.compat.app.start_driver flwr.server.driver.driver.Driver +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive #: flwr.server.strategy.bulyan.Bulyan +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit #: flwr.server.strategy.fedadagrad.FedAdagrad @@ -8515,7 +8206,10 @@ msgstr "" #: flwr.server.strategy.strategy.Strategy.configure_fit #: flwr.server.strategy.strategy.Strategy.evaluate #: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow +#: flwr.simulation.app.start_simulation +#: flwr.simulation.run_simulation.run_simulation of #, fuzzy msgid "Parameters" msgstr "Paramètres du modèle." 
@@ -8534,13 +8228,17 @@ msgstr "" #: flwr.client.numpy_client.NumPyClient.fit #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.server.app.start_server +#: flwr.common.message.Message.create_reply flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.num_available #: flwr.server.client_manager.ClientManager.register #: flwr.server.client_manager.SimpleClientManager.num_available #: flwr.server.client_manager.SimpleClientManager.register #: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.app.start_driver +#: flwr.server.compat.app.start_driver +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate @@ -8565,13 +8263,17 @@ msgstr "" #: flwr.client.client.Client.get_properties #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.server.app.start_server +#: flwr.common.message.Message.create_reply flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.num_available #: flwr.server.client_manager.ClientManager.register #: flwr.server.client_manager.SimpleClientManager.num_available #: flwr.server.client_manager.SimpleClientManager.register #: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.app.start_driver +#: flwr.server.compat.app.start_driver +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive #: 
flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate @@ -8623,23 +8325,38 @@ msgstr "" msgid "ClientApp" msgstr "client" -#: flwr.client.client_app.ClientApp:1 flwr.common.typing.ClientMessage:1 +#: flwr.client.client_app.ClientApp:1 flwr.common.constant.MessageType:1 +#: flwr.common.constant.MessageTypeLegacy:1 flwr.common.context.Context:1 +#: flwr.common.message.Error:1 flwr.common.message.Message:1 +#: flwr.common.message.Metadata:1 flwr.common.record.parametersrecord.Array:1 +#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 #: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 #: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 #: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 #: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 #: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 #: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 -#: flwr.common.typing.Status:1 flwr.server.app.ServerConfig:1 -#: flwr.server.driver.driver.Driver:1 -#: flwr.server.driver.grpc_driver.GrpcDriver:1 flwr.server.history.History:1 -#: flwr.server.server.Server:1 of +#: flwr.common.typing.Status:1 flwr.server.driver.driver.Driver:1 +#: flwr.server.history.History:1 flwr.server.server.Server:1 +#: flwr.server.server_app.ServerApp:1 flwr.server.server_config.ServerConfig:1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of msgid "Bases: :py:class:`object`" msgstr "" -#: flwr.client.app.start_client:33 flwr.client.app.start_numpy_client:36 -#: flwr.client.client_app.ClientApp:4 flwr.server.app.start_server:41 -#: flwr.server.driver.app.start_driver:30 of +#: flwr.client.app.start_client:41 
flwr.client.app.start_numpy_client:36 +#: flwr.client.client_app.ClientApp:4 +#: flwr.client.client_app.ClientApp.evaluate:4 +#: flwr.client.client_app.ClientApp.query:4 +#: flwr.client.client_app.ClientApp.train:4 flwr.server.app.start_server:41 +#: flwr.server.compat.app.start_driver:32 flwr.server.server_app.ServerApp:4 +#: flwr.server.server_app.ServerApp.main:4 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 +#: of #, fuzzy msgid "Examples" msgstr "Exemples de PyTorch" @@ -8663,6 +8380,34 @@ msgid "" "global attribute `app` that points to an object of type `ClientApp`." msgstr "" +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`evaluate `\\ \\(\\)" +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid "Return a decorator that registers the evaluate fn with the client app." +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`query `\\ \\(\\)" +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.query:1 of +msgid "Return a decorator that registers the query fn with the client app." +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +#, fuzzy +msgid ":py:obj:`train `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.train:1 of +msgid "Return a decorator that registers the train fn with the client app." 
+msgstr "" + #: ../../source/ref-api/flwr.client.NumPyClient.rst:2 msgid "NumPyClient" msgstr "NumPyClient" @@ -8866,7 +8611,7 @@ msgid "" msgstr "" #: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 -#: flwr.server.driver.app.start_driver:21 of +#: flwr.server.compat.app.start_driver:21 of msgid "" "The PEM-encoded root certificates as a byte string or a path string. If " "provided, a secure connection using the certificates will be established " @@ -8886,15 +8631,29 @@ msgid "" "(experimental) - 'rest': HTTP (experimental)" msgstr "" -#: flwr.client.app.start_client:34 flwr.client.app.start_numpy_client:37 of +#: flwr.client.app.start_client:31 of +msgid "" +"The maximum number of times the client will try to connect to the server " +"before giving up in case of a connection error. If set to None, there is " +"no limit to the number of tries." +msgstr "" + +#: flwr.client.app.start_client:35 of +msgid "" +"The maximum duration before the client stops trying to connect to the " +"server in case of connection error. If set to None, there is no limit to " +"the total time." 
+msgstr "" + +#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of msgid "Starting a gRPC client with an insecure server connection:" msgstr "" -#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:44 of +#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of msgid "Starting an SSL-enabled gRPC client using system certificates:" msgstr "" -#: flwr.client.app.start_client:52 flwr.client.app.start_numpy_client:52 of +#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of msgid "Starting an SSL-enabled gRPC client using provided certificates:" msgstr "" @@ -8919,77 +8678,87 @@ msgstr "" msgid "common" msgstr "commun" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.record.conversion_utils.array_from_numpy:1 of +#, fuzzy +msgid "Create Array from NumPy ndarray." +msgstr "Convertit l'objet des paramètres en ndarrays NumPy." + +#: ../../source/ref-api/flwr.common.rst:30::1 msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.bytes_to_ndarray:1 of msgid "Deserialize NumPy ndarray from bytes." msgstr "Désérialise le tableau numérique NumPy à partir d'octets." -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`configure `\\ \\(identifier\\[\\, " "filename\\, host\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.logger.configure:1 of msgid "Configure logging to file and/or remote log server." msgstr "" "Configure la journalisation vers un fichier et/ou un serveur de " "journalisation distant." 
-#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`event `\\ \\(event\\_type\\[\\, " "event\\_details\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.telemetry.event:1 of msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " "\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 logging.Logger.log:1 +#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 #: of msgid "Log 'msg % args' with the integer severity 'level'." msgstr "Enregistre 'msg % args' avec le niveau de sévérité entier 'level'." -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.ndarray_to_bytes:1 of msgid "Serialize NumPy ndarray to bytes." msgstr "Sérialise le tableau numérique NumPy en octets." -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid ":py:obj:`now `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.date.now:1 of msgid "Construct a datetime from time.time() with time zone set to UTC." msgstr "" "Construit une date à partir de time.time() avec le fuseau horaire réglé " "sur UTC." 
-#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`ndarrays_to_parameters `\\ " "\\(ndarrays\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.ndarrays_to_parameters:1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 @@ -8997,191 +8766,372 @@ msgstr "" msgid "Convert NumPy ndarrays to parameters object." msgstr "Convertit les ndarrays NumPy en objets de paramètres." -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`parameters_to_ndarrays `\\ " "\\(parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.parameters_to_ndarrays:1 of msgid "Convert parameters object to NumPy ndarrays." msgstr "Convertit l'objet des paramètres en ndarrays NumPy." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.parametersrecord.Array:1 of +msgid "Array type." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`ClientMessage `\\ " "\\(\\[get\\_properties\\_res\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.ClientMessage:1 of msgid "ClientMessage is a container used to hold one result message." msgstr "" "ClientMessage est un conteneur utilisé pour contenir un message de " "résultat." 
-#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`Code `\\ \\(value\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.Code:1 of msgid "Client status codes." msgstr "Codes d'état du client." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy +msgid "" +":py:obj:`ConfigsRecord `\\ " +"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" +msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +#, fuzzy +msgid "Configs record." +msgstr "Configurer les clients" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`Context `\\ \\(state\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.context.Context:1 of +msgid "State of your run." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.DisconnectRes:1 of msgid "DisconnectRes message from client to server." msgstr "Message DisconnectRes envoyé par le client au serveur." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`EvaluateIns `\\ \\(parameters\\, " "config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.EvaluateIns:1 of msgid "Evaluate instructions for a client." msgstr "Évaluer les instructions pour un client." 
-#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " "num\\_examples\\, metrics\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.EvaluateRes:1 of msgid "Evaluate response from a client." msgstr "Évaluer la réponse d'un client." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`EventType `\\ \\(value\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.telemetry.EventType:1 of msgid "Types of telemetry events." msgstr "Types d'événements télémétriques." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.FitIns:1 of msgid "Fit instructions for a client." msgstr "Instructions d'ajustement pour un client." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`FitRes `\\ \\(status\\, parameters\\, " "num\\_examples\\, metrics\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.FitRes:1 of msgid "Fit response from a client." msgstr "Réponse adaptée d'un client." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.message.Error:1 of +msgid "A dataclass that stores information about an error that occurred." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetParametersIns:1 of msgid "Parameters request for a client." msgstr "Demande de paramètres pour un client." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`GetParametersRes `\\ \\(status\\, " "parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetParametersRes:1 of msgid "Response when asked to return parameters." msgstr "Réponse lorsqu'on te demande de renvoyer des paramètres." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetPropertiesIns:1 of msgid "Properties request for a client." msgstr "Demande de propriétés pour un client." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`GetPropertiesRes `\\ \\(status\\, " "properties\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetPropertiesRes:1 of msgid "Properties response from a client." msgstr "Réponse des propriétés d'un client." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.message.Message:1 of +msgid "State of your application from the viewpoint of the entity using it." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`MessageType `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.constant.MessageType:1 of +msgid "Message type." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.constant.MessageTypeLegacy:1 of +msgid "Legacy message type." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`Metadata `\\ \\(run\\_id\\, " +"message\\_id\\, src\\_node\\_id\\, ...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.message.Metadata:1 of +msgid "A dataclass holding metadata associated with the current message." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`MetricsRecord `\\ " +"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +msgid "Metrics record." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`NDArray `\\" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" "alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " ":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`Parameters `\\ \\(tensors\\, " "tensor\\_type\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.Parameters:1 of msgid "Model parameters." msgstr "Paramètres du modèle." 
-#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.parametersrecord.ParametersRecord:1 of +#, fuzzy +msgid "Parameters record." +msgstr "Paramètres du modèle." + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.ReconnectIns:1 of msgid "ReconnectIns message from server to client." msgstr "Message de reconnexion du serveur au client." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`RecordSet `\\ " +"\\(\\[parameters\\_records\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.recordset.RecordSet:1 of +msgid "RecordSet stores groups of parameters, metrics and configs." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`ServerMessage `\\ " "\\(\\[get\\_properties\\_ins\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.ServerMessage:1 of msgid "ServerMessage is a container used to hold one instruction message." msgstr "" "ServerMessage est un conteneur utilisé pour contenir un message " "d'instruction." -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`Status `\\ \\(code\\, message\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.Status:1 of msgid "Client status." msgstr "Statut du client." 
+#: ../../source/ref-api/flwr.common.Array.rst:2 +msgid "Array" +msgstr "" + +#: flwr.common.record.parametersrecord.Array:3 of +msgid "" +"A dataclass containing serialized data from an array-like or tensor-like " +"object along with some metadata about it." +msgstr "" + +#: flwr.common.record.parametersrecord.Array:6 of +msgid "" +"A string representing the data type of the serialised object (e.g. " +"`np.float32`)" +msgstr "" + +#: flwr.common.record.parametersrecord.Array:8 of +msgid "" +"A list representing the shape of the unserialized array-like object. This" +" is used to deserialize the data (depending on the serialization method) " +"or simply as a metadata field." +msgstr "" + +#: flwr.common.record.parametersrecord.Array:12 of +msgid "" +"A string indicating the type of serialisation mechanism used to generate " +"the bytes in `data` from an array-like or tensor-like object." +msgstr "" + +#: flwr.common.record.parametersrecord.Array:15 of +msgid "A buffer of bytes containing the data." +msgstr "" + +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#, fuzzy +msgid ":py:obj:`numpy `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#: flwr.common.record.parametersrecord.Array.numpy:1 of +#, fuzzy +msgid "Return the array as a NumPy array." 
+msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`dtype `\\" +msgstr "" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#, fuzzy +msgid ":py:obj:`shape `\\" +msgstr "serveur.stratégie.Stratégie" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#, fuzzy +msgid ":py:obj:`stype `\\" +msgstr "serveur.stratégie.Stratégie" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`data `\\" +msgstr "" + #: ../../source/ref-api/flwr.common.ClientMessage.rst:2 #, fuzzy msgid "ClientMessage" @@ -9241,6 +9191,106 @@ msgid "" "`\\" msgstr "" +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 +#, fuzzy +msgid "ConfigsRecord" +msgstr "Configurer les clients" + +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " +":py:class:`float`, :py:class:`str`, :py:class:`bytes`, :py:class:`bool`, " +":py:class:`~typing.List`\\ [:py:class:`int`], :py:class:`~typing.List`\\ " +"[:py:class:`float`], :py:class:`~typing.List`\\ [:py:class:`str`], " +":py:class:`~typing.List`\\ [:py:class:`bytes`], " +":py:class:`~typing.List`\\ [:py:class:`bool`]]]" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1 +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "Remove all items from R." 
+msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 +#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "Return number of Bytes stored in this object." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 +#: flwr.common.record.typeddict.TypedDict.get:1 of +msgid "d defaults to None." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 +#: flwr.common.record.typeddict.TypedDict.pop:1 of +msgid "If key is not found, d is returned if given, otherwise KeyError is raised." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 +#: flwr.common.record.typeddict.TypedDict.update:1 of +msgid "Update R from dict/iterable E and F." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of +msgid "This function counts booleans as occupying 1 Byte." +msgstr "" + +#: ../../source/ref-api/flwr.common.Context.rst:2 +msgid "Context" +msgstr "" + +#: flwr.common.context.Context:3 of +msgid "" +"Holds records added by the entity in a given run and that will stay " +"local. 
This means that the data it holds will never leave the system it's" +" running from. This can be used as an intermediate storage or scratchpad " +"when executing mods. It can also be used as a memory to access at " +"different points during the lifecycle of this entity (e.g. across " +"multiple rounds)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Context.rst:28::1 +#, fuzzy +msgid ":py:obj:`state `\\" +msgstr "serveur.stratégie.Stratégie" + #: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 msgid "DisconnectRes" msgstr "" @@ -9249,6 +9299,34 @@ msgstr "" msgid ":py:obj:`reason `\\" msgstr "" +#: ../../source/ref-api/flwr.common.Error.rst:2 +msgid "Error" +msgstr "" + +#: flwr.common.message.Error:3 of +msgid "An identifier for the error." +msgstr "" + +#: flwr.common.message.Error:5 of +msgid "A reason for why the error arose (e.g. an exception stack-trace)" +msgstr "" + +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`code `\\" +msgstr "" + +#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of +msgid "Error code." +msgstr "" + +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`reason `\\" +msgstr "" + +#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of +msgid "Reason reported about the error." +msgstr "" + #: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 #, fuzzy msgid "EvaluateIns" @@ -9472,18 +9550,352 @@ msgstr "" msgid ":py:obj:`properties `\\" msgstr "" -#: ../../source/ref-api/flwr.common.NDArray.rst:2 -msgid "NDArray" -msgstr "" +#: ../../source/ref-api/flwr.common.Message.rst:2 +#, fuzzy +msgid "Message" +msgstr "Côté serveur" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 -msgid ":py:obj:`tensors `\\" +#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 +#: flwr.common.message.Message:3 of +msgid "A dataclass including information about the message to be executed." +msgstr "" + +#: flwr.common.message.Message:5 of +msgid "" +"Holds records either sent by another entity (e.g. 
sent by the server-side" +" logic to a client, or vice-versa) or that will be sent to it." +msgstr "" + +#: flwr.common.message.Message:8 of +msgid "" +"A dataclass that captures information about an error that took place when" +" processing another message." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid "" +":py:obj:`create_error_reply `\\ " +"\\(error\\, ttl\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_error_reply:1 of +msgid "Construct a reply message indicating an error happened." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid "" +":py:obj:`create_reply `\\ \\(content\\," +" ttl\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_reply:1 of +msgid "Create a reply to this message with specified content and TTL." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_content `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_content:1 of +msgid "Return True if message has content, else False." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_error `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_error:1 of +msgid "Return True if message has an error, else False." +msgstr "" + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`content `\\" +msgstr "" + +#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 +#: of +#, fuzzy +msgid "The content of this message." +msgstr "Évaluer la réponse d'un client." + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`error `\\" +msgstr "" + +#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of +msgid "Error captured by this message." 
+msgstr "" + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`metadata `\\" +msgstr "" + +#: flwr.common.message.Message.create_error_reply:3 of +msgid "The error that was encountered." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.ttl:1 flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 flwr.common.message.Metadata:16 +#: of +msgid "Time-to-live for this message." +msgstr "" + +#: flwr.common.message.Message.create_reply:3 of +msgid "" +"The method generates a new `Message` as a reply to this message. It " +"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " +"this message and sets 'reply_to_message' to the ID of this message." +msgstr "" + +#: flwr.common.message.Message.create_reply:7 of +msgid "The content for the reply message." +msgstr "" + +#: flwr.common.message.Message.create_reply:12 of +msgid "A new `Message` instance representing the reply." +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:2 +msgid "MessageType" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`EVALUATE `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`QUERY `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`TRAIN `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 +msgid "MessageTypeLegacy" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PARAMETERS `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PROPERTIES `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of +msgid "An identifier for the current run." 
+msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of +msgid "An identifier for the current message." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of +msgid "An identifier for the node sending this message." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1 +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.message.Metadata:9 of +msgid "An identifier for the node receiving this message." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of +msgid "An identifier for the message this message replies to." +msgstr "" + +#: flwr.common.message.Metadata:13 of +msgid "" +"An identifier for grouping messages. In some settings, this is used as " +"the FL round." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of +msgid "A string that encodes the action to be executed on the receiving end." +msgstr "" + +#: flwr.common.message.Metadata:21 of +msgid "" +"An identifier that can be used when loading a particular data partition " +"for a ClientApp. Making use of this identifier is more relevant when " +"conducting simulations." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`dst_node_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`group_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.group_id:1 of +msgid "An identifier for grouping messages." 
+msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`message_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`message_type `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`partition_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.partition_id:1 of +msgid "An identifier telling which data partition a ClientApp should use." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`reply_to_message `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`run_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`src_node_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`ttl `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 +msgid "MetricsRecord" +msgstr "" + +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " +":py:class:`float`, :py:class:`~typing.List`\\ [:py:class:`int`], " +":py:class:`~typing.List`\\ [:py:class:`float`]]]" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "" +":py:obj:`update `\\ 
\\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.NDArray.rst:2 +msgid "NDArray" +msgstr "" + +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensors `\\" msgstr "" #: ../../source/ref-api/flwr.common.Parameters.rst:29::1 msgid ":py:obj:`tensor_type `\\" msgstr "" +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 +#, fuzzy +msgid "ParametersRecord" +msgstr "Paramètres du modèle." + +#: flwr.common.record.parametersrecord.ParametersRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord:3 of +msgid "" +"A dataclass storing named Arrays in order. This means that it holds " +"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " +"viewed as an equivalent to PyTorch's state_dict, but holding serialised " +"tensors instead." 
+msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of +msgid "" +"Note that a small amount of Bytes might also be included in this counting" +" that correspond to metadata of the serialized object (e.g. of NumPy " +"array) needed for deseralization." +msgstr "" + #: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 #, fuzzy msgid "ReconnectIns" @@ -9493,6 +9905,37 @@ msgstr "Collecte centralisée des données" msgid ":py:obj:`seconds `\\" msgstr "" +#: ../../source/ref-api/flwr.common.RecordSet.rst:2 +msgid "RecordSet" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`configs_records `\\" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1 +#: flwr.common.RecordSet.configs_records:1::1 of +msgid "Dictionary holding ConfigsRecord instances." +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`metrics_records `\\" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.metrics_records:1 of +msgid "Dictionary holding MetricsRecord instances." 
+msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`parameters_records `\\" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.parameters_records:1 of +msgid "Dictionary holding ParametersRecord instances." +msgstr "" + #: ../../source/ref-api/flwr.common.ServerMessage.rst:2 #, fuzzy msgid "ServerMessage" @@ -9531,6 +9974,10 @@ msgstr "" msgid ":py:obj:`message `\\" msgstr "" +#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 +msgid "array\\_from\\_numpy" +msgstr "" + #: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 msgid "bytes\\_to\\_ndarray" msgstr "" @@ -9581,81 +10028,132 @@ msgstr "" msgid "server" msgstr "serveur" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_driver_api `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.run_driver_api:1 of #, fuzzy msgid "Run Flower server (Driver API)." msgstr "flower-driver-api" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_fleet_api `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.run_fleet_api:1 of #, fuzzy msgid "Run Flower server (Fleet API)." msgstr "flower-fleet-api" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_server_app `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.app.run_server_app:1 of +#: ../../source/ref-api/flwr.server.rst:27::1 +#: flwr.server.run_serverapp.run_server_app:1 of #, fuzzy msgid "Run Flower server app." 
msgstr "Serveur de Flower" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_superlink `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.run_superlink:1 of msgid "Run Flower server (Driver API and Fleet API)." msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 +msgid "" +":py:obj:`start_driver `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:27::1 +#: flwr.server.compat.app.start_driver:1 of +#, fuzzy +msgid "Start a Flower Driver API server." +msgstr "Tout d'abord, démarre un serveur Flower :" + +#: ../../source/ref-api/flwr.server.rst:27::1 msgid "" ":py:obj:`start_server `\\ \\(\\*\\[\\, " "server\\_address\\, server\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.start_server:1 of msgid "Start a Flower server using the gRPC transport layer." msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 msgid ":py:obj:`ClientManager `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.client_manager.ClientManager:1 of msgid "Abstract base class for managing Flower clients." msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 +#, fuzzy +msgid "" +":py:obj:`Driver `\\ " +"\\(\\[driver\\_service\\_address\\, ...\\]\\)" +msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" + +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.driver.driver.Driver:1 of +msgid "`Driver` class provides an interface to the Driver API." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 msgid ":py:obj:`History `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.history.History:1 of msgid "History class for training and/or evaluation metrics collection." msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 +msgid "" +":py:obj:`LegacyContext `\\ \\(state\\[\\, " +"config\\, strategy\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Legacy Context." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 msgid "" ":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " "strategy\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 +#, fuzzy +msgid "" +":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " +"strategy\\, ...\\]\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.server_app.ServerApp:1 of +#, fuzzy +msgid "Flower ServerApp." +msgstr "Serveur de Flower" + +#: ../../source/ref-api/flwr.server.rst:41::1 #, fuzzy msgid "" ":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," @@ -9665,41 +10163,42 @@ msgstr "" "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " "...)``" -#: ../../source/ref-api/flwr.server.rst:37::1 -#: flwr.server.app.ServerConfig:1 of +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.server_config.ServerConfig:1 of #, fuzzy msgid "Flower server config." 
msgstr "Serveur de Flower" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.client_manager.SimpleClientManager:1 of msgid "Provides a pool of available clients." msgstr "" -#: ../../source/ref-api/flwr.server.rst:56::1 -msgid ":py:obj:`flwr.server.driver `\\" -msgstr "" - -#: ../../source/ref-api/flwr.server.rst:56::1 flwr.server.driver:1 -#: of -#, fuzzy -msgid "Flower driver SDK." -msgstr "Serveur de Flower" - -#: ../../source/ref-api/flwr.server.rst:56::1 +#: ../../source/ref-api/flwr.server.rst:60::1 #, fuzzy msgid ":py:obj:`flwr.server.strategy `\\" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.rst:56::1 +#: ../../source/ref-api/flwr.server.rst:60::1 #: flwr.server.strategy:1 of msgid "Contains the strategy abstraction and different implementations." msgstr "" +#: ../../source/ref-api/flwr.server.rst:60::1 +#, fuzzy +msgid ":py:obj:`flwr.server.workflow `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.rst:60::1 +#: flwr.server.workflow:1 of +#, fuzzy +msgid "Workflows." +msgstr "Flux de travail" + #: ../../source/ref-api/flwr.server.ClientManager.rst:2 #, fuzzy msgid "ClientManager" @@ -9793,36 +10292,250 @@ msgstr "" msgid "This method is idempotent." msgstr "" -#: ../../source/ref-api/flwr.server.History.rst:2 -msgid "History" -msgstr "" - -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_loss_centralized " -"`\\ \\(server\\_round\\, " -"loss\\)" -msgstr "" - -#: flwr.server.history.History.add_loss_centralized:1 -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:2 #, fuzzy -msgid "Add one loss entry (from centralized evaluation)." 
-msgstr "Évaluation centralisée" +msgid "Driver" +msgstr "serveur" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.driver.driver.Driver:3 of msgid "" -":py:obj:`add_loss_distributed " -"`\\ \\(server\\_round\\, " -"loss\\)" +"The IPv4 or IPv6 address of the Driver API server. Defaults to " +"`\"[::]:9091\"`." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_loss_distributed:1 of -msgid "Add one loss entry (from distributed evaluation)." +#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +msgid "" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order: * CA certificate. * " +"server certificate. * server private key." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +msgid "" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order:" +msgstr "" + +#: flwr.server.app.start_server:32 flwr.server.driver.driver.Driver:10 of +#, fuzzy +msgid "CA certificate." +msgstr "Certificats" + +#: flwr.server.app.start_server:33 flwr.server.driver.driver.Driver:11 of +#, fuzzy +msgid "server certificate." +msgstr "Certificats" + +#: flwr.server.app.start_server:34 flwr.server.driver.driver.Driver:12 of +#, fuzzy +msgid "server private key." +msgstr "stratégie.du.serveur" + +#: flwr.server.driver.driver.Driver.close:1::1 of +#, fuzzy +msgid ":py:obj:`close `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: flwr.server.driver.driver.Driver.close:1 +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "Disconnect from the SuperLink if connected." 
+msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "" +":py:obj:`create_message `\\ " +"\\(content\\, message\\_type\\, ...\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.create_message:1 of +msgid "Create a new message with specified parameters." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid ":py:obj:`get_node_ids `\\ \\(\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.get_node_ids:1 of +msgid "Get node IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "" +":py:obj:`pull_messages `\\ " +"\\(message\\_ids\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.pull_messages:1 of +msgid "Pull messages based on message IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "" +":py:obj:`push_messages `\\ " +"\\(messages\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.push_messages:1 of +msgid "Push messages to specified node IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +#, fuzzy +msgid "" +":py:obj:`send_and_receive `\\ " +"\\(messages\\, \\*\\[\\, timeout\\]\\)" +msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.send_and_receive:1 of +msgid "Push messages to specified node IDs and pull the reply messages." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:3 of +msgid "" +"This method constructs a new `Message` with given content and metadata. " +"The `run_id` and `src_node_id` will be set automatically." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:6 of +msgid "" +"The content for the new message. 
This holds records that are to be sent " +"to the destination node." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:9 of +msgid "" +"The type of the message, defining the action to be executed on the " +"receiving end." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:12 of +msgid "The ID of the destination node to which the message is being sent." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:14 of +msgid "" +"The ID of the group to which this message is associated. In some " +"settings, this is used as the FL round." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:17 of +msgid "" +"Time-to-live for the round trip of this message, i.e., the time from " +"sending this message to receiving a reply. It specifies the duration for " +"which the message and its potential reply are considered valid." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:22 of +msgid "" +"**message** -- A new `Message` instance with the specified content and " +"metadata." +msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:3 of +msgid "" +"This method is used to collect messages from the SuperLink that " +"correspond to a set of given message IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:6 of +msgid "An iterable of message IDs for which reply messages are to be retrieved." +msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:9 of +msgid "**messages** -- An iterable of messages received." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:3 of +msgid "" +"This method takes an iterable of messages and sends each message to the " +"node specified in `dst_node_id`." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:6 +#: flwr.server.driver.driver.Driver.send_and_receive:7 of +msgid "An iterable of messages to be sent." 
+msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:9 of +msgid "" +"**message_ids** -- An iterable of IDs for the messages that were sent, " +"which can be used to pull replies." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:3 of +msgid "" +"This method sends a list of messages to their destination node IDs and " +"then waits for the replies. It continues to pull replies until either all" +" replies are received or the specified timeout duration is exceeded." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:9 of +msgid "" +"The timeout duration in seconds. If specified, the method will wait for " +"replies for this duration. If `None`, there is no time limit and the " +"method will wait until replies for all messages are received." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:14 of +msgid "**replies** -- An iterable of reply messages received from the SuperLink." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:18 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:53 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:60 +#: of +#, fuzzy +msgid "Notes" +msgstr "Aucun" + +#: flwr.server.driver.driver.Driver.send_and_receive:19 of +msgid "" +"This method uses `push_messages` to send the messages and `pull_messages`" +" to collect the replies. If `timeout` is set, the method may not return " +"replies for all sent messages. A message remains valid until its TTL, " +"which is not affected by `timeout`." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.History.rst:2 +msgid "History" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1 +#: flwr.server.history.History.add_loss_centralized:1::1 of +#, fuzzy +msgid "Add one loss entry (from centralized evaluation)." +msgstr "Évaluation centralisée" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_loss_distributed:1 of +msgid "Add one loss entry (from distributed evaluation)." +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" ":py:obj:`add_metrics_centralized " "`\\ \\(server\\_round\\, " @@ -9859,6 +10572,38 @@ msgstr "" msgid "Add metrics entries (from distributed fit)." 
msgstr "" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 +msgid "LegacyContext" +msgstr "" + +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Bases: :py:class:`~flwr.common.context.Context`" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#, fuzzy +msgid ":py:obj:`config `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#, fuzzy +msgid ":py:obj:`strategy `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +msgid ":py:obj:`client_manager `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#, fuzzy +msgid ":py:obj:`history `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#, fuzzy +msgid ":py:obj:`state `\\" +msgstr "serveur.stratégie.Stratégie" + #: flwr.server.server.Server.client_manager:1::1 of msgid ":py:obj:`client_manager `\\ \\(\\)" msgstr "" @@ -9931,12 +10676,36 @@ msgstr "" msgid "Replace server strategy." msgstr "stratégie.du.serveur" +#: ../../source/ref-api/flwr.server.ServerApp.rst:2 +#, fuzzy +msgid "ServerApp" +msgstr "serveur" + +#: flwr.server.server_app.ServerApp:5 of +#, fuzzy +msgid "Use the `ServerApp` with an existing `Strategy`:" +msgstr "Utilise une stratégie existante" + +#: flwr.server.server_app.ServerApp:15 of +msgid "Use the `ServerApp` with a custom main function:" +msgstr "" + +#: flwr.server.server_app.ServerApp.main:1::1 of +#, fuzzy +msgid ":py:obj:`main `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: flwr.server.server_app.ServerApp.main:1 +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid "Return a decorator that registers the main fn with the server app." 
+msgstr "" + #: ../../source/ref-api/flwr.server.ServerConfig.rst:2 #, fuzzy msgid "ServerConfig" msgstr "serveur" -#: flwr.server.app.ServerConfig:3 of +#: flwr.server.server_config.ServerConfig:3 of msgid "" "All attributes have default values which allows users to configure just " "the ones they care about." @@ -10010,311 +10779,218 @@ msgstr "" msgid "**success**" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:2 -#, fuzzy -msgid "driver" -msgstr "serveur" - -#: ../../source/ref-api/flwr.server.driver.rst:22::1 -msgid "" -":py:obj:`start_driver `\\ \\(\\*\\[\\, " -"server\\_address\\, server\\, ...\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.driver.rst:22::1 -#: flwr.server.driver.app.start_driver:1 of +#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 #, fuzzy -msgid "Start a Flower Driver API server." -msgstr "Tout d'abord, démarre un serveur Flower :" - -#: ../../source/ref-api/flwr.server.driver.rst:30::1 -msgid "" -":py:obj:`Driver `\\ " -"\\(\\[driver\\_service\\_address\\, ...\\]\\)" -msgstr "" +msgid "run\\_driver\\_api" +msgstr "flower-driver-api" -#: ../../source/ref-api/flwr.server.driver.rst:30::1 -#: flwr.server.driver.driver.Driver:1 of -msgid "`Driver` class provides an interface to the Driver API." +#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 +msgid "run\\_fleet\\_api" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:30::1 -msgid "" -":py:obj:`GrpcDriver `\\ " -"\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#: ../../source/ref-api/flwr.server.run_server_app.rst:2 +msgid "run\\_server\\_app" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:30::1 -#: flwr.server.driver.grpc_driver.GrpcDriver:1 of -msgid "`GrpcDriver` provides access to the gRPC Driver API/service." 
-msgstr "" +#: ../../source/ref-api/flwr.server.run_superlink.rst:2 +#, fuzzy +msgid "run\\_superlink" +msgstr "flower-superlink" -#: ../../source/ref-api/flwr.server.driver.Driver.rst:2 +#: ../../source/ref-api/flwr.server.start_driver.rst:2 #, fuzzy -msgid "Driver" -msgstr "serveur" +msgid "start\\_driver" +msgstr "start_client" -#: flwr.server.driver.driver.Driver:3 of +#: flwr.server.compat.app.start_driver:3 of msgid "" "The IPv4 or IPv6 address of the Driver API server. Defaults to " -"`\"[::]:9091\"`." +"`\"[::]:8080\"`." msgstr "" -#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +#: flwr.server.compat.app.start_driver:6 of msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order: * CA certificate. * " -"server certificate. * server private key." +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_driver` will create one." msgstr "" -#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +#: flwr.server.app.start_server:9 flwr.server.compat.app.start_driver:10 +#: flwr.simulation.app.start_simulation:28 of msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order:" -msgstr "" - -#: flwr.server.app.start_server:32 flwr.server.driver.driver.Driver:10 of -#, fuzzy -msgid "CA certificate." -msgstr "Certificats" - -#: flwr.server.app.start_server:33 flwr.server.driver.driver.Driver:11 of -#, fuzzy -msgid "server certificate." -msgstr "Certificats" - -#: flwr.server.app.start_server:34 flwr.server.driver.driver.Driver:12 of -#, fuzzy -msgid "server private key." 
-msgstr "stratégie.du.serveur" - -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of -msgid ":py:obj:`get_nodes `\\ \\(\\)" +"Currently supported values are `num_rounds` (int, default: 1) and " +"`round_timeout` in seconds (float, default: None)." msgstr "" -#: flwr.server.driver.driver.Driver.get_nodes:1 -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of -msgid "Get node IDs." +#: flwr.server.app.start_server:12 flwr.server.compat.app.start_driver:13 of +msgid "" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`start_server` will use `flwr.server.strategy.FedAvg`." msgstr "" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of +#: flwr.server.compat.app.start_driver:17 of msgid "" -":py:obj:`pull_task_res `\\ " -"\\(task\\_ids\\)" +"An implementation of the class `flwr.server.ClientManager`. If no " +"implementation is provided, then `start_driver` will use " +"`flwr.server.SimpleClientManager`." msgstr "" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 -#: flwr.server.driver.driver.Driver.pull_task_res:1 -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.pull_task_res:1 of -msgid "Get task results." +#: flwr.server.compat.app.start_driver:25 of +msgid "The Driver object to use." msgstr "" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of -msgid "" -":py:obj:`push_task_ins `\\ " -"\\(task\\_ins\\_list\\)" +#: flwr.server.app.start_server:37 flwr.server.compat.app.start_driver:28 of +msgid "**hist** -- Object containing training and evaluation metrics." msgstr "" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 -#: flwr.server.driver.driver.Driver.push_task_ins:1 -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.push_task_ins:1 of -msgid "Schedule tasks." 
+#: flwr.server.compat.app.start_driver:33 of +msgid "Starting a driver that connects to an insecure server:" msgstr "" -#: ../../source/ref-api/flwr.server.driver.GrpcDriver.rst:2 -msgid "GrpcDriver" +#: flwr.server.compat.app.start_driver:37 of +msgid "Starting a driver that connects to an SSL-enabled server:" msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid ":py:obj:`connect `\\ \\(\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.start_server.rst:2 +#, fuzzy +msgid "start\\_server" +msgstr "serveur.start_server" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1 -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid "Connect to the Driver API." +#: flwr.server.app.start_server:3 of +msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +#: flwr.server.app.start_server:5 of msgid "" -":py:obj:`create_run `\\ " -"\\(req\\)" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_server` will create one." msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.create_run:1 of -#, fuzzy -msgid "Request for run ID." -msgstr "Demande pour une nouvelle Flower Baseline" - -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid ":py:obj:`disconnect `\\ \\(\\)" +#: flwr.server.app.start_server:16 of +msgid "" +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_server` will use " +"`flwr.server.client_manager.SimpleClientManager`." msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.disconnect:1 of -msgid "Disconnect from the Driver API." 
+#: flwr.server.app.start_server:21 of +msgid "" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" clients. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower clients need to be started with the same value (see " +"`flwr.client.start_client`), otherwise clients will not know about the " +"increased limit and block larger messages." msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid ":py:obj:`get_nodes `\\ \\(req\\)" -msgstr "" +#: flwr.server.app.start_server:42 of +#, fuzzy +msgid "Starting an insecure server:" +msgstr "Démarrer le serveur" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.get_nodes:1 of +#: flwr.server.app.start_server:46 of #, fuzzy -msgid "Get client IDs." -msgstr "Moteur client Edge" +msgid "Starting an SSL-enabled server:" +msgstr "Démarrer le serveur" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid "" -":py:obj:`pull_task_res `\\ " -"\\(req\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.strategy.rst:2 +#, fuzzy +msgid "strategy" +msgstr "stratégie.du.serveur" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`push_task_ins `\\ " -"\\(req\\)" +":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.driver.start_driver.rst:2 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.bulyan.Bulyan:1 of #, fuzzy -msgid "start\\_driver" -msgstr "start_client" +msgid "Bulyan strategy." +msgstr "Stratégies intégrées" -#: flwr.server.driver.app.start_driver:3 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"The IPv4 or IPv6 address of the Driver API server. Defaults to " -"`\"[::]:8080\"`." 
+":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" msgstr "" -#: flwr.server.driver.app.start_driver:6 of -msgid "" -"A server implementation, either `flwr.server.Server` or a subclass " -"thereof. If no instance is provided, then `start_driver` will create one." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." msgstr "" -#: flwr.server.app.start_server:9 flwr.server.driver.app.start_driver:10 -#: flwr.simulation.app.start_simulation:28 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"Currently supported values are `num_rounds` (int, default: 1) and " -"`round_timeout` in seconds (float, default: None)." +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" msgstr "" -#: flwr.server.app.start_server:12 flwr.server.driver.app.start_driver:13 of -msgid "" -"An implementation of the abstract base class " -"`flwr.server.strategy.Strategy`. If no strategy is provided, then " -"`start_server` will use `flwr.server.strategy.FedAvg`." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of +msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." msgstr "" -#: flwr.server.driver.app.start_driver:17 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"An implementation of the class `flwr.server.ClientManager`. If no " -"implementation is provided, then `start_driver` will use " -"`flwr.server.SimpleClientManager`." -msgstr "" - -#: flwr.server.app.start_server:37 flwr.server.driver.app.start_driver:26 of -msgid "**hist** -- Object containing training and evaluation metrics." 
-msgstr "" - -#: flwr.server.driver.app.start_driver:31 of -msgid "Starting a driver that connects to an insecure server:" -msgstr "" - -#: flwr.server.driver.app.start_driver:35 of -msgid "Starting a driver that connects to an SSL-enabled server:" -msgstr "" - -#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 -#, fuzzy -msgid "run\\_driver\\_api" -msgstr "flower-driver-api" - -#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 -msgid "run\\_fleet\\_api" -msgstr "" - -#: ../../source/ref-api/flwr.server.run_server_app.rst:2 -msgid "run\\_server\\_app" +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.run_superlink.rst:2 -#, fuzzy -msgid "run\\_superlink" -msgstr "flower-superlink" - -#: ../../source/ref-api/flwr.server.start_server.rst:2 -#, fuzzy -msgid "start\\_server" -msgstr "serveur.start_server" - -#: flwr.server.app.start_server:3 of -msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: of +msgid "Strategy wrapper for central DP with client-side adaptive clipping." msgstr "" -#: flwr.server.app.start_server:5 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"A server implementation, either `flwr.server.Server` or a subclass " -"thereof. If no instance is provided, then `start_server` will create one." +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.app.start_server:16 of -msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_server` will use " -"`flwr.server.client_manager.SimpleClientManager`." 
+#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: of +msgid "Strategy wrapper for central DP with server-side adaptive clipping." msgstr "" -#: flwr.server.app.start_server:21 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" clients. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower clients need to be started with the same value (see " -"`flwr.client.start_client`), otherwise clients will not know about the " -"increased limit and block larger messages." +":py:obj:`DifferentialPrivacyClientSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.app.start_server:42 of -#, fuzzy -msgid "Starting an insecure server:" -msgstr "Démarrer le serveur" - -#: flwr.server.app.start_server:46 of -#, fuzzy -msgid "Starting an SSL-enabled server:" -msgstr "Démarrer le serveur" - -#: ../../source/ref-api/flwr.server.strategy.rst:2 -#, fuzzy -msgid "strategy" -msgstr "stratégie.du.serveur" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: of +msgid "Strategy wrapper for central DP with client-side fixed clipping." +msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FaultTolerantFedAvg " -"`\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`DifferentialPrivacyServerSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of -msgid "Configurable fault-tolerant FedAvg strategy implementation." 
+#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: of +msgid "Strategy wrapper for central DP with server-side fixed clipping." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedadagrad.FedAdagrad:1 of #, fuzzy msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." @@ -10322,201 +10998,179 @@ msgstr "" "`FedAdam` et `FedAdam` correspondent à la dernière version de l'article " "sur l'optimisation fédérée adaptative." -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAdam `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedadam.FedAdam:1 of msgid "FedAdam - Adaptive Federated Optimization using Adam." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAvg `\\ \\(\\*\\[\\, " "fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedavg.FedAvg:1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of #, fuzzy msgid "Federated Averaging strategy." msgstr "Stratégie de moyenne fédérée." 
-#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -msgid "" -":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of -msgid "Configurable FedXgbNnAvg strategy implementation." -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -msgid "" -":py:obj:`FedXgbBagging `\\ " -"\\(\\[evaluate\\_function\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of -msgid "Configurable FedXgbBagging strategy implementation." -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -msgid "" -":py:obj:`FedXgbCyclic `\\ " -"\\(\\*\\*kwargs\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of -msgid "Configurable FedXgbCyclic strategy implementation." -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAvgAndroid `\\ " "\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedavgm.FedAvgM:1 of #, fuzzy msgid "Federated Averaging with Momentum strategy." msgstr "Stratégie de moyenne fédérée." 
-#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedmedian.FedMedian:1 of +#, fuzzy +msgid "Configurable FedMedian strategy implementation." +msgstr "Configuration de l'évaluation fédérée" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedOpt `\\ \\(\\*\\[\\, " "fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedopt.FedOpt:1 of #, fuzzy msgid "Federated Optim strategy." msgstr "Stratégie de moyenne fédérée." -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedProx `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedprox.FedProx:1 of #, fuzzy msgid "Federated Optimization strategy." msgstr "Stratégie de moyenne fédérée." -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FedYogi `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "FedYogi [Reddi et al., 2020] strategy." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of +msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " -"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Configurable QFedAvg strategy implementation." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of +msgid "Configurable FedXgbBagging strategy implementation." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FedMedian `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedmedian.FedMedian:1 of -#, fuzzy -msgid "Configurable FedMedian strategy implementation." -msgstr "Configuration de l'évaluation fédérée" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of +msgid "Configurable FedXgbCyclic strategy implementation." +msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FedTrimmedAvg `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of -msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of +msgid "Configurable FedXgbNnAvg strategy implementation." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`Krum `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.krum.Krum:1 of -msgid "Krum [Blanchard et al., 2017] strategy." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "FedYogi [Reddi et al., 2020] strategy." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`Bulyan `\\ \\(\\*\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.bulyan.Bulyan:1 of -#, fuzzy -msgid "Bulyan strategy." -msgstr "Stratégies intégrées" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of +msgid "Configurable fault-tolerant FedAvg strategy implementation." +msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`DPFedAvgAdaptive `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\)" +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.krum.Krum:1 of +msgid "Krum [Blanchard et al., 2017] strategy." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`DPFedAvgFixed `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of -msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Configurable QFedAvg strategy implementation." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid ":py:obj:`Strategy `\\ \\(\\)" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.strategy.Strategy:1 of msgid "Abstract base class for server strategy implementations." 
msgstr "" @@ -10719,6 +11373,14 @@ msgid "" "parameters\\, ...\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 @@ -10741,6 +11403,14 @@ msgid "" "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 +#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 #: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 @@ -10835,6 +11505,10 @@ msgstr "" msgid "Return the sample size and the required number of available clients." msgstr "" +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 +msgid "DPFedAvgAdaptive" +msgstr "DPFedAvgAdaptive" + #: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" msgstr "" @@ -10852,6 +11526,14 @@ msgid "" "\\(server\\_round\\, results\\, ...\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of @@ -10901,6 
+11583,14 @@ msgid "" "\\(server\\_round\\, parameters\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of msgid "Evaluate model parameters using an evaluation function from the strategy." 
@@ -10914,6 +11604,14 @@ msgid "" "\\(client\\_manager\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of msgid "Initialize global model parameters using given strategy." @@ -10948,6 +11646,14 @@ msgid "" "round of federated evaluation." msgstr "" +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 +msgid "DPFedAvgFixed" +msgstr "DPFedAvgFixed" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 #: flwr.server.strategy.fedavg.FedAvg:1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of @@ -11029,14 +11735,400 @@ msgid "" "round of federated learning." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 -#, fuzzy -msgid "FaultTolerantFedAvg" -msgstr "server.strategy.FaultTolerantFedAvg" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyClientSideAdaptiveClipping" +msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 #: of -msgid "" +msgid "Use `adaptiveclipping_mod` modifier at the client side." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 +#: of +msgid "" +"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " +"happen on the client-side, usually by using the built-in " +"`adaptiveclipping_mod`." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 +#: of +msgid "The strategy to which DP functionalities will be added by this wrapper." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 +#: of +msgid "The noise multiplier for the Gaussian mechanism for model updates." 
+msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 +#: of +msgid "The number of clients that are sampled on each round." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 +#: of +msgid "" +"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " +"recommends to set to 0.1." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 +#: of +msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 +#: of +msgid "" +"The learning rate for the clipping norm adaptation. Defaults to 0.2. " +"Andrew et al. recommends to set to 0.2." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 +#: of +msgid "" +"The stddev of the noise added to the count of updates currently below the" +" estimate. Andrew et al. 
recommends to set to `expected_num_records/20`" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 +#: of +#, fuzzy +msgid "Create a strategy:" +msgstr "stratégie.du.serveur" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 +#: of +msgid "" +"Wrap the strategy with the " +"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 +#: of +msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 +#: of +#, fuzzy +msgid "Aggregate training results and update clip norms." +msgstr "Résultats globaux de l'évaluation." 
+ +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 +#, fuzzy +msgid "DifferentialPrivacyClientSideFixedClipping" +msgstr "Confidentialité différentielle" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 +#: of +msgid "Use `fixedclipping_mod` modifier at the client side." +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 +#: of +msgid "" +"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " +"on the client-side, usually by using the built-in `fixedclipping_mod`." +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 +#: of +msgid "" +"The noise multiplier for the Gaussian mechanism for model updates. A " +"value of 1.0 or higher is recommended for strong privacy." 
+msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 +#: of +msgid "The value of the clipping norm." +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 +#: of +msgid "" +"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " +"wrapper:" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 +#: of +msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 +#: of +#, fuzzy +msgid "Add noise to the aggregated parameters." 
+msgstr "Puis sérialise le résultat agrégé :" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyServerSideAdaptiveClipping" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 +#: of +msgid "" +"The standard deviation of the noise added to the count of updates below " +"the estimate. Andrew et al. 
recommends to set to " +"`expected_num_records/20`" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 +#: of +msgid "" +"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " +"wrapper" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 +#, fuzzy +msgid "DifferentialPrivacyServerSideFixedClipping" +msgstr "Confidentialité différentielle" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 +#: of +msgid "" +"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " 
+"wrapper" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 +#: of +msgid "Compute the updates, clip, and pass them for aggregation." +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 +#: of +msgid "Afterward, add noise to the aggregated parameters." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 +#, fuzzy +msgid "FaultTolerantFedAvg" +msgstr "server.strategy.FaultTolerantFedAvg" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" ":py:obj:`aggregate_evaluate " "`\\ " "\\(server\\_round\\, results\\, ...\\)" @@ -11312,6 +12404,10 @@ msgid "" "Defaults to 1.0." msgstr "" +#: flwr.server.strategy.fedavg.FedAvg:33 of +msgid "Enable (True) or disable (False) in-place aggregation of model updates." +msgstr "" + #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`aggregate_evaluate " @@ -12410,31 +13506,477 @@ msgid "" "these as the initial global model parameters." msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:2 +#: ../../source/ref-api/flwr.server.workflow.rst:2 #, fuzzy -msgid "simulation" -msgstr "Simulation de moniteur" +msgid "workflow" +msgstr "Flux de travail" -#: ../../source/ref-api/flwr.simulation.rst:17::1 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -":py:obj:`start_simulation `\\ \\(\\*\\," -" client\\_fn\\[\\, ...\\]\\)" +":py:obj:`DefaultWorkflow `\\ " +"\\(\\[fit\\_workflow\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:17::1 -#: flwr.simulation.app.start_simulation:1 of -#, fuzzy -msgid "Start a Ray-based Flower simulation server." -msgstr "Simulation de moniteur" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of +msgid "Default workflow in Flower." 
+msgstr "" -#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 -#, fuzzy -msgid "start\\_simulation" -msgstr "démarrer_simulation" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +msgid "" +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" +msgstr "" -#: flwr.simulation.app.start_simulation:3 of +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "The workflow for the SecAgg+ protocol." +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -"A function creating client instances. The function must take a single " +":py:obj:`SecAggWorkflow `\\ " +"\\(reconstruction\\_threshold\\, \\*\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "The workflow for the SecAgg protocol." +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 +#, fuzzy +msgid "DefaultWorkflow" +msgstr "Flux de travail" + +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 +#, fuzzy +msgid "SecAggPlusWorkflow" +msgstr "Flux de travail" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 +#: of +msgid "" +"The SecAgg+ protocol ensures the secure summation of integer vectors " +"owned by multiple parties, without accessing any individual integer " +"vector. This workflow allows the server to compute the weighted average " +"of model parameters across all clients, ensuring individual contributions" +" remain private. This is achieved by clients sending both, a weighting " +"factor and a weighted version of the locally updated parameters, both of " +"which are masked for privacy. 
Specifically, each client uploads \"[w, w *" +" params]\" with masks, where weighting factor 'w' is the number of " +"examples ('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 +#: of +msgid "" +"The protocol involves four main stages: - 'setup': Send SecAgg+ " +"configuration to clients and collect their public keys. - 'share keys': " +"Broadcast public keys among clients and collect encrypted secret" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 +#: of +msgid "key shares." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:18 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:18 +#: of +msgid "" +"'collect masked vectors': Forward encrypted secret key shares to target " +"clients and collect masked model parameters." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:20 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:20 +#: of +msgid "" +"'unmask': Collect secret key shares to decrypt and aggregate the model " +"parameters." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:22 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:22 +#: of +msgid "" +"Only the aggregated model parameters are exposed and passed to " +"`Strategy.aggregate_fit`, ensuring individual data privacy." 
+msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:25 +#: of +msgid "" +"The number of shares into which each client's private key is split under " +"the SecAgg+ protocol. If specified as a float, it represents the " +"proportion of all selected clients, and the number of shares will be set " +"dynamically in the run time. A private key can be reconstructed from " +"these shares, allowing for the secure aggregation of model updates. Each " +"client sends one share to each of its neighbors while retaining one." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:25 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:32 +#: of +msgid "" +"The minimum number of shares required to reconstruct a client's private " +"key, or, if specified as a float, it represents the proportion of the " +"total number of shares needed for reconstruction. This threshold ensures " +"privacy by allowing for the recovery of contributions from dropped " +"clients during aggregation, without compromising individual client data." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:31 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:38 +#: of +msgid "" +"The maximum value of the weight that can be assigned to any single " +"client's update during the weighted average calculation on the server " +"side, e.g., in the FedAvg algorithm." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:35 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:42 +#: of +msgid "" +"The range within which model parameters are clipped before quantization. " +"This parameter ensures each model parameter is bounded within " +"[-clipping_range, clipping_range], facilitating quantization." 
+msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:39 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:46 +#: of +msgid "" +"The size of the range into which floating-point model parameters are " +"quantized, mapping each parameter to an integer in [0, " +"quantization_range-1]. This facilitates cryptographic operations on the " +"model updates." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:43 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:50 +#: of +msgid "" +"The range of values from which random mask entries are uniformly sampled " +"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " +"Please use 2**n values for `modulus_range` to prevent overflow issues." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:47 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:54 +#: of +msgid "" +"The timeout duration in seconds. If specified, the workflow will wait for" +" replies for this duration each time. If `None`, there is no time limit " +"and the workflow will wait until replies for all messages are received." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 +#: of +msgid "" +"Generally, higher `num_shares` means more robust to dropouts while " +"increasing the computational costs; higher `reconstruction_threshold` " +"means better privacy guarantees but less tolerance to dropouts." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:58 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:64 +#: of +msgid "Too large `max_weight` may compromise the precision of the quantization." 
+msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 +#: of +msgid "`modulus_range` must be 2**n and larger than `quantization_range`." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 +#: of +msgid "" +"When `num_shares` is a float, it is interpreted as the proportion of all " +"selected clients, and hence the number of shares will be determined in " +"the runtime. This allows for dynamic adjustment based on the total number" +" of participating clients." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:69 +#: of +msgid "" +"Similarly, when `reconstruction_threshold` is a float, it is interpreted " +"as the proportion of the number of shares needed for the reconstruction " +"of a private key. This feature enables flexibility in setting the " +"security threshold relative to the number of distributed shares." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:73 +#: of +msgid "" +"`num_shares`, `reconstruction_threshold`, and the quantization parameters" +" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg+" +" protocol." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`collect_masked_vectors_stage " +"`\\" +" \\(driver\\, ...\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "Execute the 'collect masked vectors' stage." 
+msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`setup_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 +#: of +msgid "Execute the 'setup' stage." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`share_keys_stage " +"`\\ " +"\\(driver\\, context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 +#: of +msgid "Execute the 'share keys' stage." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 +#: of +msgid "Execute the 'unmask' stage." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 +#, fuzzy +msgid "SecAggWorkflow" +msgstr "Flux de travail" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "" +"Bases: " +":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of +msgid "" +"The SecAgg protocol ensures the secure summation of integer vectors owned" +" by multiple parties, without accessing any individual integer vector. " +"This workflow allows the server to compute the weighted average of model " +"parameters across all clients, ensuring individual contributions remain " +"private. This is achieved by clients sending both, a weighting factor and" +" a weighted version of the locally updated parameters, both of which are " +"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" +" with masks, where weighting factor 'w' is the number of examples " +"('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 of +msgid "" +"The protocol involves four main stages: - 'setup': Send SecAgg " +"configuration to clients and collect their public keys. - 'share keys': " +"Broadcast public keys among clients and collect encrypted secret" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 of +msgid "" +"Each client's private key is split into N shares under the SecAgg " +"protocol, where N is the number of selected clients." 
+msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:56 of +msgid "" +"Generally, higher `reconstruction_threshold` means better privacy " +"guarantees but less tolerance to dropouts." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 of +msgid "" +"When `reconstruction_threshold` is a float, it is interpreted as the " +"proportion of the number of all selected clients needed for the " +"reconstruction of a private key. This feature enables flexibility in " +"setting the security threshold relative to the number of selected " +"clients." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:64 of +msgid "" +"`reconstruction_threshold`, and the quantization parameters " +"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg " +"protocol." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`collect_masked_vectors_stage " +"`\\ " +"\\(driver\\, ...\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`setup_stage `\\" +" \\(driver\\, context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`share_keys_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:2 +#, fuzzy +msgid "simulation" +msgstr "Simulation de moniteur" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 
+msgid "" +":py:obj:`start_simulation `\\ \\(\\*\\," +" client\\_fn\\[\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 +#: flwr.simulation.app.start_simulation:1 of +#, fuzzy +msgid "Start a Ray-based Flower simulation server." +msgstr "Simulation de moniteur" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 +msgid "" +":py:obj:`run_simulation_from_cli " +"`\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 +#: flwr.simulation.run_simulation.run_simulation_from_cli:1 of +msgid "Run Simulation Engine from the CLI." +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 +msgid "" +":py:obj:`run_simulation `\\ " +"\\(server\\_app\\, client\\_app\\, ...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 +#: flwr.simulation.run_simulation.run_simulation:1 of +msgid "Run a Flower App using the Simulation Engine." +msgstr "" + +#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 +#, fuzzy +msgid "run\\_simulation" +msgstr "Simulation de moniteur" + +#: flwr.simulation.run_simulation.run_simulation:3 of +msgid "" +"The `ServerApp` to be executed. It will send messages to different " +"`ClientApp` instances running on different (virtual) SuperNodes." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:6 of +msgid "" +"The `ClientApp` to be executed by each of the SuperNodes. It will receive" +" messages sent by the `ServerApp`." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:9 of +msgid "" +"Number of nodes that run a ClientApp. They can be sampled by a Driver in " +"the ServerApp and receive a Message describing what the ClientApp should " +"perform." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:13 of +msgid "A simulation backend that runs `ClientApp`s." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:15 of +msgid "" +"'A dictionary, e.g {\"\": , \"\": } to " +"configure a backend. 
Values supported in are those included by " +"`flwr.common.typing.ConfigsRecordValues`." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:19 of +msgid "" +"A boolean to indicate whether to enable GPU growth on the main thread. " +"This is desirable if you make use of a TensorFlow model on your " +"`ServerApp` while having your `ClientApp` running on the same GPU. " +"Without enabling this, you might encounter an out-of-memory error because" +" TensorFlow, by default, allocates all GPU memory. Read more about how " +"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " +"documentation: https://www.tensorflow.org/api/stable." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:26 of +msgid "" +"When diabled, only INFO, WARNING and ERROR log messages will be shown. If" +" enabled, DEBUG-level logs will be displayed." +msgstr "" + +#: ../../source/ref-api/flwr.simulation.run_simulation_from_cli.rst:2 +#, fuzzy +msgid "run\\_simulation\\_from\\_cli" +msgstr "Simulation de moniteur" + +#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +#, fuzzy +msgid "start\\_simulation" +msgstr "démarrer_simulation" + +#: flwr.simulation.app.start_simulation:3 of +msgid "" +"A function creating client instances. The function must take a single " "`str` argument called `cid`. It should return a single client instance of" " type Client. Note that the created client instances are ephemeral and " "will often be destroyed after a single method invocation. Since client " @@ -12522,7 +14064,7 @@ msgstr "" msgid "" "Optionally specify the type of actor to use. The actor object, which " "persists throughout the simulation, will be the process in charge of " -"running the clients' jobs (i.e. their `fit()` method)." +"executing a ClientApp wrapping input argument `client_fn`." 
msgstr "" #: flwr.simulation.app.start_simulation:54 of @@ -13635,9 +15177,9 @@ msgstr "" #: ../../source/ref-changelog.md:220 msgid "" "Much effort went into a completely restructured Flower docs experience. " -"The documentation on [flower.ai/docs](flower.ai/docs) is now divided " -"into Flower Framework, Flower Baselines, Flower Android SDK, Flower iOS " -"SDK, and code example projects." +"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " +"divided into Flower Framework, Flower Baselines, Flower Android SDK, " +"Flower iOS SDK, and code example projects." msgstr "" #: ../../source/ref-changelog.md:222 @@ -13975,15 +15517,15 @@ msgid "" "gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" " " "[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" and a [code " -"example](https://github.com/adap/flower/tree/main/examples/xgboost-quickstart)" -" that demonstrates the usage of this new strategy in an XGBoost project." +" and a [code example](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart) that demonstrates the usage of this new strategy in " +"an XGBoost project." msgstr "" "Nous avons ajouté une nouvelle [stratégie] `FedXgbNnAvg` " "(https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," " et un [exemple de code] " -"(https://github.com/adap/flower/tree/main/examples/xgboost-quickstart)" -" qui démontre l'utilisation de cette nouvelle stratégie dans un projet " +"(https://github.com/adap/flower/tree/main/examples/xgboost-quickstart) " +"qui démontre l'utilisation de cette nouvelle stratégie dans un projet " "XGBoost." #: ../../source/ref-changelog.md:300 @@ -14199,12 +15741,14 @@ msgstr "" msgid "" "TabNet is a powerful and flexible framework for training machine learning" " models on tabular data. 
We now have a federated example using Flower: " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-tabnet)." +"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" +"/quickstart-tabnet)." msgstr "" "TabNet est un cadre puissant et flexible pour former des modèles " "d'apprentissage automatique sur des données tabulaires. Nous avons " -"maintenant un exemple fédéré utilisant Flower : " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-tabnet)." +"maintenant un exemple fédéré utilisant Flower : [quickstart-" +"tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-" +"tabnet)." #: ../../source/ref-changelog.md:334 msgid "" @@ -14396,12 +15940,14 @@ msgstr "" msgid "" "A new code example (`quickstart-fastai`) demonstrates federated learning " "with [fastai](https://www.fast.ai/) and Flower. You can find it here: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples/quickstart-fastai)." +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" +"/quickstart-fastai)." msgstr "" "Un nouvel exemple de code (`quickstart-fastai`) démontre l'apprentissage " "fédéré avec [fastai](https://www.fast.ai/) et Flower. Tu peux le trouver " -"ici : " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples/quickstart-fastai)." +"ici : [quickstart-" +"fastai](https://github.com/adap/flower/tree/main/examples/quickstart-" +"fastai)." 
#: ../../source/ref-changelog.md:376 msgid "" @@ -14723,8 +16269,8 @@ msgid "" "[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" "customize-the-client-pytorch.html)" msgstr "" -"[Client et NumPyClient] (https://flower.ai/docs/tutorial/Flower-4" -"-Client-and-NumPyClient-PyTorch.html)" +"[Client et NumPyClient] (https://flower.ai/docs/tutorial/Flower-4-Client-" +"and-NumPyClient-PyTorch.html)" #: ../../source/ref-changelog.md:435 msgid "" @@ -14845,12 +16391,14 @@ msgstr "" #: ../../source/ref-changelog.md:453 msgid "" "A new code example (`quickstart-pandas`) demonstrates federated analytics" -" with Pandas and Flower. You can find it here: " -"[quickstart-pandas](https://github.com/adap/flower/tree/main/examples/quickstart-pandas)." +" with Pandas and Flower. You can find it here: [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." msgstr "" "Un nouvel exemple de code (`quickstart-pandas`) démontre l'analyse " -"fédérée avec Pandas et Flower. Tu peux le trouver ici : " -"[quickstart-pandas](https://github.com/adap/flower/tree/main/examples/quickstart-pandas)." +"fédérée avec Pandas et Flower. Tu peux le trouver ici : [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." #: ../../source/ref-changelog.md:455 msgid "" @@ -14949,9 +16497,8 @@ msgid "" "never contributed on GitHub before, this is the perfect place to start!" msgstr "" "L'un des points forts est le nouveau [guide du premier contributeur] " -"(https://flower.ai/docs/first-time-contributors.html) : si tu n'as " -"jamais contribué sur GitHub auparavant, c'est l'endroit idéal pour " -"commencer !" +"(https://flower.ai/docs/first-time-contributors.html) : si tu n'as jamais" +" contribué sur GitHub auparavant, c'est l'endroit idéal pour commencer !" 
#: ../../source/ref-changelog.md:477 msgid "v1.1.0 (2022-10-31)" @@ -15847,14 +17394,15 @@ msgstr "" "[#914](https://github.com/adap/flower/pull/914))" #: ../../source/ref-changelog.md:660 +#, fuzzy msgid "" "The first preview release of Flower Baselines has arrived! We're " "kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " "FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " "to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " "With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.ai/docs/contributing-" -"baselines.html)." +"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" +"contribute-baselines.html)." msgstr "" "La première version préliminaire de Flower Baselines est arrivée ! Nous " "démarrons Flower Baselines avec des implémentations de FedOpt (FedYogi, " @@ -16743,10 +18291,11 @@ msgstr "" "métriques spécifiques à une tâche sur le serveur." #: ../../source/ref-changelog.md:845 +#, fuzzy msgid "" "Custom metric dictionaries are now used in two user-facing APIs: they are" " returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " -"they enable evaluation functions passed to build-in strategies (via " +"they enable evaluation functions passed to built-in strategies (via " "`eval_fn`) to return more than two evaluation metrics. Strategies can " "even return *aggregated* metrics dictionaries for the server to keep " "track of." @@ -16760,8 +18309,9 @@ msgstr "" "*agrégées* pour que le serveur puisse en garder la trace." 
#: ../../source/ref-changelog.md:847 +#, fuzzy msgid "" -"Stratey implementations should migrate their `aggregate_fit` and " +"Strategy implementations should migrate their `aggregate_fit` and " "`aggregate_evaluate` methods to the new return type (e.g., by simply " "returning an empty `{}`), server-side evaluation functions should migrate" " from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." @@ -17252,28 +18802,14 @@ msgstr "" "tels que `PyTorch `_ ou `TensorFlow " "`_." -#: ../../source/ref-example-projects.rst:11 +#: ../../source/ref-example-projects.rst:10 +#, fuzzy msgid "" -"Flower usage examples used to be bundled with Flower in a package called " -"``flwr_example``. We are migrating those examples to standalone projects " -"to make them easier to use. All new examples are based in the directory " -"`examples `_." -msgstr "" -"Les exemples d'utilisation de Flower étaient auparavant regroupés avec " -"Flower dans un paquet appelé ``flwr_example``. Nous migrons ces exemples " -"vers des projets autonomes pour les rendre plus faciles à utiliser. Tous " -"les nouveaux exemples sont basés dans le répertoire ``examples " -"`_." - -#: ../../source/ref-example-projects.rst:16 -msgid "The following examples are available as standalone projects." +"The following examples are available as standalone projects. Quickstart " +"TensorFlow/Keras ---------------------------" msgstr "Les exemples suivants sont disponibles sous forme de projets autonomes." 
-#: ../../source/ref-example-projects.rst:20 -msgid "Quickstart TensorFlow/Keras" -msgstr "Démarrage rapide de TensorFlow/Keras" - -#: ../../source/ref-example-projects.rst:22 +#: ../../source/ref-example-projects.rst:14 msgid "" "The TensorFlow/Keras quickstart example shows CIFAR-10 image " "classification with MobileNetV2:" @@ -17281,7 +18817,7 @@ msgstr "" "L'exemple de démarrage rapide TensorFlow/Keras montre la classification " "d'images CIFAR-10 avec MobileNetV2 :" -#: ../../source/ref-example-projects.rst:25 +#: ../../source/ref-example-projects.rst:17 #, fuzzy msgid "" "`Quickstart TensorFlow (Code) " @@ -17292,16 +18828,14 @@ msgstr "" "`_" -#: ../../source/ref-example-projects.rst:26 +#: ../../source/ref-example-projects.rst:18 #, fuzzy -msgid "" -"`Quickstart TensorFlow (Tutorial) `_" +msgid ":doc:`Quickstart TensorFlow (Tutorial) `" msgstr "" "`Quickstart TensorFlow (Tutorial) `_" -#: ../../source/ref-example-projects.rst:27 +#: ../../source/ref-example-projects.rst:19 msgid "" "`Quickstart TensorFlow (Blog Post) `_" @@ -17309,12 +18843,12 @@ msgstr "" "`Quickstart TensorFlow (Blog Post) `_" -#: ../../source/ref-example-projects.rst:31 +#: ../../source/ref-example-projects.rst:23 #: ../../source/tutorial-quickstart-pytorch.rst:5 msgid "Quickstart PyTorch" msgstr "Démarrage rapide de PyTorch" -#: ../../source/ref-example-projects.rst:33 +#: ../../source/ref-example-projects.rst:25 msgid "" "The PyTorch quickstart example shows CIFAR-10 image classification with a" " simple Convolutional Neural Network:" @@ -17322,7 +18856,7 @@ msgstr "" "L'exemple de démarrage rapide PyTorch montre la classification d'images " "CIFAR-10 avec un simple réseau neuronal convolutif :" -#: ../../source/ref-example-projects.rst:36 +#: ../../source/ref-example-projects.rst:28 #, fuzzy msgid "" "`Quickstart PyTorch (Code) " @@ -17331,20 +18865,18 @@ msgstr "" "`Quickstart PyTorch (Code) " "`_" -#: ../../source/ref-example-projects.rst:37 +#: 
../../source/ref-example-projects.rst:29 #, fuzzy -msgid "" -"`Quickstart PyTorch (Tutorial) `_" +msgid ":doc:`Quickstart PyTorch (Tutorial) `" msgstr "" "`Quickstart PyTorch (Tutorial) `_" -#: ../../source/ref-example-projects.rst:41 +#: ../../source/ref-example-projects.rst:33 msgid "PyTorch: From Centralized To Federated" msgstr "PyTorch : De la centralisation à la fédération" -#: ../../source/ref-example-projects.rst:43 +#: ../../source/ref-example-projects.rst:35 msgid "" "This example shows how a regular PyTorch project can be federated using " "Flower:" @@ -17352,7 +18884,7 @@ msgstr "" "Cet exemple montre comment un projet PyTorch ordinaire peut être fédéré à" " l'aide de Flower :" -#: ../../source/ref-example-projects.rst:45 +#: ../../source/ref-example-projects.rst:37 #, fuzzy msgid "" "`PyTorch: From Centralized To Federated (Code) " @@ -17363,22 +18895,21 @@ msgstr "" "`_" -#: ../../source/ref-example-projects.rst:46 +#: ../../source/ref-example-projects.rst:38 #, fuzzy msgid "" -"`PyTorch: From Centralized To Federated (Tutorial) " -"`_" +":doc:`PyTorch: From Centralized To Federated (Tutorial) `" msgstr "" "`PyTorch : De la centralisation à la fédération (Tutoriel) " "`_" -#: ../../source/ref-example-projects.rst:50 +#: ../../source/ref-example-projects.rst:42 msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" msgstr "Apprentissage fédéré sur Raspberry Pi et Nvidia Jetson" -#: ../../source/ref-example-projects.rst:52 +#: ../../source/ref-example-projects.rst:44 msgid "" "This example shows how Flower can be used to build a federated learning " "system that run across Raspberry Pi and Nvidia Jetson:" @@ -17387,7 +18918,7 @@ msgstr "" "système d'apprentissage fédéré qui fonctionne sur Raspberry Pi et Nvidia " "Jetson :" -#: ../../source/ref-example-projects.rst:54 +#: ../../source/ref-example-projects.rst:46 #, fuzzy msgid "" "`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " @@ -17396,7 +18927,7 @@ msgstr "" "`L'apprentissage 
fédéré sur Raspberry Pi et Nvidia Jetson (Code) " "`_" -#: ../../source/ref-example-projects.rst:55 +#: ../../source/ref-example-projects.rst:47 msgid "" "`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " "`_" @@ -17404,214 +18935,39 @@ msgstr "" "`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Blog Post) " "`_" -#: ../../source/ref-example-projects.rst:60 -msgid "Legacy Examples (`flwr_example`)" -msgstr "Exemples hérités (`flwr_example`)" - -#: ../../source/ref-example-projects.rst:63 +#: ../../source/ref-faq.rst:4 msgid "" -"The useage examples in `flwr_example` are deprecated and will be removed " -"in the future. New examples are provided as standalone projects in " -"`examples `_." +"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." msgstr "" -"Les exemples d'utilisation dans `flwr_example` sont obsolètes et seront " -"supprimés à l'avenir. De nouveaux exemples sont fournis en tant que " -"projets autonomes dans `examples " -"`_." +"Cette page rassemble les réponses aux questions les plus fréquemment " +"posées sur l'apprentissage fédéré avec Flower." -#: ../../source/ref-example-projects.rst:69 -msgid "Extra Dependencies" -msgstr "Dépendances supplémentaires" +#: ../../source/ref-faq.rst +#, fuzzy +msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +msgstr "" +":fa:`eye,mr-1` Flower peut-il fonctionner sur les ordinateurs portables " +"Juptyter / Google Colab ?" -#: ../../source/ref-example-projects.rst:71 +#: ../../source/ref-faq.rst:8 msgid "" -"The core Flower framework keeps a minimal set of dependencies. The " -"examples demonstrate Flower in the context of different machine learning " -"frameworks, so additional dependencies need to be installed before an " -"example can be run." +"Yes, it can! Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. 
Here's a quickstart example:" msgstr "" -"Le noyau du framework Flower conserve un ensemble minimal de dépendances." -" Les exemples démontrent Flower dans le contexte de différents frameworks" -" d'apprentissage automatique, de sorte que des dépendances " -"supplémentaires doivent être installées avant qu'un exemple puisse être " -"exécuté." - -#: ../../source/ref-example-projects.rst:75 -msgid "For PyTorch examples::" -msgstr "Pour les exemples de PyTorch: :" - -#: ../../source/ref-example-projects.rst:79 -msgid "For TensorFlow examples::" -msgstr "Pour les exemples de TensorFlow : :" - -#: ../../source/ref-example-projects.rst:83 -msgid "For both PyTorch and TensorFlow examples::" -msgstr "Pour les exemples PyTorch et TensorFlow: :" +"Oui, c'est possible ! Flower est même livré avec quelques optimisations " +"pour qu'il fonctionne encore mieux sur Colab. Voici un exemple de " +"démarrage rapide :" -#: ../../source/ref-example-projects.rst:87 +#: ../../source/ref-faq.rst:10 +#, fuzzy msgid "" -"Please consult :code:`pyproject.toml` for a full list of possible extras " -"(section :code:`[tool.poetry.extras]`)." +"`Flower simulation PyTorch " +"`_" msgstr "" -"Tu peux consulter :code:`pyproject.toml` pour une liste complète des " -"extras possibles (section :code:`[tool.poetry.extras]`)." - -#: ../../source/ref-example-projects.rst:92 -msgid "PyTorch Examples" -msgstr "Exemples de PyTorch" - -#: ../../source/ref-example-projects.rst:94 -msgid "" -"Our PyTorch examples are based on PyTorch 1.7. They should work with " -"other releases as well. So far, we provide the following examples." -msgstr "" -"Nos exemples PyTorch sont basés sur PyTorch 1.7. Ils devraient " -"fonctionner avec d'autres versions également. Jusqu'à présent, nous " -"fournissons les exemples suivants." 
- -#: ../../source/ref-example-projects.rst:98 -msgid "CIFAR-10 Image Classification" -msgstr "Classification d'images CIFAR-10" - -#: ../../source/ref-example-projects.rst:100 -msgid "" -"`CIFAR-10 and CIFAR-100 `_ " -"are popular RGB image datasets. The Flower CIFAR-10 example uses PyTorch " -"to train a simple CNN classifier in a federated learning setup with two " -"clients." -msgstr "" -"`CIFAR-10 et CIFAR-100 `_ " -"sont des ensembles de données d'images RVB populaires. L'exemple Flower " -"CIFAR-10 utilise PyTorch pour former un classificateur CNN simple dans " -"une configuration d'apprentissage fédéré avec deux clients." - -#: ../../source/ref-example-projects.rst:104 -#: ../../source/ref-example-projects.rst:121 -#: ../../source/ref-example-projects.rst:146 -msgid "First, start a Flower server:" -msgstr "Tout d'abord, démarre un serveur Flower :" - -#: ../../source/ref-example-projects.rst:106 -msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" -msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" - -#: ../../source/ref-example-projects.rst:108 -#: ../../source/ref-example-projects.rst:125 -#: ../../source/ref-example-projects.rst:150 -msgid "Then, start the two clients in a new terminal window:" -msgstr "Ensuite, démarre les deux clients dans une nouvelle fenêtre de terminal :" - -#: ../../source/ref-example-projects.rst:110 -msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" -msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" - -#: ../../source/ref-example-projects.rst:112 -msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." -msgstr "Pour plus de détails, voir :code:`src/py/flwr_example/pytorch_cifar`." - -#: ../../source/ref-example-projects.rst:115 -msgid "ImageNet-2012 Image Classification" -msgstr "ImageNet-2012 Classification des images" - -#: ../../source/ref-example-projects.rst:117 -msgid "" -"`ImageNet-2012 `_ is one of the major computer" -" vision datasets. 
The Flower ImageNet example uses PyTorch to train a " -"ResNet-18 classifier in a federated learning setup with ten clients." -msgstr "" -"`ImageNet-2012 `_ est l'un des principaux " -"ensembles de données de vision par ordinateur. L'exemple Flower ImageNet " -"utilise PyTorch pour entraîner un classificateur ResNet-18 dans une " -"configuration d'apprentissage fédéré avec dix clients." - -#: ../../source/ref-example-projects.rst:123 -msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" -msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" - -#: ../../source/ref-example-projects.rst:127 -msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" -msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" - -#: ../../source/ref-example-projects.rst:129 -msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." -msgstr "Pour plus de détails, voir :code:`src/py/flwr_example/pytorch_imagenet`." - -#: ../../source/ref-example-projects.rst:133 -msgid "TensorFlow Examples" -msgstr "Exemples de TensorFlow" - -#: ../../source/ref-example-projects.rst:135 -msgid "" -"Our TensorFlow examples are based on TensorFlow 2.0 or newer. So far, we " -"provide the following examples." -msgstr "" -"Nos exemples TensorFlow sont basés sur TensorFlow 2.0 ou une version plus" -" récente. Jusqu'à présent, nous te proposons les exemples suivants." - -#: ../../source/ref-example-projects.rst:139 -msgid "Fashion-MNIST Image Classification" -msgstr "Classification d'images Fashion-MNIST" - -#: ../../source/ref-example-projects.rst:141 -msgid "" -"`Fashion-MNIST `_ is " -"often used as the \"Hello, world!\" of machine learning. We follow this " -"tradition and provide an example which samples random local datasets from" -" Fashion-MNIST and trains a simple image classification model over those " -"partitions." 
-msgstr "" -"nous suivons cette tradition et fournissons un exemple qui échantillonne " -"des ensembles de données locales aléatoires de Fashion-MNIST et entraîne " -"un modèle simple de classification d'images sur ces partitions." - -#: ../../source/ref-example-projects.rst:148 -msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" -msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" - -#: ../../source/ref-example-projects.rst:152 -msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" -msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" - -#: ../../source/ref-example-projects.rst:154 -msgid "" -"For more details, see " -":code:`src/py/flwr_example/tensorflow_fashion_mnist`." -msgstr "" -"Pour plus de détails, voir " -":code:`src/py/flwr_example/tensorflow_fashion_mnist`." - -#: ../../source/ref-faq.rst:4 -msgid "" -"This page collects answers to commonly asked questions about Federated " -"Learning with Flower." -msgstr "" -"Cette page rassemble les réponses aux questions les plus fréquemment " -"posées sur l'apprentissage fédéré avec Flower." - -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can Flower run on Juptyter Notebooks / Google Colab?" -msgstr "" -":fa:`eye,mr-1` Flower peut-il fonctionner sur les ordinateurs portables " -"Juptyter / Google Colab ?" - -#: ../../source/ref-faq.rst:8 -msgid "" -"Yes, it can! Flower even comes with a few under-the-hood optimizations to" -" make it work even better on Colab. Here's a quickstart example:" -msgstr "" -"Oui, c'est possible ! Flower est même livré avec quelques optimisations " -"pour qu'il fonctionne encore mieux sur Colab. 
Voici un exemple de " -"démarrage rapide :" - -#: ../../source/ref-faq.rst:10 -#, fuzzy -msgid "" -"`Flower simulation PyTorch " -"`_" -msgstr "" -"`Flower Quickstart (TensorFlow/Keras) " -"`_" +"`Flower Quickstart (TensorFlow/Keras) " +"`_" #: ../../source/ref-faq.rst:11 #, fuzzy @@ -17652,13 +19008,13 @@ msgstr "" #, fuzzy msgid "" "Yes, it does. Please take a look at our `blog post " -"`_ or check out the code examples:" +"`_ or check out the code examples:" msgstr "" "Oui. Jetez un coup d'œil à notre `blog post " -"`_ ou consultez l'`exemple de code Android sur GitHub" -" `_." +"`_ ou consultez l'`exemple de code Android sur GitHub " +"`_." #: ../../source/ref-faq.rst:21 msgid "" @@ -17701,8 +19057,9 @@ msgstr "" "`_." #: ../../source/ref-faq.rst:30 +#, fuzzy msgid "" -"`Flower meets KOSMoS `_." msgstr "" "`Flower rencontre KOSMoS `_ ." msgstr "" "Si tu veux voir tout ce qui est mis ensemble, tu devrais consulter " "l'exemple de code complet : " @@ -18261,7 +19616,7 @@ msgstr "" "huggingface](https://github.com/adap/flower/tree/main/examples" "/quickstart-huggingface)." -#: ../../source/tutorial-quickstart-huggingface.rst:227 +#: ../../source/tutorial-quickstart-huggingface.rst:226 msgid "" "Of course, this is a very basic example, and a lot can be added or " "modified, it was just to showcase how simply we could federate a Hugging " @@ -18272,7 +19627,7 @@ msgstr "" "simplicité on pouvait fédérer un flux de travail Hugging Face à l'aide de" " Flower." -#: ../../source/tutorial-quickstart-huggingface.rst:230 +#: ../../source/tutorial-quickstart-huggingface.rst:229 msgid "" "Note that in this example we used :code:`PyTorch`, but we could have very" " well used :code:`TensorFlow`." @@ -18304,9 +19659,9 @@ msgstr "" #, fuzzy msgid "" "First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a `virtualenv " -"`_. 
For the Flower " -"client implementation in iOS, it is recommended to use Xcode as our IDE." +"create a virtual environment and run everything within a :doc:`virtualenv" +" `. For the Flower client " +"implementation in iOS, it is recommended to use Xcode as our IDE." msgstr "" "Tout d'abord, il est recommandé de créer un environnement virtuel et de " "tout exécuter au sein d'un `virtualenv `_. As a result, we would " -"encourage you to use other ML frameworks alongise Flower, for example, " +"encourage you to use other ML frameworks alongside Flower, for example, " "PyTorch. This tutorial might be removed in future versions of Flower." msgstr "" @@ -18517,15 +19890,25 @@ msgstr "" #: ../../source/tutorial-quickstart-mxnet.rst:14 #: ../../source/tutorial-quickstart-scikitlearn.rst:12 +#, fuzzy msgid "" "It is recommended to create a virtual environment and run everything " -"within this `virtualenv `_." +"within this :doc:`virtualenv `." msgstr "" "Il est recommandé de créer un environnement virtuel et de tout exécuter " "dans ce `virtualenv `_." +#: ../../source/tutorial-quickstart-mxnet.rst:16 +#: ../../source/tutorial-quickstart-pytorch.rst:17 +#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +msgid "" +"Our example consists of one *server* and two *clients* all having the " +"same model." +msgstr "" +"Notre exemple consiste en un *serveur* et deux *clients* ayant tous le " +"même modèle." + #: ../../source/tutorial-quickstart-mxnet.rst:18 #: ../../source/tutorial-quickstart-scikitlearn.rst:16 msgid "" @@ -18849,15 +20232,40 @@ msgstr "" #: ../../source/tutorial-quickstart-pytorch.rst:15 #: ../../source/tutorial-quickstart-xgboost.rst:39 +#, fuzzy msgid "" "First of all, it is recommended to create a virtual environment and run " -"everything within a `virtualenv `_." +"everything within a :doc:`virtualenv `." msgstr "" "Tout d'abord, il est recommandé de créer un environnement virtuel et de " "tout exécuter au sein d'un `virtualenv `_." 
+#: ../../source/tutorial-quickstart-pytorch.rst:19 +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." +msgstr "" +"*Les clients* sont chargés de générer des mises à jour de poids " +"individuelles pour le modèle en fonction de leurs ensembles de données " +"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " +"agrège pour produire un meilleur modèle. Enfin, le *serveur* renvoie " +"cette version améliorée du modèle à chaque *client*. Un cycle complet de " +"mises à jour de poids s'appelle un *round*." + +#: ../../source/tutorial-quickstart-pytorch.rst:23 +msgid "" +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. You can do this by running :" +msgstr "" +"Maintenant que nous avons une idée générale de ce qui se passe, " +"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " +"exécutant :" + #: ../../source/tutorial-quickstart-pytorch.rst:29 msgid "" "Since we want to use PyTorch to solve a computer vision task, let's go " @@ -19064,7 +20472,8 @@ msgstr "" "régression logistique` sur MNIST en utilisant Flower et scikit-learn." 
#: ../../source/tutorial-quickstart-scikitlearn.rst:26 -msgid "Since we want to use scikt-learn, let's go ahead and install it:" +#, fuzzy +msgid "Since we want to use scikit-learn, let's go ahead and install it:" msgstr "Puisque nous voulons utiliser scikt-learn, allons-y et installons-le :" #: ../../source/tutorial-quickstart-scikitlearn.rst:32 @@ -19154,12 +20563,14 @@ msgstr "" "scikit-learn :" #: ../../source/tutorial-quickstart-scikitlearn.rst:73 +#, fuzzy msgid "" -"We load the MNIST dataset from `OpenML `_, " -"a popular image classification dataset of handwritten digits for machine " -"learning. The utility :code:`utils.load_mnist()` downloads the training " -"and test data. The training set is split afterwards into 10 partitions " -"with :code:`utils.partition()`." +"We load the MNIST dataset from `OpenML " +"`_, a popular " +"image classification dataset of handwritten digits for machine learning. " +"The utility :code:`utils.load_mnist()` downloads the training and test " +"data. The training set is split afterwards into 10 partitions with " +":code:`utils.partition()`." msgstr "" "Nous chargeons l'ensemble de données MNIST de `OpenML " "`_, un ensemble de données de " @@ -19757,10 +21168,9 @@ msgid "" "`_), we provide more options to define various experimental" " setups, including aggregation strategies, data partitioning and " -"centralised/distributed evaluation. We also support `Flower simulation " -"`_ making " -"it easy to simulate large client cohorts in a resource-aware manner. " -"Let's take a look!" +"centralised/distributed evaluation. We also support :doc:`Flower " +"simulation ` making it easy to simulate large " +"client cohorts in a resource-aware manner. Let's take a look!" msgstr "" #: ../../source/tutorial-quickstart-xgboost.rst:603 @@ -20256,8 +21666,8 @@ msgstr "" "Bienvenue dans la quatrième partie du tutoriel sur l'apprentissage fédéré" " Flower. 
Dans les parties précédentes de ce tutoriel, nous avons présenté" " l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " -"`__)," -" nous avons appris comment les stratégies peuvent être utilisées pour " +"`__), " +"nous avons appris comment les stratégies peuvent être utilisées pour " "personnaliser l'exécution à la fois sur le serveur et les clients " "(`partie 2 `__), et nous avons construit notre propre stratégie " @@ -20567,8 +21977,8 @@ msgstr "Côté client" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 msgid "" -"To be able to serialize our ``ndarray``\\ s into sparse " -"parameters, we will just have to call our custom functions in our " +"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " +"will just have to call our custom functions in our " "``flwr.client.Client``." msgstr "" "Pour pouvoir sérialiser nos ``ndarray`` en paramètres sparse, il nous " @@ -21419,8 +22829,8 @@ msgid "" msgstr "" "Dans ce carnet, nous allons commencer à personnaliser le système " "d'apprentissage fédéré que nous avons construit dans le carnet " -"d'introduction (toujours en utilisant `Flower `__ et" -" `PyTorch `__)." +"d'introduction (toujours en utilisant `Flower `__ et " +"`PyTorch `__)." #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 #, fuzzy @@ -21725,9 +23135,9 @@ msgstr "" #, fuzzy msgid "" "The `Flower Federated Learning Tutorial - Part 3 " -"`__ shows how to build a fully custom ``Strategy`` " -"from scratch." +"`__ shows how to build a fully custom ``Strategy`` from " +"scratch." msgstr "" "Le `Tutoriel d'apprentissage fédéré Flower - Partie 3 [WIP] " "`__ browser or the `Signal `__ " "messenger shows that users care about privacy. In fact, they choose the " -"privacy-enhancing version over other alternatives, if such an alternative " -"exists. But what can we do to apply machine learning and data science to " -"these cases to utilize private data? 
After all, these are all areas that " -"would benefit significantly from recent advances in AI." +"privacy-enhancing version over other alternatives, if such an alternative" +" exists. But what can we do to apply machine learning and data science to" +" these cases to utilize private data? After all, these are all areas that" +" would benefit significantly from recent advances in AI." msgstr "" "La popularité des systèmes améliorant la confidentialité comme le " "navigateur `Brave `__ ou le messager `Signal " @@ -22186,7 +23596,7 @@ msgstr "" "partir d'un point de contrôle précédemment sauvegardé." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|ba47ffb421814b0f8f9fa5719093d839|" +msgid "|5b1408eec0d746cdb91162a9107b6089|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 @@ -22221,7 +23631,7 @@ msgstr "" "rendements décroissants." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|aeac5bf79cbf497082e979834717e01b|" +msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 @@ -22254,7 +23664,7 @@ msgstr "" "données locales, ou même de quelques étapes (mini-batchs)." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|ce27ed4bbe95459dba016afc42486ba2|" +msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 @@ -22285,7 +23695,7 @@ msgstr "" " l'entraînement local." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|ae94a7f71dda443cbec2385751427d41|" +msgid "|ec1fe880237247e0975f52766775ab84|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 @@ -22344,7 +23754,7 @@ msgstr "" "times as much as each of the 100 examples." 
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" +msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 @@ -22451,11 +23861,6 @@ msgstr "" "empêcher le serveur de voir les résultats soumis par les nœuds clients " "individuels." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 -#, fuzzy -msgid "Differential Privacy" -msgstr "Confidentialité différentielle" - #: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 msgid "" "Differential privacy (DP) is often mentioned in the context of Federated " @@ -22492,7 +23897,7 @@ msgstr "" "quel cadre de ML et n'importe quel langage de programmation." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|08cb60859b07461588fe44e55810b050|" +msgid "|ff726bc5505e432388ee2fdd6ef420b9|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 @@ -24629,10 +26034,10 @@ msgstr "" #~ "Flower Python server, it is recommended" #~ " to create a virtual environment and" #~ " run everything within a `virtualenv " -#~ "`_." -#~ " For the Flower client implementation " -#~ "in iOS, it is recommended to use" -#~ " Xcode as our IDE." +#~ "`_. " +#~ "For the Flower client implementation in" +#~ " iOS, it is recommended to use " +#~ "Xcode as our IDE." #~ msgstr "" #~ "Tout d'abord, pour l'exécution du " #~ "serveur Flower Python, il est recommandé" @@ -24795,27 +26200,12 @@ msgstr "" #~ "The implementation can be seen in " #~ ":code:`MLModelInspect`." 
#~ msgstr "" -#~ "Comme CoreML ne permet pas de voir" -#~ " les paramètres du modèle avant la" -#~ " formation, et que l'accès aux " -#~ "paramètres du modèle pendant ou après" -#~ " la formation ne peut se faire " -#~ "qu'en spécifiant le nom de la " -#~ "couche, nous devons connaître ces " -#~ "informations à l'avance, en regardant " -#~ "les spécifications du modèle, qui sont" -#~ " écrites sous forme de fichiers " -#~ "proto. La mise en œuvre peut être" -#~ " vue dans :code:`MLModelInspect`." #~ msgid "" #~ "After we have all of the necessary" #~ " informations, let's create our Flower " #~ "client." #~ msgstr "" -#~ "Après avoir obtenu toutes les " -#~ "informations nécessaires, créons notre client" -#~ " Flower." #~ msgid "" #~ "Then start the Flower gRPC client " @@ -25474,8 +26864,8 @@ msgstr "" #~ " papers. If you want to add a" #~ " new baseline or experiment, please " #~ "check the `Contributing Baselines " -#~ "`_ " -#~ "section." +#~ "`_ section." #~ msgstr "" #~ msgid "Paper" @@ -25798,3 +27188,1467 @@ msgstr "" #~ msgid "|c76452ae1ed84965be7ef23c72b95845|" #~ msgstr "" +#~ msgid "" +#~ "Please follow the first section on " +#~ "`Run Flower using Docker " +#~ "`_ which covers this" +#~ " step in more detail." +#~ msgstr "" + +#~ msgid "" +#~ "Since `Flower 1.5 `_ we have " +#~ "introduced translations to our doc " +#~ "pages, but, as you might have " +#~ "noticed, the translations are often " +#~ "imperfect. If you speak languages other" +#~ " than English, you might be able " +#~ "to help us in our effort to " +#~ "make Federated Learning accessible to as" +#~ " many people as possible by " +#~ "contributing to those translations! This " +#~ "might also be a great opportunity " +#~ "for those wanting to become open " +#~ "source contributors with little prerequistes." 
+#~ msgstr "" + +#~ msgid "" +#~ "You input your translation in the " +#~ "textbox at the top and then, once" +#~ " you are happy with it, you " +#~ "either press ``Save and continue`` (to" +#~ " save the translation and go to " +#~ "the next untranslated string), ``Save " +#~ "and stay`` (to save the translation " +#~ "and stay on the same page), " +#~ "``Suggest`` (to add your translation to" +#~ " suggestions for other users to " +#~ "view), or ``Skip`` (to go to the" +#~ " next untranslated string without saving" +#~ " anything)." +#~ msgstr "" + +#~ msgid "" +#~ "If the section is completely empty " +#~ "(without any token) or non-existant, " +#~ "the changelog will just contain the " +#~ "title of the PR for the changelog" +#~ " entry, without any description." +#~ msgstr "" + +#~ msgid "Example: Walk-Through PyTorch & MNIST" +#~ msgstr "Exemple : PyTorch et MNIST" + +#~ msgid "" +#~ "In this tutorial we will learn, " +#~ "how to train a Convolutional Neural " +#~ "Network on MNIST using Flower and " +#~ "PyTorch." +#~ msgstr "" +#~ "Dans ce tutoriel, nous allons apprendre," +#~ " comment former un réseau neuronal " +#~ "convolutif sur MNIST en utilisant Flower" +#~ " et PyTorch." + +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead an install PyTorch and " +#~ "the **torchvision** library:" +#~ msgstr "" +#~ "Puisque nous voulons utiliser PyTorch " +#~ "pour résoudre une tâche de vision " +#~ "par ordinateur, installons PyTorch et la" +#~ " bibliothèque **torchvision** :" + +#~ msgid "Ready... Set... Train!" +#~ msgstr "Prêts... prêts... entraînez-vous !" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Basic MNIST Example" +#~ " `_. 
" +#~ "This will allow you see how easy" +#~ " it is to wrap your code with" +#~ " Flower and begin training in a " +#~ "federated way. We provide you with " +#~ "two helper scripts, namely *run-" +#~ "server.sh*, and *run-clients.sh*. Don't " +#~ "be afraid to look inside, they are" +#~ " simple enough =)." +#~ msgstr "" +#~ "Maintenant que nous avons installé " +#~ "toutes nos dépendances, lançons un " +#~ "simple entraînement distribué avec deux " +#~ "clients et un serveur. Notre procédure" +#~ " d'entraînement et l'architecture de notre" +#~ " réseau sont basées sur l'exemple " +#~ "MNIST de base de PyTorch " +#~ "`_. Cela" +#~ " te permettra de voir à quel " +#~ "point il est facile d'envelopper ton " +#~ "code avec Flower et de commencer " +#~ "l'entraînement de manière fédérée. Nous " +#~ "te fournissons deux scripts d'aide, à" +#~ " savoir *run-server.sh*, et *run-" +#~ "clients.sh*. N'aie pas peur de regarder" +#~ " à l'intérieur, ils sont assez " +#~ "simples =)." + +#~ msgid "" +#~ "Go ahead and launch on a terminal" +#~ " the *run-server.sh* script first as" +#~ " follows:" +#~ msgstr "Lance sur un terminal le script *run-server.sh* d'abord comme suit :" + +#~ msgid "Now that the server is up and running, go ahead and launch the clients." +#~ msgstr "Maintenant que le serveur est opérationnel, vas-y et lance les clients." + +#~ msgid "" +#~ "Et voilà! You should be seeing the" +#~ " training procedure and, after a few" +#~ " iterations, the test accuracy for " +#~ "each client." +#~ msgstr "" +#~ "Et voilà ! Tu devrais voir la " +#~ "procédure d'entraînement et, après quelques" +#~ " itérations, la précision du test " +#~ "pour chaque client." + +#~ msgid "Now, let's see what is really happening inside." +#~ msgstr "Maintenant, voyons ce qui se passe réellement à l'intérieur." 
+ +#~ msgid "" +#~ "Inside the server helper script *run-" +#~ "server.sh* you will find the following" +#~ " code that basically runs the " +#~ ":code:`server.py`" +#~ msgstr "" +#~ "Dans le script d'aide au serveur " +#~ "*run-server.sh*, tu trouveras le code " +#~ "suivant qui exécute le fichier " +#~ ":code:`server.py`" + +#~ msgid "" +#~ "We can go a bit deeper and " +#~ "see that :code:`server.py` simply launches " +#~ "a server that will coordinate three " +#~ "rounds of training. Flower Servers are" +#~ " very customizable, but for simple " +#~ "workloads, we can start a server " +#~ "using the :ref:`start_server ` function and leave " +#~ "all the configuration possibilities at " +#~ "their default values, as seen below." +#~ msgstr "" +#~ "Nous pouvons aller un peu plus " +#~ "loin et voir que :code:`server.py` lance" +#~ " simplement un serveur qui coordonnera " +#~ "trois tours de formation. Flower Les " +#~ "serveurs sont très personnalisables, mais " +#~ "pour les charges de travail simples, " +#~ "nous pouvons démarrer un serveur à " +#~ "l'aide de la fonction :ref:`start_server " +#~ "` et " +#~ "laisser toutes les possibilités de " +#~ "configuration à leurs valeurs par " +#~ "défaut, comme on peut le voir " +#~ "ci-dessous." + +#~ msgid "" +#~ "Next, let's take a look at the " +#~ "*run-clients.sh* file. You will see " +#~ "that it contains the main loop " +#~ "that starts a set of *clients*." +#~ msgstr "" +#~ "Ensuite, jetons un coup d'œil au " +#~ "fichier *run-clients.sh*. Tu verras " +#~ "qu'il contient la boucle principale qui" +#~ " démarre un ensemble de *clients*." + +#~ msgid "" +#~ "**cid**: is the client ID. It is" +#~ " an integer that uniquely identifies " +#~ "client identifier." +#~ msgstr "" +#~ "**cid** : c'est l'identifiant du client." +#~ " C'est un nombre entier qui identifie" +#~ " de façon unique l'identifiant du " +#~ "client." + +#~ msgid "**sever_address**: String that identifies IP and port of the server." 
+#~ msgstr "**sever_address** : Chaîne qui identifie l'IP et le port du serveur." + +#~ msgid "" +#~ "**nb_clients**: This defines the number " +#~ "of clients being created. This piece " +#~ "of information is not required by " +#~ "the client, but it helps us " +#~ "partition the original MNIST dataset to" +#~ " make sure that every client is " +#~ "working on unique subsets of both " +#~ "*training* and *test* sets." +#~ msgstr "" +#~ "**Cette information n'est pas requise " +#~ "par le client, mais elle nous aide" +#~ " à partitionner l'ensemble de données " +#~ "MNIST original pour nous assurer que " +#~ "chaque client travaille sur des sous-" +#~ "ensembles uniques des ensembles *formation*" +#~ " et *test*." + +#~ msgid "" +#~ "Again, we can go deeper and look" +#~ " inside :code:`flwr_example/quickstart-" +#~ "pytorch/client.py`. After going through the" +#~ " argument parsing code at the " +#~ "beginning of our :code:`main` function, " +#~ "you will find a call to " +#~ ":code:`mnist.load_data`. This function is " +#~ "responsible for partitioning the original " +#~ "MNIST datasets (*training* and *test*) " +#~ "and returning a :code:`torch.utils.data.DataLoader`" +#~ " s for each of them. We then" +#~ " instantiate a :code:`PytorchMNISTClient` object" +#~ " with our client ID, our DataLoaders," +#~ " the number of epochs in each " +#~ "round, and which device we want to" +#~ " use for training (CPU or GPU)." +#~ msgstr "" +#~ "Encore une fois, nous pouvons aller " +#~ "plus loin et regarder dans " +#~ ":code:`flwr_example/quickstart-pytorch/client.py`. Après" +#~ " avoir parcouru le code d'analyse des" +#~ " arguments au début de notre fonction" +#~ " :code:`main`, tu trouveras un appel " +#~ "à :code:`mnist.load_data`. Cette fonction est" +#~ " responsable du partitionnement des " +#~ "ensembles de données MNIST originaux " +#~ "(*training* et *test*) et renvoie un " +#~ ":code:`torch.utils.data.DataLoader` s pour chacun" +#~ " d'entre eux. 
Nous instancions ensuite " +#~ "un objet :code:`PytorchMNISTClient` avec notre" +#~ " ID client, nos DataLoaders, le " +#~ "nombre d'époques dans chaque tour et " +#~ "le périphérique que nous voulons " +#~ "utiliser pour l'entraînement (CPU ou " +#~ "GPU)." + +#~ msgid "" +#~ "The :code:`PytorchMNISTClient` object when " +#~ "finally passed to :code:`fl.client.start_client` " +#~ "along with the server's address as " +#~ "the training process begins." +#~ msgstr "" +#~ "L'objet :code:`PytorchMNISTClient` est finalement" +#~ " transmis à :code:`fl.client.start_client` avec" +#~ " l'adresse du serveur lorsque le " +#~ "processus de formation commence." + +#~ msgid "A Closer Look" +#~ msgstr "Regarder de plus près" + +#~ msgid "" +#~ "Now, let's look closely into the " +#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" +#~ ".quickstart-pytorch.mnist` and see what it" +#~ " is doing:" +#~ msgstr "" +#~ "Maintenant, examinons de près le " +#~ ":code:`PytorchMNISTClient` à l'intérieur du " +#~ ":code:`flwr_example.quickstart-pytorch.mnist` et " +#~ "voyons ce qu'il fait :" + +#~ msgid "" +#~ "The first thing to notice is that" +#~ " :code:`PytorchMNISTClient` instantiates a CNN" +#~ " model inside its constructor" +#~ msgstr "" +#~ "La première chose à remarquer est " +#~ "que :code:`PytorchMNISTClient` instancie un " +#~ "modèle CNN dans son constructeur" + +#~ msgid "" +#~ "The code for the CNN is available" +#~ " under :code:`quickstart-pytorch.mnist` and " +#~ "it is reproduced below. It is the" +#~ " same network found in `Basic MNIST" +#~ " Example " +#~ "`_." +#~ msgstr "" +#~ "Le code du CNN est disponible sous" +#~ " :code:`quickstart-pytorch.mnist` et il est" +#~ " reproduit ci-dessous. Il s'agit du" +#~ " même réseau que celui que l'on " +#~ "trouve dans `Exemple basique de MNIST" +#~ " `_." 
+ +#~ msgid "" +#~ "The second thing to notice is that" +#~ " :code:`PytorchMNISTClient` class inherits from" +#~ " the :code:`fl.client.Client`, and hence it" +#~ " must implement the following methods:" +#~ msgstr "" +#~ "La deuxième chose à noter est que" +#~ " la classe :code:`PytorchMNISTClient` hérite " +#~ "de :code:`fl.client.Client`, et qu'elle doit" +#~ " donc implémenter les méthodes suivantes" +#~ " :" + +#~ msgid "" +#~ "When comparing the abstract class to " +#~ "its derived class :code:`PytorchMNISTClient` " +#~ "you will notice that :code:`fit` calls" +#~ " a :code:`train` function and that " +#~ ":code:`evaluate` calls a :code:`test`: " +#~ "function." +#~ msgstr "" +#~ "En comparant la classe abstraite à " +#~ "sa classe dérivée :code:`PytorchMNISTClient`, " +#~ "tu remarqueras que :code:`fit` appelle " +#~ "une fonction :code:`train` et que " +#~ ":code:`evaluate` appelle une fonction " +#~ ":code:`test` :." + +#~ msgid "" +#~ "These functions can both be found " +#~ "inside the same :code:`quickstart-" +#~ "pytorch.mnist` module:" +#~ msgstr "" +#~ "Ces fonctions se trouvent toutes deux" +#~ " dans le même module :code:`quickstart-" +#~ "pytorch.mnist` :" + +#~ msgid "" +#~ "Observe that these functions encapsulate " +#~ "regular training and test loops and " +#~ "provide :code:`fit` and :code:`evaluate` with" +#~ " final statistics for each round. You" +#~ " could substitute them with your " +#~ "custom train and test loops and " +#~ "change the network architecture, and the" +#~ " entire example would still work " +#~ "flawlessly. As a matter of fact, " +#~ "why not try and modify the code" +#~ " to an example of your liking?" +#~ msgstr "" +#~ "Observe que ces fonctions encapsulent " +#~ "les boucles d'entraînement et de test" +#~ " habituelles et fournissent à :code:`fit`" +#~ " et :code:`evaluate` les statistiques " +#~ "finales pour chaque tour. 
Tu pourrais" +#~ " les remplacer par tes boucles " +#~ "d'entraînement et de test personnalisées " +#~ "et changer l'architecture du réseau, et" +#~ " l'ensemble de l'exemple fonctionnerait " +#~ "toujours parfaitement. En fait, pourquoi " +#~ "ne pas essayer de modifier le code" +#~ " pour en faire un exemple qui " +#~ "te plairait ?" + +#~ msgid "Give It a Try" +#~ msgstr "Fais un essai" + +#~ msgid "" +#~ "Looking through the quickstart code " +#~ "description above will have given a " +#~ "good understanding of how *clients* and" +#~ " *servers* work in Flower, how to " +#~ "run a simple experiment, and the " +#~ "internals of a client wrapper. Here " +#~ "are a few things you could try " +#~ "on your own and get more " +#~ "experience with Flower:" +#~ msgstr "" +#~ "En parcourant la description du code " +#~ "de démarrage rapide ci-dessus, tu " +#~ "auras acquis une bonne compréhension du" +#~ " fonctionnement des *clients* et des " +#~ "*serveurs* dans Flower, de l'exécution " +#~ "d'une expérience simple et de la " +#~ "structure interne d'un wrapper client. " +#~ "Voici quelques exemples que tu peux " +#~ "essayer par toi-même pour acquérir " +#~ "plus d'expérience avec Flower :" + +#~ msgid "" +#~ "Try and change :code:`PytorchMNISTClient` so" +#~ " it can accept different architectures." +#~ msgstr "" +#~ "Essaie de modifier :code:`PytorchMNISTClient` " +#~ "pour qu'il puisse accepter différentes " +#~ "architectures." + +#~ msgid "" +#~ "Modify the :code:`train` function so " +#~ "that it accepts different optimizers" +#~ msgstr "" +#~ "Modifie la fonction :code:`train` pour " +#~ "qu'elle accepte différents optimiseurs" + +#~ msgid "" +#~ "Modify the :code:`test` function so that" +#~ " it proves not only the top-1 " +#~ "(regular accuracy) but also the top-5" +#~ " accuracy?" +#~ msgstr "" +#~ "Modifie la fonction :code:`test` pour " +#~ "qu'elle prouve non seulement le top-1" +#~ " (précision normale) mais aussi le " +#~ "top-5 ?" 
+ +#~ msgid "" +#~ "Go larger! Try to adapt the code" +#~ " to larger images and datasets. Why" +#~ " not try training on ImageNet with" +#~ " a ResNet-50?" +#~ msgstr "" +#~ "Essaie d'adapter le code à des " +#~ "images et à des ensembles de " +#~ "données plus grands. Pourquoi ne pas " +#~ "essayer de s'entraîner sur ImageNet avec" +#~ " un ResNet-50 ?" + +#~ msgid "You are ready now. Enjoy learning in a federated way!" +#~ msgstr "Tu es prêt maintenant. Profite de l'apprentissage de manière fédérée !" + +#~ msgid "" +#~ "Flower provides differential privacy (DP) " +#~ "wrapper classes for the easy integration" +#~ " of the central DP guarantees " +#~ "provided by DP-FedAvg into training " +#~ "pipelines defined in any of the " +#~ "various ML frameworks that Flower is " +#~ "compatible with." +#~ msgstr "" +#~ "Flower fournit des classes d'enveloppe " +#~ "de confidentialité différentielle (DP) pour" +#~ " l'intégration facile des garanties " +#~ "centrales de DP fournies par DP-" +#~ "FedAvg dans les pipelines de formation" +#~ " définis dans n'importe lequel des " +#~ "divers cadres de ML avec lesquels " +#~ "Flower est compatible." + +#~ msgid "" +#~ "Please note that these components are" +#~ " still experimental; the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." +#~ msgstr "" +#~ "Note que ces composants sont encore " +#~ "expérimentaux, la configuration correcte du" +#~ " DP pour une tâche spécifique est " +#~ "encore un problème non résolu." + +#~ msgid "" +#~ "The name DP-FedAvg is misleading " +#~ "since it can be applied on top " +#~ "of any FL algorithm that conforms " +#~ "to the general structure prescribed by" +#~ " the FedOpt family of algorithms." +#~ msgstr "" +#~ "Le nom DP-FedAvg est trompeur car" +#~ " il peut être appliqué à n'importe" +#~ " quel algorithme FL qui se conforme" +#~ " à la structure générale prescrite " +#~ "par la famille d'algorithmes FedOpt." 
+ +#~ msgid "DP-FedAvg" +#~ msgstr "DP-FedAvg" + +#~ msgid "" +#~ "DP-FedAvg, originally proposed by " +#~ "McMahan et al. [mcmahan]_ and extended" +#~ " by Andrew et al. [andrew]_, is " +#~ "essentially FedAvg with the following " +#~ "modifications." +#~ msgstr "" +#~ "DP-FedAvg, proposé à l'origine par " +#~ "McMahan et al. [mcmahan]_ et étendu " +#~ "par Andrew et al. [andrew]_, est " +#~ "essentiellement FedAvg avec les modifications" +#~ " suivantes." + +#~ msgid "" +#~ "**Clipping** : The influence of each " +#~ "client's update is bounded by clipping" +#~ " it. This is achieved by enforcing" +#~ " a cap on the L2 norm of " +#~ "the update, scaling it down if " +#~ "needed." +#~ msgstr "" +#~ "**Clipping** : L'influence de la mise" +#~ " à jour de chaque client est " +#~ "limitée en l'écrêtant. Ceci est réalisé" +#~ " en imposant un plafond à la " +#~ "norme L2 de la mise à jour, " +#~ "en la réduisant si nécessaire." + +#~ msgid "" +#~ "**Noising** : Gaussian noise, calibrated " +#~ "to the clipping threshold, is added " +#~ "to the average computed at the " +#~ "server." +#~ msgstr "" +#~ "**Bruit** : un bruit gaussien, calibré" +#~ " sur le seuil d'écrêtage, est ajouté" +#~ " à la moyenne calculée au niveau " +#~ "du serveur." + +#~ msgid "" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. This variability is " +#~ "crucial in understanding its impact on" +#~ " differential privacy guarantees, emphasizing " +#~ "the need for an adaptive approach " +#~ "[andrew]_ that continuously adjusts the " +#~ "clipping threshold to track a " +#~ "prespecified quantile of the update norm" +#~ " distribution." +#~ msgstr "" +#~ "Il a été démontré que la " +#~ "distribution de la norme de mise à" +#~ " jour varie d'une tâche à l'autre " +#~ "et évolue au fur et à mesure " +#~ "de la formation. 
C'est pourquoi nous " +#~ "utilisons une approche adaptative [andrew]_" +#~ " qui ajuste continuellement le seuil " +#~ "d'écrêtage pour suivre un quantile " +#~ "prédéfini de la distribution de la " +#~ "norme de mise à jour." + +#~ msgid "Simplifying Assumptions" +#~ msgstr "Simplifier les hypothèses" + +#~ msgid "" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realizes the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." +#~ msgstr "" +#~ "Nous formulons (et tentons d'appliquer) " +#~ "un certain nombre d'hypothèses qui " +#~ "doivent être satisfaites pour que le " +#~ "processus de formation réalise réellement " +#~ "les garanties :math:`(\\epsilon, \\delta)` que" +#~ " l'utilisateur a à l'esprit lorsqu'il " +#~ "configure l'installation." + +#~ msgid "" +#~ "**Fixed-size subsampling** :Fixed-size " +#~ "subsamples of the clients must be " +#~ "taken at each round, as opposed to" +#~ " variable-sized Poisson subsamples." +#~ msgstr "" +#~ "**Sous-échantillonnage de taille fixe** " +#~ ":Des sous-échantillons de taille fixe" +#~ " des clients doivent être prélevés à" +#~ " chaque tour, par opposition aux " +#~ "sous-échantillons de Poisson de taille " +#~ "variable." + +#~ msgid "" +#~ "**Unweighted averaging** : The contributions" +#~ " from all the clients must weighted" +#~ " equally in the aggregate to " +#~ "eliminate the requirement for the server" +#~ " to know in advance the sum of" +#~ " the weights of all clients available" +#~ " for selection." +#~ msgstr "" +#~ "**Moyenne non pondérée** : Les " +#~ "contributions de tous les clients " +#~ "doivent être pondérées de façon égale" +#~ " dans l'ensemble afin que le serveur" +#~ " n'ait pas à connaître à l'avance " +#~ "la somme des poids de tous les " +#~ "clients disponibles pour la sélection." 
+ +#~ msgid "" +#~ "**No client failures** : The set " +#~ "of available clients must stay constant" +#~ " across all rounds of training. In" +#~ " other words, clients cannot drop out" +#~ " or fail." +#~ msgstr "" +#~ "**Aucune défaillance de client** : " +#~ "L'ensemble des clients disponibles doit " +#~ "rester constant pendant toutes les " +#~ "séries de formation. En d'autres termes," +#~ " les clients ne peuvent pas " +#~ "abandonner ou échouer." + +#~ msgid "" +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold, while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." +#~ msgstr "" +#~ "Les deux premiers sont utiles pour " +#~ "éliminer une multitude de complications " +#~ "liées au calibrage du bruit en " +#~ "fonction du seuil d'écrêtage, tandis que" +#~ " le troisième est nécessaire pour se" +#~ " conformer aux hypothèses de l'analyse " +#~ "de la vie privée." + +#~ msgid "" +#~ "These restrictions are in line with " +#~ "constraints imposed by Andrew et al. " +#~ "[andrew]_." +#~ msgstr "" +#~ "Ces restrictions sont conformes aux " +#~ "contraintes imposées par Andrew et al." +#~ " [andrew]_." + +#~ msgid "Customizable Responsibility for Noise injection" +#~ msgstr "Responsabilité personnalisable pour l'injection de bruit" + +#~ msgid "" +#~ "In contrast to other implementations " +#~ "where the addition of noise is " +#~ "performed at the server, you can " +#~ "configure the site of noise injection" +#~ " to better match your threat model." +#~ " We provide users with the " +#~ "flexibility to set up the training " +#~ "such that each client independently adds" +#~ " a small amount of noise to the" +#~ " clipped update, with the result that" +#~ " simply aggregating the noisy updates " +#~ "is equivalent to the explicit addition" +#~ " of noise to the non-noisy " +#~ "aggregate at the server." 
+#~ msgstr "" +#~ "Contrairement à d'autres implémentations où" +#~ " l'ajout de bruit est effectué au " +#~ "niveau du serveur, tu peux configurer" +#~ " le site d'injection de bruit pour" +#~ " qu'il corresponde mieux à ton modèle" +#~ " de menace. Nous offrons aux " +#~ "utilisateurs la possibilité de configurer " +#~ "l'entraînement de telle sorte que chaque" +#~ " client ajoute indépendamment une petite" +#~ " quantité de bruit à la mise à" +#~ " jour écrêtée, ce qui fait que " +#~ "le simple fait d'agréger les mises " +#~ "à jour bruyantes équivaut à l'ajout " +#~ "explicite de bruit à l'agrégat non " +#~ "bruyant au niveau du serveur." + +#~ msgid "" +#~ "To be precise, if we let :math:`m`" +#~ " be the number of clients sampled " +#~ "each round and :math:`\\sigma_\\Delta` be " +#~ "the scale of the total Gaussian " +#~ "noise that needs to be added to" +#~ " the sum of the model updates, " +#~ "we can use simple maths to show" +#~ " that this is equivalent to each " +#~ "client adding noise with scale " +#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." +#~ msgstr "" +#~ "Pour être précis, si nous laissons " +#~ ":math:`m` être le nombre de clients " +#~ "échantillonnés à chaque tour et " +#~ ":math:\\sigma_\\Delta` être l'échelle du bruit" +#~ " gaussien total qui doit être ajouté" +#~ " à la somme des mises à jour" +#~ " du modèle, nous pouvons utiliser des" +#~ " mathématiques simples pour montrer que " +#~ "cela équivaut à ce que chaque " +#~ "client ajoute du bruit avec l'échelle" +#~ " :math:\\sigma_\\Delta/\\sqrt{m}`." + +#~ msgid "Wrapper-based approach" +#~ msgstr "Approche basée sur l'enveloppe" + +#~ msgid "" +#~ "Introducing DP to an existing workload" +#~ " can be thought of as adding an" +#~ " extra layer of security around it." 
+#~ " This inspired us to provide the " +#~ "additional server and client-side logic" +#~ " needed to make the training process" +#~ " differentially private as wrappers for " +#~ "instances of the :code:`Strategy` and " +#~ ":code:`NumPyClient` abstract classes respectively." +#~ " This wrapper-based approach has the" +#~ " advantage of being easily composable " +#~ "with other wrappers that someone might" +#~ " contribute to the Flower library in" +#~ " the future, e.g., for secure " +#~ "aggregation. Using Inheritance instead can " +#~ "be tedious because that would require" +#~ " the creation of new sub- classes " +#~ "every time a new class implementing " +#~ ":code:`Strategy` or :code:`NumPyClient` is " +#~ "defined." +#~ msgstr "" +#~ "L'introduction du DP dans une charge " +#~ "de travail existante peut être " +#~ "considérée comme l'ajout d'une couche de" +#~ " sécurité supplémentaire autour d'elle. " +#~ "Cela nous a incités à fournir la" +#~ " logique supplémentaire côté serveur et " +#~ "côté client nécessaire pour rendre le" +#~ " processus de formation différentiellement " +#~ "privé en tant qu'enveloppes pour les " +#~ "instances des classes abstraites " +#~ ":code:`Strategy` et :code:`NumPyClient` " +#~ "respectivement. Cette approche basée sur " +#~ "l'enveloppe a l'avantage d'être facilement " +#~ "composable avec d'autres enveloppes que " +#~ "quelqu'un pourrait contribuer à la " +#~ "bibliothèque Flower à l'avenir, par " +#~ "exemple, pour l'agrégation sécurisée. " +#~ "L'utilisation de l'héritage à la place" +#~ " peut être fastidieuse car cela " +#~ "nécessiterait la création de nouvelles " +#~ "sous-classes chaque fois qu'une nouvelle" +#~ " classe mettant en œuvre :code:`Strategy`" +#~ " ou :code:`NumPyClient` est définie." 
+ +#~ msgid "" +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean-valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." +#~ msgstr "" +#~ "La première version de notre solution" +#~ " consistait à définir un décorateur " +#~ "dont le constructeur acceptait, entre " +#~ "autres, une variable à valeur booléenne" +#~ " indiquant si l'écrêtage adaptatif devait" +#~ " être activé ou non. Nous nous " +#~ "sommes rapidement rendu compte que cela" +#~ " encombrerait sa fonction :code:`__init__()` " +#~ "avec des variables correspondant aux " +#~ "hyperparamètres de l'écrêtage adaptatif qui" +#~ " resteraient inutilisées lorsque celui-ci" +#~ " était désactivé. Une implémentation plus" +#~ " propre pourrait être obtenue en " +#~ "divisant la fonctionnalité en deux " +#~ "décorateurs, :code:`DPFedAvgFixed` et " +#~ ":code:`DPFedAvgAdaptive`, le second sous-" +#~ "classant le premier. Les constructeurs " +#~ "des deux classes acceptent un paramètre" +#~ " booléen :code:`server_side_noising` qui, comme" +#~ " son nom l'indique, détermine l'endroit " +#~ "où le noising doit être effectué." 
+ +#~ msgid "" +#~ "The server-side capabilities required " +#~ "for the original version of DP-" +#~ "FedAvg, i.e., the one which performed" +#~ " fixed clipping, can be completely " +#~ "captured with the help of wrapper " +#~ "logic for just the following two " +#~ "methods of the :code:`Strategy` abstract " +#~ "class." +#~ msgstr "" +#~ "Les capacités côté serveur requises pour" +#~ " la version originale de DP-FedAvg," +#~ " c'est-à-dire celle qui effectue un " +#~ "écrêtage fixe, peuvent être entièrement " +#~ "capturées à l'aide d'une logique " +#~ "d'enveloppement pour les deux méthodes " +#~ "suivantes de la classe abstraite " +#~ ":code:`Strategy`." + +#~ msgid "" +#~ ":code:`configure_fit()` : The config " +#~ "dictionary being sent by the wrapped " +#~ ":code:`Strategy` to each client needs to" +#~ " be augmented with an additional " +#~ "value equal to the clipping threshold" +#~ " (keyed under :code:`dpfedavg_clip_norm`) and," +#~ " if :code:`server_side_noising=true`, another one" +#~ " equal to the scale of the " +#~ "Gaussian noise that needs to be " +#~ "added at the client (keyed under " +#~ ":code:`dpfedavg_noise_stddev`). This entails " +#~ "*post*-processing of the results returned " +#~ "by the wrappee's implementation of " +#~ ":code:`configure_fit()`." +#~ msgstr "" +#~ ":code:`configure_fit()` : Le dictionnaire de" +#~ " configuration envoyé par la " +#~ ":code:`Strategy` enveloppée à chaque client" +#~ " doit être augmenté d'une valeur " +#~ "supplémentaire égale au seuil d'écrêtage " +#~ "(indiqué sous :code:`dpfedavg_clip_norm`) et, " +#~ "si :code:`server_side_noising=true`, d'une autre " +#~ "égale à l'échelle du bruit gaussien " +#~ "qui doit être ajouté au client " +#~ "(indiqué sous :code:`dpfedavg_noise_stddev`)." + +#~ msgid "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. 
In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." +#~ msgstr "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." 
+ +#~ msgid "" +#~ "We can't directly change the aggregation" +#~ " function of the wrapped strategy to" +#~ " force it to add noise to the" +#~ " aggregate, hence we simulate client-" +#~ "side noising to implement server-side" +#~ " noising." +#~ msgstr "" +#~ "Nous ne pouvons pas modifier directement" +#~ " la fonction d'agrégation de la " +#~ "stratégie enveloppée pour la forcer à" +#~ " ajouter du bruit à l'agrégat, c'est" +#~ " pourquoi nous simulons le bruit côté" +#~ " client pour mettre en œuvre le " +#~ "bruit côté serveur." + +#~ msgid "" +#~ "These changes have been put together " +#~ "into a class called :code:`DPFedAvgFixed`, " +#~ "whose constructor accepts the strategy " +#~ "being decorated, the clipping threshold " +#~ "and the number of clients sampled " +#~ "every round as compulsory arguments. The" +#~ " user is expected to specify the " +#~ "clipping threshold since the order of" +#~ " magnitude of the update norms is " +#~ "highly dependent on the model being " +#~ "trained and providing a default value" +#~ " would be misleading. The number of" +#~ " clients sampled at every round is" +#~ " required to calculate the amount of" +#~ " noise that must be added to " +#~ "each individual update, either by the" +#~ " server or the clients." +#~ msgstr "" +#~ "Ces modifications ont été regroupées " +#~ "dans une classe appelée :code:`DPFedAvgFixed`," +#~ " dont le constructeur accepte la " +#~ "stratégie décorée, le seuil d'écrêtage " +#~ "et le nombre de clients échantillonnés" +#~ " à chaque tour comme arguments " +#~ "obligatoires. L'utilisateur est censé " +#~ "spécifier le seuil d'écrêtage car " +#~ "l'ordre de grandeur des normes de " +#~ "mise à jour dépend fortement du " +#~ "modèle formé et fournir une valeur " +#~ "par défaut serait trompeur. 
Le nombre" +#~ " de clients échantillonnés à chaque " +#~ "tour est nécessaire pour calculer la " +#~ "quantité de bruit qui doit être " +#~ "ajoutée à chaque mise à jour " +#~ "individuelle, que ce soit par le " +#~ "serveur ou par les clients." + +#~ msgid "" +#~ "The additional functionality required to " +#~ "facilitate adaptive clipping has been " +#~ "provided in :code:`DPFedAvgAdaptive`, a " +#~ "subclass of :code:`DPFedAvgFixed`. It " +#~ "overrides the above-mentioned methods to" +#~ " do the following." +#~ msgstr "" +#~ "La fonctionnalité supplémentaire nécessaire " +#~ "pour faciliter l'écrêtage adaptatif a " +#~ "été fournie dans :code:`DPFedAvgAdaptive`, une" +#~ " sous-classe de :code:`DPFedAvgFixed`. Elle" +#~ " remplace les méthodes mentionnées ci-" +#~ "dessus pour effectuer les opérations " +#~ "suivantes." + +#~ msgid "" +#~ ":code:`configure_fit()` : It intercepts the" +#~ " config dict returned by " +#~ ":code:`super.configure_fit()` to add the " +#~ "key-value pair " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " +#~ "which the client interprets as an " +#~ "instruction to include an indicator bit" +#~ " (1 if update norm <= clipping " +#~ "threshold, 0 otherwise) in the results" +#~ " returned by it." +#~ msgstr "" +#~ ":code:`configure_fit()` : Il intercepte le " +#~ "dict de configuration renvoyé par " +#~ ":code:`super.configure_fit()` pour y ajouter " +#~ "la paire clé-valeur " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True`, que le " +#~ "client interprète comme une instruction " +#~ "d'inclure un bit indicateur (1 si " +#~ "la norme de mise à jour <= " +#~ "seuil d'écrêtage, 0 sinon) dans les " +#~ "résultats qu'il renvoie." + +#~ msgid "" +#~ ":code:`aggregate_fit()` : It follows a " +#~ "call to :code:`super.aggregate_fit()` with one" +#~ " to :code:`__update_clip_norm__()`, a procedure" +#~ " which adjusts the clipping threshold " +#~ "on the basis of the indicator bits" +#~ " received from the sampled clients." 
+#~ msgstr "" +#~ ":code:`aggregate_fit()` : Il fait suivre " +#~ "un appel à :code:`super.aggregate_fit()` d'un" +#~ " appel à :code:`__update_clip_norm__()`, une " +#~ "procédure qui ajuste le seuil d'écrêtage" +#~ " sur la base des bits indicateurs " +#~ "reçus des clients échantillonnés." + +#~ msgid "" +#~ "The client-side capabilities required " +#~ "can be completely captured through " +#~ "wrapper logic for just the :code:`fit()`" +#~ " method of the :code:`NumPyClient` abstract" +#~ " class. To be precise, we need " +#~ "to *post-process* the update computed" +#~ " by the wrapped client to clip " +#~ "it, if necessary, to the threshold " +#~ "value supplied by the server as " +#~ "part of the config dictionary. In " +#~ "addition to this, it may need to" +#~ " perform some extra work if either" +#~ " (or both) of the following keys " +#~ "are also present in the dict." +#~ msgstr "" +#~ "Les capacités requises côté client " +#~ "peuvent être entièrement capturées par " +#~ "une logique de wrapper pour la " +#~ "seule méthode :code:`fit()` de la classe" +#~ " abstraite :code:`NumPyClient`. Pour être " +#~ "précis, nous devons *post-traiter* la" +#~ " mise à jour calculée par le " +#~ "client wrapped pour l'écrêter, si " +#~ "nécessaire, à la valeur seuil fournie" +#~ " par le serveur dans le cadre " +#~ "du dictionnaire de configuration. En " +#~ "plus de cela, il peut avoir besoin" +#~ " d'effectuer un travail supplémentaire si" +#~ " l'une des clés suivantes (ou les " +#~ "deux) est également présente dans le " +#~ "dict." + +#~ msgid "" +#~ ":code:`dpfedavg_noise_stddev` : Generate and " +#~ "add the specified amount of noise " +#~ "to the clipped update." +#~ msgstr "" +#~ ":code:`dpfedavg_noise_stddev` : Génère et " +#~ "ajoute la quantité de bruit spécifiée" +#~ " à la mise à jour de " +#~ "l'écrêtage." 
+ +#~ msgid "" +#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" +#~ " metrics dict in the :code:`FitRes` " +#~ "object being returned to the server " +#~ "with an indicator bit, calculated as " +#~ "described earlier." +#~ msgstr "" +#~ ":code:`dpfedavg_adaptive_clip_enabled` : Complète " +#~ "les métriques dict dans l'objet " +#~ ":code:`FitRes` renvoyé au serveur avec " +#~ "un bit indicateur, calculé comme décrit" +#~ " précédemment." + +#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" +#~ msgstr "Effectuer l'analyse :math:`(\\epsilon, \\delta)`" + +#~ msgid "" +#~ "Assume you have trained for :math:`n`" +#~ " rounds with sampling fraction :math:`q`" +#~ " and noise multiplier :math:`z`. In " +#~ "order to calculate the :math:`\\epsilon` " +#~ "value this would result in for a" +#~ " particular :math:`\\delta`, the following " +#~ "script may be used." +#~ msgstr "" +#~ "Supposons que tu te sois entraîné " +#~ "pendant :math:`n` tours avec la fraction" +#~ " d'échantillonnage :math:`q` et le " +#~ "multiplicateur de bruit :math:`z`. Afin " +#~ "de calculer la valeur :math:`epsilon` " +#~ "qui en résulterait pour un " +#~ ":math:`\\delta` particulier, le script suivant" +#~ " peut être utilisé." + +#~ msgid "" +#~ "`How to run Flower using Docker " +#~ "`_" +#~ msgstr "" + +#~ msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`ClientApp `\\ " +#~ "\\(client\\_fn\\[\\, mods\\]\\)" +#~ msgstr "" + +#~ msgid ":py:obj:`flwr.server.driver `\\" +#~ msgstr "" + +#~ msgid "Flower driver SDK." 
+#~ msgstr "Serveur de Flower" + +#~ msgid "driver" +#~ msgstr "serveur" + +#~ msgid "" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`GrpcDriver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "`GrpcDriver` provides access to the gRPC Driver API/service." +#~ msgstr "" + +#~ msgid ":py:obj:`get_nodes `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(task\\_ids\\)" +#~ msgstr "" + +#~ msgid "Get task results." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`push_task_ins " +#~ "`\\ " +#~ "\\(task\\_ins\\_list\\)" +#~ msgstr "" + +#~ msgid "Schedule tasks." +#~ msgstr "" + +#~ msgid "GrpcDriver" +#~ msgstr "" + +#~ msgid ":py:obj:`connect `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Connect to the Driver API." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`create_run " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "Request for run ID." +#~ msgstr "Demande pour une nouvelle Flower Baseline" + +#~ msgid "" +#~ ":py:obj:`disconnect " +#~ "`\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Disconnect from the Driver API." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`get_nodes `\\" +#~ " \\(req\\)" +#~ msgstr "" + +#~ msgid "Get client IDs." +#~ msgstr "Moteur client Edge" + +#~ msgid "" +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`push_task_ins " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "running the clients' jobs (i.e. their" +#~ " `fit()` method)." +#~ msgstr "" + +#~ msgid "" +#~ "Much effort went into a completely " +#~ "restructured Flower docs experience. 
The " +#~ "documentation on [flower.ai/docs](flower.ai/docs) is" +#~ " now divided into Flower Framework, " +#~ "Flower Baselines, Flower Android SDK, " +#~ "Flower iOS SDK, and code example " +#~ "projects." +#~ msgstr "" + +#~ msgid "" +#~ "Flower usage examples used to be " +#~ "bundled with Flower in a package " +#~ "called ``flwr_example``. We are migrating " +#~ "those examples to standalone projects to" +#~ " make them easier to use. All " +#~ "new examples are based in the " +#~ "directory `examples " +#~ "`_." +#~ msgstr "" +#~ "Les exemples d'utilisation de Flower " +#~ "étaient auparavant regroupés avec Flower " +#~ "dans un paquet appelé ``flwr_example``. " +#~ "Nous migrons ces exemples vers des " +#~ "projets autonomes pour les rendre plus" +#~ " faciles à utiliser. Tous les " +#~ "nouveaux exemples sont basés dans le " +#~ "répertoire ``examples " +#~ "`_." + +#~ msgid "Quickstart TensorFlow/Keras" +#~ msgstr "Démarrage rapide de TensorFlow/Keras" + +#~ msgid "Legacy Examples (`flwr_example`)" +#~ msgstr "Exemples hérités (`flwr_example`)" + +#~ msgid "" +#~ "The useage examples in `flwr_example` " +#~ "are deprecated and will be removed " +#~ "in the future. New examples are " +#~ "provided as standalone projects in " +#~ "`examples `_." +#~ msgstr "" +#~ "Les exemples d'utilisation dans `flwr_example`" +#~ " sont obsolètes et seront supprimés à" +#~ " l'avenir. De nouveaux exemples sont " +#~ "fournis en tant que projets autonomes" +#~ " dans `examples " +#~ "`_." + +#~ msgid "Extra Dependencies" +#~ msgstr "Dépendances supplémentaires" + +#~ msgid "" +#~ "The core Flower framework keeps a " +#~ "minimal set of dependencies. The " +#~ "examples demonstrate Flower in the " +#~ "context of different machine learning " +#~ "frameworks, so additional dependencies need" +#~ " to be installed before an example" +#~ " can be run." +#~ msgstr "" +#~ "Le noyau du framework Flower conserve" +#~ " un ensemble minimal de dépendances. 
" +#~ "Les exemples démontrent Flower dans le" +#~ " contexte de différents frameworks " +#~ "d'apprentissage automatique, de sorte que " +#~ "des dépendances supplémentaires doivent être" +#~ " installées avant qu'un exemple puisse " +#~ "être exécuté." + +#~ msgid "For PyTorch examples::" +#~ msgstr "Pour les exemples de PyTorch: :" + +#~ msgid "For TensorFlow examples::" +#~ msgstr "Pour les exemples de TensorFlow : :" + +#~ msgid "For both PyTorch and TensorFlow examples::" +#~ msgstr "Pour les exemples PyTorch et TensorFlow: :" + +#~ msgid "" +#~ "Please consult :code:`pyproject.toml` for a" +#~ " full list of possible extras " +#~ "(section :code:`[tool.poetry.extras]`)." +#~ msgstr "" +#~ "Tu peux consulter :code:`pyproject.toml` pour" +#~ " une liste complète des extras " +#~ "possibles (section :code:`[tool.poetry.extras]`)." + +#~ msgid "PyTorch Examples" +#~ msgstr "Exemples de PyTorch" + +#~ msgid "" +#~ "Our PyTorch examples are based on " +#~ "PyTorch 1.7. They should work with " +#~ "other releases as well. So far, we" +#~ " provide the following examples." +#~ msgstr "" +#~ "Nos exemples PyTorch sont basés sur " +#~ "PyTorch 1.7. Ils devraient fonctionner " +#~ "avec d'autres versions également. Jusqu'à " +#~ "présent, nous fournissons les exemples " +#~ "suivants." + +#~ msgid "CIFAR-10 Image Classification" +#~ msgstr "Classification d'images CIFAR-10" + +#~ msgid "" +#~ "`CIFAR-10 and CIFAR-100 " +#~ "`_ are " +#~ "popular RGB image datasets. The Flower" +#~ " CIFAR-10 example uses PyTorch to " +#~ "train a simple CNN classifier in a" +#~ " federated learning setup with two " +#~ "clients." +#~ msgstr "" +#~ "`CIFAR-10 et CIFAR-100 " +#~ "`_ sont des" +#~ " ensembles de données d'images RVB " +#~ "populaires. L'exemple Flower CIFAR-10 utilise" +#~ " PyTorch pour former un classificateur " +#~ "CNN simple dans une configuration " +#~ "d'apprentissage fédéré avec deux clients." 
+ +#~ msgid "First, start a Flower server:" +#~ msgstr "Tout d'abord, démarre un serveur Flower :" + +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" + +#~ msgid "Then, start the two clients in a new terminal window:" +#~ msgstr "" +#~ "Ensuite, démarre les deux clients dans" +#~ " une nouvelle fenêtre de terminal :" + +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." +#~ msgstr "Pour plus de détails, voir :code:`src/py/flwr_example/pytorch_cifar`." + +#~ msgid "ImageNet-2012 Image Classification" +#~ msgstr "ImageNet-2012 Classification des images" + +#~ msgid "" +#~ "`ImageNet-2012 `_ is " +#~ "one of the major computer vision " +#~ "datasets. The Flower ImageNet example " +#~ "uses PyTorch to train a ResNet-18 " +#~ "classifier in a federated learning setup" +#~ " with ten clients." +#~ msgstr "" +#~ "`ImageNet-2012 `_ est " +#~ "l'un des principaux ensembles de données" +#~ " de vision par ordinateur. L'exemple " +#~ "Flower ImageNet utilise PyTorch pour " +#~ "entraîner un classificateur ResNet-18 dans " +#~ "une configuration d'apprentissage fédéré avec" +#~ " dix clients." + +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" + +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." +#~ msgstr "" +#~ "Pour plus de détails, voir " +#~ ":code:`src/py/flwr_example/pytorch_imagenet`." + +#~ msgid "TensorFlow Examples" +#~ msgstr "Exemples de TensorFlow" + +#~ msgid "" +#~ "Our TensorFlow examples are based on " +#~ "TensorFlow 2.0 or newer. 
So far, " +#~ "we provide the following examples." +#~ msgstr "" +#~ "Nos exemples TensorFlow sont basés sur" +#~ " TensorFlow 2.0 ou une version plus" +#~ " récente. Jusqu'à présent, nous te " +#~ "proposons les exemples suivants." + +#~ msgid "Fashion-MNIST Image Classification" +#~ msgstr "Classification d'images Fashion-MNIST" + +#~ msgid "" +#~ "`Fashion-MNIST `_ is often used as " +#~ "the \"Hello, world!\" of machine " +#~ "learning. We follow this tradition and" +#~ " provide an example which samples " +#~ "random local datasets from Fashion-MNIST" +#~ " and trains a simple image " +#~ "classification model over those partitions." +#~ msgstr "" +#~ "nous suivons cette tradition et " +#~ "fournissons un exemple qui échantillonne " +#~ "des ensembles de données locales " +#~ "aléatoires de Fashion-MNIST et entraîne" +#~ " un modèle simple de classification " +#~ "d'images sur ces partitions." + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" + +#~ msgid "" +#~ "For more details, see " +#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +#~ msgstr "" +#~ "Pour plus de détails, voir " +#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." + +#~ msgid "" +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongise Flower," +#~ " for example, PyTorch. This tutorial " +#~ "might be removed in future versions " +#~ "of Flower." +#~ msgstr "" + +#~ msgid "" +#~ "Now that you have known how " +#~ "federated XGBoost work with Flower, it's" +#~ " time to run some more comprehensive" +#~ " experiments by customising the " +#~ "experimental settings. 
In the xgboost-" +#~ "comprehensive example (`full code " +#~ "`_), we provide more options " +#~ "to define various experimental setups, " +#~ "including aggregation strategies, data " +#~ "partitioning and centralised/distributed evaluation." +#~ " We also support `Flower simulation " +#~ "`_ making it easy to " +#~ "simulate large client cohorts in a " +#~ "resource-aware manner. Let's take a " +#~ "look!" +#~ msgstr "" + +#~ msgid "|31e4b1afa87c4b968327bbeafbf184d4|" +#~ msgstr "" + +#~ msgid "|c9d935b4284e4c389a33d86b33e07c0a|" +#~ msgstr "" + +#~ msgid "|00727b5faffb468f84dd1b03ded88638|" +#~ msgstr "" + +#~ msgid "|daf0cf0ff4c24fd29439af78416cf47b|" +#~ msgstr "" + +#~ msgid "|9f093007080d471d94ca90d3e9fde9b6|" +#~ msgstr "" + +#~ msgid "|46a26e6150e0479fbd3dfd655f36eb13|" +#~ msgstr "" + +#~ msgid "|3daba297595c4c7fb845d90404a6179a|" +#~ msgstr "" + +#~ msgid "|5769874fa9c4455b80b2efda850d39d7|" +#~ msgstr "" + +#~ msgid "|ba47ffb421814b0f8f9fa5719093d839|" +#~ msgstr "" + +#~ msgid "|aeac5bf79cbf497082e979834717e01b|" +#~ msgstr "" + +#~ msgid "|ce27ed4bbe95459dba016afc42486ba2|" +#~ msgstr "" + +#~ msgid "|ae94a7f71dda443cbec2385751427d41|" +#~ msgstr "" + +#~ msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" +#~ msgstr "" + +#~ msgid "|08cb60859b07461588fe44e55810b050|" +#~ msgstr "" + diff --git a/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po b/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po index 5a5d736ece38..4e117619f9b5 100644 --- a/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po +++ b/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-02-13 11:23+0100\n" +"POT-Creation-Date: 2024-03-15 14:23+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language: pt_BR\n" @@ -17,7 +17,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" 
-"Generated-By: Babel 2.13.1\n" +"Generated-By: Babel 2.14.0\n" #: ../../source/contributor-explanation-architecture.rst:2 msgid "Flower Architecture" @@ -83,9 +83,8 @@ msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:19 msgid "" -"Please follow the first section on `Run Flower using Docker " -"`_ " -"which covers this step in more detail." +"Please follow the first section on :doc:`Run Flower using Docker ` which covers this step in more detail." msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:23 @@ -287,7 +286,7 @@ msgid "" "to help us in our effort to make Federated Learning accessible to as many" " people as possible by contributing to those translations! This might " "also be a great opportunity for those wanting to become open source " -"contributors with little prerequistes." +"contributors with little prerequisites." msgstr "" #: ../../source/contributor-how-to-contribute-translations.rst:13 @@ -338,7 +337,7 @@ msgstr "" #: ../../source/contributor-how-to-contribute-translations.rst:47 msgid "" -"You input your translation in the textbox at the top and then, once you " +"You input your translation in the text box at the top and then, once you " "are happy with it, you either press ``Save and continue`` (to save the " "translation and go to the next untranslated string), ``Save and stay`` " "(to save the translation and stay on the same page), ``Suggest`` (to add " @@ -376,8 +375,8 @@ msgstr "" #: ../../source/contributor-how-to-contribute-translations.rst:69 msgid "" "If you want to add a new language, you will first have to contact us, " -"either on `Slack `_, or by opening an " -"issue on our `GitHub repo `_." +"either on `Slack `_, or by opening an issue" +" on our `GitHub repo `_." msgstr "" #: ../../source/contributor-how-to-create-new-messages.rst:2 @@ -419,8 +418,8 @@ msgid "" "The first thing we need to do is to define a message type for the RPC " "system in :code:`transport.proto`. 
Note that we have to do it for both " "the request and response messages. For more details on the syntax of " -"proto3, please see the `official documentation " -"`_." +"proto3, please see the `official documentation `_." msgstr "" #: ../../source/contributor-how-to-create-new-messages.rst:35 @@ -530,7 +529,7 @@ msgstr "" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:11 msgid "" "Source: `Official VSCode documentation " -"`_" +"`_" msgstr "" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:15 @@ -567,14 +566,14 @@ msgstr "" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:23 msgid "" "`Developing inside a Container " -"`_" msgstr "" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:24 msgid "" "`Remote development in Containers " -"`_" +"`_" msgstr "" #: ../../source/contributor-how-to-install-development-versions.rst:2 @@ -823,8 +822,8 @@ msgstr "" #: ../../source/contributor-how-to-release-flower.rst:25 msgid "" -"Merge the pull request on the same day (i.e., before a new nightly release" -" gets published to PyPI)." +"Merge the pull request on the same day (i.e., before a new nightly " +"release gets published to PyPI)." msgstr "" #: ../../source/contributor-how-to-release-flower.rst:28 @@ -837,8 +836,8 @@ msgstr "" #: ../../source/contributor-how-to-release-flower.rst:33 msgid "" -"PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases " -"MUST use one of the following naming patterns:" +"PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases" +" MUST use one of the following naming patterns:" msgstr "" #: ../../source/contributor-how-to-release-flower.rst:35 @@ -1114,8 +1113,8 @@ msgstr "" #: ../../source/contributor-ref-good-first-contributions.rst:25 msgid "" "If you are not familiar with Flower Baselines, you should probably check-" -"out our `contributing guide for baselines `_." 
+"out our `contributing guide for baselines " +"`_." msgstr "" #: ../../source/contributor-ref-good-first-contributions.rst:27 @@ -1123,7 +1122,7 @@ msgid "" "You should then check out the open `issues " "`_" " for baseline requests. If you find a baseline that you'd like to work on" -" and that has no assignes, feel free to assign it to yourself and start " +" and that has no assignees, feel free to assign it to yourself and start " "working on it!" msgstr "" @@ -1208,42 +1207,41 @@ msgstr "" #: ../../source/contributor-tutorial-contribute-on-github.rst:6 msgid "" "If you're familiar with how contributing on GitHub works, you can " -"directly checkout our `getting started guide for contributors " -"`_." +"directly checkout our :doc:`getting started guide for contributors " +"`." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:11 +#: ../../source/contributor-tutorial-contribute-on-github.rst:10 msgid "Setting up the repository" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:22 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "**Create a GitHub account and setup Git**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:14 +#: ../../source/contributor-tutorial-contribute-on-github.rst:13 msgid "" "Git is a distributed version control tool. This allows for an entire " "codebase's history to be stored and every developer's machine. It is a " "software that will need to be installed on your local machine, you can " -"follow this `guide `_ to set it up." +"follow this `guide `_ to set it up." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:17 +#: ../../source/contributor-tutorial-contribute-on-github.rst:16 msgid "" "GitHub, itself, is a code hosting platform for version control and " "collaboration. It allows for everyone to collaborate and work from " "anywhere on remote repositories." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:19 +#: ../../source/contributor-tutorial-contribute-on-github.rst:18 msgid "" "If you haven't already, you will need to create an account on `GitHub " "`_." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:21 +#: ../../source/contributor-tutorial-contribute-on-github.rst:20 msgid "" "The idea behind the generic Git and GitHub workflow boils down to this: " "you download code from a remote repository on GitHub, make changes " @@ -1251,19 +1249,19 @@ msgid "" "history back to GitHub." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:33 +#: ../../source/contributor-tutorial-contribute-on-github.rst:32 msgid "**Forking the Flower repository**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:25 +#: ../../source/contributor-tutorial-contribute-on-github.rst:24 msgid "" "A fork is a personal copy of a GitHub repository. To create one for " -"Flower, you must navigate to https://github.com/adap/flower (while " +"Flower, you must navigate to ``_ (while " "connected to your GitHub account) and click the ``Fork`` button situated " "on the top right of the page." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:30 +#: ../../source/contributor-tutorial-contribute-on-github.rst:29 msgid "" "You can change the name if you want, but this is not necessary as this " "version of Flower will be yours and will sit inside your own account " @@ -1271,11 +1269,11 @@ msgid "" " the top left corner that you are looking at your own version of Flower." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:48 +#: ../../source/contributor-tutorial-contribute-on-github.rst:47 msgid "**Cloning your forked repository**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:36 +#: ../../source/contributor-tutorial-contribute-on-github.rst:35 msgid "" "The next step is to download the forked repository on your machine to be " "able to make changes to it. On your forked repository page, you should " @@ -1283,27 +1281,27 @@ msgid "" "ability to copy the HTTPS link of the repository." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:42 +#: ../../source/contributor-tutorial-contribute-on-github.rst:41 msgid "" "Once you copied the \\, you can open a terminal on your machine, " "navigate to the place you want to download the repository to and type:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:48 +#: ../../source/contributor-tutorial-contribute-on-github.rst:47 msgid "" "This will create a ``flower/`` (or the name of your fork if you renamed " "it) folder in the current working directory." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:67 +#: ../../source/contributor-tutorial-contribute-on-github.rst:66 msgid "**Add origin**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:51 +#: ../../source/contributor-tutorial-contribute-on-github.rst:50 msgid "You can then go into the repository folder:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:57 +#: ../../source/contributor-tutorial-contribute-on-github.rst:56 msgid "" "And here we will need to add an origin to our repository. The origin is " "the \\ of the remote fork repository. To obtain it, we can do as " @@ -1311,27 +1309,27 @@ msgid "" "account and copying the link." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:62 +#: ../../source/contributor-tutorial-contribute-on-github.rst:61 msgid "" "Once the \\ is copied, we can type the following command in our " "terminal:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:91 +#: ../../source/contributor-tutorial-contribute-on-github.rst:90 msgid "**Add upstream**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:70 +#: ../../source/contributor-tutorial-contribute-on-github.rst:69 msgid "" "Now we will add an upstream address to our repository. Still in the same " -"directroy, we must run the following command:" +"directory, we must run the following command:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:77 +#: ../../source/contributor-tutorial-contribute-on-github.rst:76 msgid "The following diagram visually explains what we did in the previous steps:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:81 +#: ../../source/contributor-tutorial-contribute-on-github.rst:80 msgid "" "The upstream is the GitHub remote address of the parent repository (in " "this case Flower), i.e. the one we eventually want to contribute to and " @@ -1340,169 +1338,169 @@ msgid "" "in our own account." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:85 +#: ../../source/contributor-tutorial-contribute-on-github.rst:84 msgid "" "To make sure our local version of the fork is up-to-date with the latest " "changes from the Flower repository, we can execute the following command:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:94 +#: ../../source/contributor-tutorial-contribute-on-github.rst:93 msgid "Setting up the coding environment" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:96 +#: ../../source/contributor-tutorial-contribute-on-github.rst:95 msgid "" -"This can be achieved by following this `getting started guide for " -"contributors`_ (note that you won't need to clone the repository). Once " -"you are able to write code and test it, you can finally start making " -"changes!" +"This can be achieved by following this :doc:`getting started guide for " +"contributors ` (note " +"that you won't need to clone the repository). Once you are able to write " +"code and test it, you can finally start making changes!" 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:101 +#: ../../source/contributor-tutorial-contribute-on-github.rst:100 msgid "Making changes" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:103 +#: ../../source/contributor-tutorial-contribute-on-github.rst:102 msgid "" "Before making any changes make sure you are up-to-date with your " "repository:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:109 +#: ../../source/contributor-tutorial-contribute-on-github.rst:108 msgid "And with Flower's repository:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:123 +#: ../../source/contributor-tutorial-contribute-on-github.rst:122 msgid "**Create a new branch**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:116 +#: ../../source/contributor-tutorial-contribute-on-github.rst:115 msgid "" "To make the history cleaner and easier to work with, it is good practice " "to create a new branch for each feature/project that needs to be " "implemented." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:119 +#: ../../source/contributor-tutorial-contribute-on-github.rst:118 msgid "" "To do so, just run the following command inside the repository's " "directory:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:126 +#: ../../source/contributor-tutorial-contribute-on-github.rst:125 msgid "**Make changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:126 +#: ../../source/contributor-tutorial-contribute-on-github.rst:125 msgid "Write great code and create wonderful changes using your favorite editor!" 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:139 +#: ../../source/contributor-tutorial-contribute-on-github.rst:138 msgid "**Test and format your code**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:129 +#: ../../source/contributor-tutorial-contribute-on-github.rst:128 msgid "" "Don't forget to test and format your code! Otherwise your code won't be " "able to be merged into the Flower repository. This is done so the " "codebase stays consistent and easy to understand." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:132 +#: ../../source/contributor-tutorial-contribute-on-github.rst:131 msgid "To do so, we have written a few scripts that you can execute:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:151 +#: ../../source/contributor-tutorial-contribute-on-github.rst:150 msgid "**Stage changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:142 +#: ../../source/contributor-tutorial-contribute-on-github.rst:141 msgid "" "Before creating a commit that will update your history, you must specify " "to Git which files it needs to take into account." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:144 +#: ../../source/contributor-tutorial-contribute-on-github.rst:143 msgid "This can be done with:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:150 +#: ../../source/contributor-tutorial-contribute-on-github.rst:149 msgid "" "To check which files have been modified compared to the last version " "(last commit) and to see which files are staged for commit, you can use " "the :code:`git status` command." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:161 +#: ../../source/contributor-tutorial-contribute-on-github.rst:160 msgid "**Commit changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:154 +#: ../../source/contributor-tutorial-contribute-on-github.rst:153 msgid "" "Once you have added all the files you wanted to commit using :code:`git " "add`, you can finally create your commit using this command:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:160 +#: ../../source/contributor-tutorial-contribute-on-github.rst:159 msgid "" "The \\ is there to explain to others what the commit " "does. It should be written in an imperative style and be concise. An " "example would be :code:`git commit -m \"Add images to README\"`." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:172 +#: ../../source/contributor-tutorial-contribute-on-github.rst:171 msgid "**Push the changes to the fork**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:164 +#: ../../source/contributor-tutorial-contribute-on-github.rst:163 msgid "" "Once we have committed our changes, we have effectively updated our local" " history, but GitHub has no way of knowing this unless we push our " "changes to our origin's remote address:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:171 +#: ../../source/contributor-tutorial-contribute-on-github.rst:170 msgid "" "Once this is done, you will see on the GitHub that your forked repo was " "updated with the changes you have made." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:175 +#: ../../source/contributor-tutorial-contribute-on-github.rst:174 msgid "Creating and merging a pull request (PR)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +#: ../../source/contributor-tutorial-contribute-on-github.rst:205 msgid "**Create the PR**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:178 +#: ../../source/contributor-tutorial-contribute-on-github.rst:177 msgid "" "Once you have pushed changes, on the GitHub webpage of your repository " "you should see the following message:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:182 +#: ../../source/contributor-tutorial-contribute-on-github.rst:181 msgid "Otherwise you can always find this option in the ``Branches`` page." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:184 +#: ../../source/contributor-tutorial-contribute-on-github.rst:183 msgid "" "Once you click the ``Compare & pull request`` button, you should see " "something similar to this:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:188 +#: ../../source/contributor-tutorial-contribute-on-github.rst:187 msgid "At the top you have an explanation of which branch will be merged where:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:192 +#: ../../source/contributor-tutorial-contribute-on-github.rst:191 msgid "" "In this example you can see that the request is to merge the branch " "``doc-fixes`` from my forked repository to branch ``main`` from the " "Flower repository." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:194 +#: ../../source/contributor-tutorial-contribute-on-github.rst:193 msgid "" "The input box in the middle is there for you to describe what your PR " "does and to link it to existing issues. We have placed comments (that " @@ -1510,7 +1508,7 @@ msgid "" "process." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:197 +#: ../../source/contributor-tutorial-contribute-on-github.rst:196 msgid "" "It is important to follow the instructions described in comments. For " "instance, in order to not break how our changelog system works, you " @@ -1519,163 +1517,163 @@ msgid "" ":ref:`changelogentry` appendix." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:201 +#: ../../source/contributor-tutorial-contribute-on-github.rst:200 msgid "" "At the bottom you will find the button to open the PR. This will notify " "reviewers that a new PR has been opened and that they should look over it" " to merge or to request changes." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:204 +#: ../../source/contributor-tutorial-contribute-on-github.rst:203 msgid "" "If your PR is not yet ready for review, and you don't want to notify " "anyone, you have the option to create a draft pull request:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:208 msgid "**Making new changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:208 msgid "" "Once the PR has been opened (as draft or not), you can still push new " "commits to it the same way we did before, by making changes to the branch" " associated with the PR." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:231 +#: ../../source/contributor-tutorial-contribute-on-github.rst:230 msgid "**Review the PR**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:212 +#: ../../source/contributor-tutorial-contribute-on-github.rst:211 msgid "" "Once the PR has been opened or once the draft PR has been marked as " "ready, a review from code owners will be automatically requested:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:216 +#: ../../source/contributor-tutorial-contribute-on-github.rst:215 msgid "" "Code owners will then look into the code, ask questions, request changes " "or validate the PR." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:218 +#: ../../source/contributor-tutorial-contribute-on-github.rst:217 msgid "Merging will be blocked if there are ongoing requested changes." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:222 +#: ../../source/contributor-tutorial-contribute-on-github.rst:221 msgid "" "To resolve them, just push the necessary changes to the branch associated" " with the PR:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:226 +#: ../../source/contributor-tutorial-contribute-on-github.rst:225 msgid "And resolve the conversation:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:230 +#: ../../source/contributor-tutorial-contribute-on-github.rst:229 msgid "" "Once all the conversations have been resolved, you can re-request a " "review." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:251 +#: ../../source/contributor-tutorial-contribute-on-github.rst:250 msgid "**Once the PR is merged**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:234 +#: ../../source/contributor-tutorial-contribute-on-github.rst:233 msgid "" "If all the automatic tests have passed and reviewers have no more changes" " to request, they can approve the PR and merge it." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:238 +#: ../../source/contributor-tutorial-contribute-on-github.rst:237 msgid "" "Once it is merged, you can delete the branch on GitHub (a button should " "appear to do so) and also delete it locally by doing:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:245 +#: ../../source/contributor-tutorial-contribute-on-github.rst:244 msgid "Then you should update your forked repository by doing:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:254 +#: ../../source/contributor-tutorial-contribute-on-github.rst:253 msgid "Example of first contribution" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:257 +#: ../../source/contributor-tutorial-contribute-on-github.rst:256 msgid "Problem" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:259 +#: ../../source/contributor-tutorial-contribute-on-github.rst:258 msgid "" -"For our documentation, we’ve started to use the `Diàtaxis framework " +"For our documentation, we've started to use the `Diàtaxis framework " "`_." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:261 +#: ../../source/contributor-tutorial-contribute-on-github.rst:260 msgid "" -"Our “How to” guides should have titles that continue the sencence “How to" -" …”, for example, “How to upgrade to Flower 1.0”." 
+"Our \"How to\" guides should have titles that continue the sentence \"How" +" to …\", for example, \"How to upgrade to Flower 1.0\"." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:263 +#: ../../source/contributor-tutorial-contribute-on-github.rst:262 msgid "" "Most of our guides do not follow this new format yet, and changing their " "title is (unfortunately) more involved than one might think." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:265 +#: ../../source/contributor-tutorial-contribute-on-github.rst:264 msgid "" -"This issue is about changing the title of a doc from present continious " +"This issue is about changing the title of a doc from present continuous " "to present simple." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:267 +#: ../../source/contributor-tutorial-contribute-on-github.rst:266 msgid "" -"Let's take the example of “Saving Progress” which we changed to “Save " -"Progress”. Does this pass our check?" +"Let's take the example of \"Saving Progress\" which we changed to \"Save " +"Progress\". Does this pass our check?" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:269 -msgid "Before: ”How to saving progress” ❌" +#: ../../source/contributor-tutorial-contribute-on-github.rst:268 +msgid "Before: \"How to saving progress\" ❌" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:271 -msgid "After: ”How to save progress” ✅" +#: ../../source/contributor-tutorial-contribute-on-github.rst:270 +msgid "After: \"How to save progress\" ✅" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:274 +#: ../../source/contributor-tutorial-contribute-on-github.rst:273 msgid "Solution" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:276 +#: ../../source/contributor-tutorial-contribute-on-github.rst:275 msgid "" -"This is a tiny change, but it’ll allow us to test your end-to-end setup. 
" -"After cloning and setting up the Flower repo, here’s what you should do:" +"This is a tiny change, but it'll allow us to test your end-to-end setup. " +"After cloning and setting up the Flower repo, here's what you should do:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:278 +#: ../../source/contributor-tutorial-contribute-on-github.rst:277 msgid "Find the source file in ``doc/source``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:279 +#: ../../source/contributor-tutorial-contribute-on-github.rst:278 msgid "" "Make the change in the ``.rst`` file (beware, the dashes under the title " "should be the same length as the title itself)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:280 +#: ../../source/contributor-tutorial-contribute-on-github.rst:279 msgid "" -"Build the docs and check the result: ``_" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:283 +#: ../../source/contributor-tutorial-contribute-on-github.rst:282 msgid "Rename file" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:285 +#: ../../source/contributor-tutorial-contribute-on-github.rst:284 msgid "" "You might have noticed that the file name still reflects the old wording." " If we just change the file, then we break all existing links to it - it " @@ -1683,77 +1681,77 @@ msgid "" "engine ranking." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:288 -msgid "Here’s how to change the file name:" +#: ../../source/contributor-tutorial-contribute-on-github.rst:287 +msgid "Here's how to change the file name:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:290 +#: ../../source/contributor-tutorial-contribute-on-github.rst:289 msgid "Change the file name to ``save-progress.rst``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:291 +#: ../../source/contributor-tutorial-contribute-on-github.rst:290 msgid "Add a redirect rule to ``doc/source/conf.py``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:293 +#: ../../source/contributor-tutorial-contribute-on-github.rst:292 msgid "" "This will cause a redirect from ``saving-progress.html`` to ``save-" "progress.html``, old links will continue to work." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:296 +#: ../../source/contributor-tutorial-contribute-on-github.rst:295 msgid "Apply changes in the index file" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:298 +#: ../../source/contributor-tutorial-contribute-on-github.rst:297 msgid "" "For the lateral navigation bar to work properly, it is very important to " "update the ``index.rst`` file as well. This is where we define the whole " "arborescence of the navbar." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:301 +#: ../../source/contributor-tutorial-contribute-on-github.rst:300 msgid "Find and modify the file name in ``index.rst``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:304 +#: ../../source/contributor-tutorial-contribute-on-github.rst:303 msgid "Open PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:306 +#: ../../source/contributor-tutorial-contribute-on-github.rst:305 msgid "" -"Commit the changes (commit messages are always imperative: “Do " -"something”, in this case “Change …”)" +"Commit the changes (commit messages are always imperative: \"Do " +"something\", in this case \"Change …\")" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:307 +#: ../../source/contributor-tutorial-contribute-on-github.rst:306 msgid "Push the changes to your fork" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:308 +#: ../../source/contributor-tutorial-contribute-on-github.rst:307 msgid "Open a PR (as shown above)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:309 +#: ../../source/contributor-tutorial-contribute-on-github.rst:308 msgid "Wait for it to be approved!" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:310 +#: ../../source/contributor-tutorial-contribute-on-github.rst:309 msgid "Congrats! 🥳 You're now officially a Flower contributor!" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:314 +#: ../../source/contributor-tutorial-contribute-on-github.rst:313 msgid "How to write a good PR title" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:316 +#: ../../source/contributor-tutorial-contribute-on-github.rst:315 msgid "" "A well-crafted PR title helps team members quickly understand the purpose" " and scope of the changes being proposed. 
Here's a guide to help you " "write a good GitHub PR title:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:318 +#: ../../source/contributor-tutorial-contribute-on-github.rst:317 msgid "" "1. Be Clear and Concise: Provide a clear summary of the changes in a " "concise manner. 1. Use Actionable Verbs: Start with verbs like \"Add,\" " @@ -1763,62 +1761,62 @@ msgid "" "Capitalization and Punctuation: Follow grammar rules for clarity." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:324 +#: ../../source/contributor-tutorial-contribute-on-github.rst:323 msgid "" "Let's start with a few examples for titles that should be avoided because" " they do not provide meaningful information:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:326 +#: ../../source/contributor-tutorial-contribute-on-github.rst:325 msgid "Implement Algorithm" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:327 +#: ../../source/contributor-tutorial-contribute-on-github.rst:326 msgid "Database" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:328 +#: ../../source/contributor-tutorial-contribute-on-github.rst:327 msgid "Add my_new_file.py to codebase" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:329 +#: ../../source/contributor-tutorial-contribute-on-github.rst:328 msgid "Improve code in module" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:330 +#: ../../source/contributor-tutorial-contribute-on-github.rst:329 msgid "Change SomeModule" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:332 +#: ../../source/contributor-tutorial-contribute-on-github.rst:331 msgid "" "Here are a few positive examples which provide helpful information " "without repeating how they do it, as that is already visible in the " "\"Files changed\" section of the PR:" msgstr "" -#: 
../../source/contributor-tutorial-contribute-on-github.rst:334 +#: ../../source/contributor-tutorial-contribute-on-github.rst:333 msgid "Update docs banner to mention Flower Summit 2023" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:335 +#: ../../source/contributor-tutorial-contribute-on-github.rst:334 msgid "Remove unnecessary XGBoost dependency" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:336 +#: ../../source/contributor-tutorial-contribute-on-github.rst:335 msgid "Remove redundant attributes in strategies subclassing FedAvg" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:337 +#: ../../source/contributor-tutorial-contribute-on-github.rst:336 msgid "Add CI job to deploy the staging system when the ``main`` branch changes" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:338 +#: ../../source/contributor-tutorial-contribute-on-github.rst:337 msgid "" "Add new amazing library which will be used to improve the simulation " "engine" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:342 +#: ../../source/contributor-tutorial-contribute-on-github.rst:341 #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:548 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:946 #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:727 @@ -1827,150 +1825,150 @@ msgstr "" msgid "Next steps" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:344 +#: ../../source/contributor-tutorial-contribute-on-github.rst:343 msgid "" "Once you have made your first PR, and want to contribute more, be sure to" " check out the following :" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:346 +#: ../../source/contributor-tutorial-contribute-on-github.rst:345 msgid "" -"`Good first contributions `_, where you should particularly look " -"into the :code:`baselines` contributions." 
+":doc:`Good first contributions `, where you should particularly look into the " +":code:`baselines` contributions." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:350 +#: ../../source/contributor-tutorial-contribute-on-github.rst:349 #: ../../source/fed/0000-20200102-fed-template.md:60 msgid "Appendix" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:355 +#: ../../source/contributor-tutorial-contribute-on-github.rst:354 msgid "Changelog entry" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:357 +#: ../../source/contributor-tutorial-contribute-on-github.rst:356 msgid "" "When opening a new PR, inside its description, there should be a " "``Changelog entry`` header." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:359 +#: ../../source/contributor-tutorial-contribute-on-github.rst:358 msgid "" "Above this header you should see the following comment that explains how " "to write your changelog entry:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:361 +#: ../../source/contributor-tutorial-contribute-on-github.rst:360 msgid "" "Inside the following 'Changelog entry' section, you should put the " "description of your changes that will be added to the changelog alongside" " your PR title." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:364 +#: ../../source/contributor-tutorial-contribute-on-github.rst:363 msgid "" -"If the section is completely empty (without any token) or non-existant, " +"If the section is completely empty (without any token) or non-existent, " "the changelog will just contain the title of the PR for the changelog " "entry, without any description." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:367 +#: ../../source/contributor-tutorial-contribute-on-github.rst:366 msgid "" "If the section contains some text other than tokens, it will use it to " "add a description to the change." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:369 +#: ../../source/contributor-tutorial-contribute-on-github.rst:368 msgid "" "If the section contains one of the following tokens it will ignore any " "other text and put the PR under the corresponding section of the " "changelog:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:371 +#: ../../source/contributor-tutorial-contribute-on-github.rst:370 msgid " is for classifying a PR as a general improvement." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:373 +#: ../../source/contributor-tutorial-contribute-on-github.rst:372 msgid " is to not add the PR to the changelog" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:375 +#: ../../source/contributor-tutorial-contribute-on-github.rst:374 msgid " is to add a general baselines change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:377 +#: ../../source/contributor-tutorial-contribute-on-github.rst:376 msgid " is to add a general examples change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:379 +#: ../../source/contributor-tutorial-contribute-on-github.rst:378 msgid " is to add a general sdk change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:381 +#: ../../source/contributor-tutorial-contribute-on-github.rst:380 msgid " is to add a general simulations change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:383 +#: ../../source/contributor-tutorial-contribute-on-github.rst:382 msgid "Note that only one token should be used." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:385 +#: ../../source/contributor-tutorial-contribute-on-github.rst:384 msgid "" "Its content must have a specific format. We will break down what each " "possibility does:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:387 +#: ../../source/contributor-tutorial-contribute-on-github.rst:386 msgid "" "If the ``### Changelog entry`` section contains nothing or doesn't exist," " the following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:391 +#: ../../source/contributor-tutorial-contribute-on-github.rst:390 msgid "" "If the ``### Changelog entry`` section contains a description (and no " "token), the following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:397 +#: ../../source/contributor-tutorial-contribute-on-github.rst:396 msgid "" "If the ``### Changelog entry`` section contains ````, nothing will " "change in the changelog." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:399 +#: ../../source/contributor-tutorial-contribute-on-github.rst:398 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:403 +#: ../../source/contributor-tutorial-contribute-on-github.rst:402 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:407 +#: ../../source/contributor-tutorial-contribute-on-github.rst:406 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:411 +#: ../../source/contributor-tutorial-contribute-on-github.rst:410 msgid "" "If the ``### Changelog entry`` section contains ````, the following " "text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:415 +#: ../../source/contributor-tutorial-contribute-on-github.rst:414 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:419 +#: ../../source/contributor-tutorial-contribute-on-github.rst:418 msgid "" "Note that only one token must be provided, otherwise, only the first " "action (in the order listed above), will be performed." @@ -2004,7 +2002,7 @@ msgstr "" msgid "" "Flower uses :code:`pyproject.toml` to manage dependencies and configure " "development tools (the ones which support it). Poetry is a build tool " -"which supports `PEP 517 `_." +"which supports `PEP 517 `_." 
msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:18 @@ -2172,9 +2170,9 @@ msgid "" "`_, a federated training strategy " "designed for non-iid data. We are using PyTorch to train a Convolutional " "Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " -"When applying FedBN, only few changes needed compared to `Example: " -"PyTorch - From Centralized To Federated `_." +"When applying FedBN, only few changes needed compared to :doc:`Example: " +"PyTorch - From Centralized To Federated `." msgstr "" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 @@ -2184,10 +2182,10 @@ msgstr "" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 msgid "" -"All files are revised based on `Example: PyTorch - From Centralized To " -"Federated `_. The only thing to do is modifying the file called " -":code:`cifar.py`, revised part is shown below:" +"All files are revised based on :doc:`Example: PyTorch - From Centralized " +"To Federated `. The only " +"thing to do is modifying the file called :code:`cifar.py`, revised part " +"is shown below:" msgstr "" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 @@ -2205,8 +2203,8 @@ msgstr "" msgid "" "So far this should all look fairly familiar if you've used PyTorch " "before. Let's take the next step and use what we've built to create a " -"federated learning system within FedBN, the sytstem consists of one " -"server and two clients." +"federated learning system within FedBN, the system consists of one server" +" and two clients." 
msgstr "" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 @@ -2216,13 +2214,12 @@ msgstr "" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 msgid "" -"If you have read `Example: PyTorch - From Centralized To Federated " -"`_, the following parts are easy to follow, onyl " -":code:`get_parameters` and :code:`set_parameters` function in " -":code:`client.py` needed to revise. If not, please read the `Example: " -"PyTorch - From Centralized To Federated `_. first." +"If you have read :doc:`Example: PyTorch - From Centralized To Federated " +"`, the following parts are" +" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " +"function in :code:`client.py` needed to revise. If not, please read the " +":doc:`Example: PyTorch - From Centralized To Federated `. first." msgstr "" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 @@ -2730,8 +2727,8 @@ msgid "" "Implementing a Flower *client* basically means implementing a subclass of" " either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " "Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`MNISTClient`. :code:`NumPyClient` is slightly easier " -"to implement than :code:`Client` if you use a framework with good NumPy " +"we'll call it :code:`MNISTClient`. :code:`NumPyClient` is slightly easier" +" to implement than :code:`Client` if you use a framework with good NumPy " "interoperability (like PyTorch or MXNet) because it avoids some of the " "boilerplate that would otherwise be necessary. :code:`MNISTClient` needs " "to implement four methods, two methods for getting/setting model " @@ -2911,8 +2908,8 @@ msgid "" "Implementing a Flower *client* basically means implementing a subclass of" " either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. 
" "Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier " -"to implement than :code:`Client` if you use a framework with good NumPy " +"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" +" to implement than :code:`Client` if you use a framework with good NumPy " "interoperability (like PyTorch or TensorFlow/Keras) because it avoids " "some of the boilerplate that would otherwise be necessary. " ":code:`CifarClient` needs to implement four methods, two methods for " @@ -2962,569 +2959,291 @@ msgid "" "How about adding more clients?" msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:2 -msgid "Example: Walk-Through PyTorch & MNIST" -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:4 -msgid "" -"In this tutorial we will learn, how to train a Convolutional Neural " -"Network on MNIST using Flower and PyTorch." -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:6 -#: ../../source/tutorial-quickstart-mxnet.rst:16 -#: ../../source/tutorial-quickstart-pytorch.rst:17 -#: ../../source/tutorial-quickstart-scikitlearn.rst:14 -msgid "" -"Our example consists of one *server* and two *clients* all having the " -"same model." -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:8 -#: ../../source/tutorial-quickstart-pytorch.rst:19 -msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." 
-msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:12 -#: ../../source/tutorial-quickstart-pytorch.rst:23 -msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running :" -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:18 -msgid "" -"Since we want to use PyTorch to solve a computer vision task, let's go " -"ahead an install PyTorch and the **torchvision** library:" -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:26 -msgid "Ready... Set... Train!" -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:28 -msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. Our training " -"procedure and network architecture are based on PyTorch's `Basic MNIST " -"Example `_. This " -"will allow you see how easy it is to wrap your code with Flower and begin" -" training in a federated way. We provide you with two helper scripts, " -"namely *run-server.sh*, and *run-clients.sh*. Don't be afraid to look " -"inside, they are simple enough =)." -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:31 -msgid "" -"Go ahead and launch on a terminal the *run-server.sh* script first as " -"follows:" -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:38 -msgid "Now that the server is up and running, go ahead and launch the clients." +#: ../../source/explanation-differential-privacy.rst:2 +#: ../../source/explanation-differential-privacy.rst:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 +msgid "Differential Privacy" msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:45 +#: ../../source/explanation-differential-privacy.rst:3 msgid "" -"Et voilà! You should be seeing the training procedure and, after a few " -"iterations, the test accuracy for each client." 
-msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:66 -msgid "Now, let's see what is really happening inside." -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:69 -#: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-mxnet.rst:226 -#: ../../source/tutorial-quickstart-pytorch.rst:203 -#: ../../source/tutorial-quickstart-scikitlearn.rst:157 -#: ../../source/tutorial-quickstart-tensorflow.rst:98 -#: ../../source/tutorial-quickstart-xgboost.rst:309 -msgid "Flower Server" +"The information in datasets like healthcare, financial transactions, user" +" preferences, etc., is valuable and has the potential for scientific " +"breakthroughs and provides important business insights. However, such " +"data is also sensitive and there is a risk of compromising individual " +"privacy." msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:71 +#: ../../source/explanation-differential-privacy.rst:6 msgid "" -"Inside the server helper script *run-server.sh* you will find the " -"following code that basically runs the :code:`server.py`" +"Traditional methods like anonymization alone would not work because of " +"attacks like Re-identification and Data Linkage. That's where " +"differential privacy comes in. It provides the possibility of analyzing " +"data while ensuring the privacy of individuals." msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:78 +#: ../../source/explanation-differential-privacy.rst:12 msgid "" -"We can go a bit deeper and see that :code:`server.py` simply launches a " -"server that will coordinate three rounds of training. Flower Servers are " -"very customizable, but for simple workloads, we can start a server using " -"the :ref:`start_server ` function and " -"leave all the configuration possibilities at their default values, as " -"seen below." 
-msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:89 -#: ../../source/tutorial-quickstart-ios.rst:34 -#: ../../source/tutorial-quickstart-mxnet.rst:36 -#: ../../source/tutorial-quickstart-pytorch.rst:37 -#: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-tensorflow.rst:29 -#: ../../source/tutorial-quickstart-xgboost.rst:55 -msgid "Flower Client" +"Imagine two datasets that are identical except for a single record (for " +"instance, Alice's data). Differential Privacy (DP) guarantees that any " +"analysis (M), like calculating the average income, will produce nearly " +"identical results for both datasets (O and O' would be similar). This " +"preserves group patterns while obscuring individual details, ensuring the" +" individual's information remains hidden in the crowd." msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:91 -msgid "" -"Next, let's take a look at the *run-clients.sh* file. You will see that " -"it contains the main loop that starts a set of *clients*." +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "DP Intro" msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:100 +#: ../../source/explanation-differential-privacy.rst:22 msgid "" -"**cid**: is the client ID. It is an integer that uniquely identifies " -"client identifier." -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:101 -msgid "**sever_address**: String that identifies IP and port of the server." +"One of the most commonly used mechanisms to achieve DP is adding enough " +"noise to the output of the analysis to mask the contribution of each " +"individual in the data while preserving the overall accuracy of the " +"analysis." msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:102 -msgid "" -"**nb_clients**: This defines the number of clients being created. 
This " -"piece of information is not required by the client, but it helps us " -"partition the original MNIST dataset to make sure that every client is " -"working on unique subsets of both *training* and *test* sets." +#: ../../source/explanation-differential-privacy.rst:25 +msgid "Formal Definition" msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:104 +#: ../../source/explanation-differential-privacy.rst:26 msgid "" -"Again, we can go deeper and look inside :code:`flwr_example/quickstart-" -"pytorch/client.py`. After going through the argument parsing code at the " -"beginning of our :code:`main` function, you will find a call to " -":code:`mnist.load_data`. This function is responsible for partitioning " -"the original MNIST datasets (*training* and *test*) and returning a " -":code:`torch.utils.data.DataLoader` s for each of them. We then " -"instantiate a :code:`PytorchMNISTClient` object with our client ID, our " -"DataLoaders, the number of epochs in each round, and which device we want" -" to use for training (CPU or GPU)." +"Differential Privacy (DP) provides statistical guarantees against the " +"information an adversary can infer through the output of a randomized " +"algorithm. It provides an unconditional upper bound on the influence of a" +" single individual on the output of the algorithm by adding noise [1]. A " +"randomized mechanism M provides (:math:`\\epsilon`, " +":math:`\\delta`)-differential privacy if for any two neighboring " +"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " +"record, and for all possible outputs S ⊆ Range(A):" msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:119 +#: ../../source/explanation-differential-privacy.rst:32 msgid "" -"The :code:`PytorchMNISTClient` object when finally passed to " -":code:`fl.client.start_client` along with the server's address as the " -"training process begins." 
+"\\small\n" +"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:123 -msgid "A Closer Look" -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:125 +#: ../../source/explanation-differential-privacy.rst:38 msgid "" -"Now, let's look closely into the :code:`PytorchMNISTClient` inside " -":code:`flwr_example.quickstart-pytorch.mnist` and see what it is doing:" +"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " +"metric of privacy loss. It also controls the privacy-utility trade-off; " +"lower :math:`\\epsilon` values indicate higher levels of privacy but are " +"likely to reduce utility as well. The :math:`\\delta` parameter accounts " +"for a small probability on which the upper bound :math:`\\epsilon` does " +"not hold. The amount of noise needed to achieve differential privacy is " +"proportional to the sensitivity of the output, which measures the maximum" +" change in the output due to the inclusion or removal of a single record." msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:226 -msgid "" -"The first thing to notice is that :code:`PytorchMNISTClient` instantiates" -" a CNN model inside its constructor" +#: ../../source/explanation-differential-privacy.rst:45 +msgid "Differential Privacy in Machine Learning" msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:244 +#: ../../source/explanation-differential-privacy.rst:46 msgid "" -"The code for the CNN is available under :code:`quickstart-pytorch.mnist` " -"and it is reproduced below. It is the same network found in `Basic MNIST " -"Example `_." +"DP can be utilized in machine learning to preserve the privacy of the " +"training data. 
Differentially private machine learning algorithms are " +"designed in a way to prevent the algorithm to learn any specific " +"information about any individual data points and subsequently prevent the" +" model from revealing sensitive information. Depending on the stage at " +"which noise is introduced, various methods exist for applying DP to " +"machine learning algorithms. One approach involves adding noise to the " +"training data (either to the features or labels), while another method " +"entails injecting noise into the gradients of the loss function during " +"model training. Additionally, such noise can be incorporated into the " +"model's output." msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:290 -msgid "" -"The second thing to notice is that :code:`PytorchMNISTClient` class " -"inherits from the :code:`fl.client.Client`, and hence it must implement " -"the following methods:" +#: ../../source/explanation-differential-privacy.rst:53 +msgid "Differential Privacy in Federated Learning" msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:315 +#: ../../source/explanation-differential-privacy.rst:54 msgid "" -"When comparing the abstract class to its derived class " -":code:`PytorchMNISTClient` you will notice that :code:`fit` calls a " -":code:`train` function and that :code:`evaluate` calls a :code:`test`: " -"function." +"Federated learning is a data minimization approach that allows multiple " +"parties to collaboratively train a model without sharing their raw data. " +"However, federated learning also introduces new privacy challenges. The " +"model updates between parties and the central server can leak information" +" about the local data. These leaks can be exploited by attacks such as " +"membership inference and property inference attacks, or model inversion " +"attacks." 
msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:317 +#: ../../source/explanation-differential-privacy.rst:58 msgid "" -"These functions can both be found inside the same :code:`quickstart-" -"pytorch.mnist` module:" +"DP can play a crucial role in federated learning to provide privacy for " +"the clients' data." msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:437 +#: ../../source/explanation-differential-privacy.rst:60 msgid "" -"Observe that these functions encapsulate regular training and test loops " -"and provide :code:`fit` and :code:`evaluate` with final statistics for " -"each round. You could substitute them with your custom train and test " -"loops and change the network architecture, and the entire example would " -"still work flawlessly. As a matter of fact, why not try and modify the " -"code to an example of your liking?" +"Depending on the granularity of privacy provision or the location of " +"noise addition, different forms of DP exist in federated learning. In " +"this explainer, we focus on two approaches of DP utilization in federated" +" learning based on where the noise is added: at the server (also known as" +" the center) or at the client (also known as the local)." msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:444 -msgid "Give It a Try" -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:445 +#: ../../source/explanation-differential-privacy.rst:63 msgid "" -"Looking through the quickstart code description above will have given a " -"good understanding of how *clients* and *servers* work in Flower, how to " -"run a simple experiment, and the internals of a client wrapper. Here are " -"a few things you could try on your own and get more experience with " -"Flower:" +"**Central Differential Privacy**: DP is applied by the server and the " +"goal is to prevent the aggregated model from leaking information about " +"each client's data." 
msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:448 +#: ../../source/explanation-differential-privacy.rst:65 msgid "" -"Try and change :code:`PytorchMNISTClient` so it can accept different " -"architectures." -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:449 -msgid "Modify the :code:`train` function so that it accepts different optimizers" +"**Local Differential Privacy**: DP is applied on the client side before " +"sending any information to the server and the goal is to prevent the " +"updates that are sent to the server from leaking any information about " +"the client's data." msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:450 -msgid "" -"Modify the :code:`test` function so that it proves not only the top-1 " -"(regular accuracy) but also the top-5 accuracy?" +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:68 +#: ../../source/how-to-use-differential-privacy.rst:11 +msgid "Central Differential Privacy" msgstr "" -#: ../../source/example-walkthrough-pytorch-mnist.rst:451 +#: ../../source/explanation-differential-privacy.rst:69 msgid "" -"Go larger! Try to adapt the code to larger images and datasets. Why not " -"try training on ImageNet with a ResNet-50?" -msgstr "" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:453 -msgid "You are ready now. Enjoy learning in a federated way!" -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:2 -msgid "Differential privacy" +"In this approach, which is also known as user-level DP, the central " +"server is responsible for adding noise to the globally aggregated " +"parameters. It should be noted that trust in the server is required." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:4 +#: ../../source/explanation-differential-privacy.rst:76 msgid "" -"Flower provides differential privacy (DP) wrapper classes for the easy " -"integration of the central DP guarantees provided by DP-FedAvg into " -"training pipelines defined in any of the various ML frameworks that " -"Flower is compatible with." +"While there are various ways to implement central DP in federated " +"learning, we concentrate on the algorithms proposed by [2] and [3]. The " +"overall approach is to clip the model updates sent by the clients and add" +" some amount of noise to the aggregated model. In each iteration, a " +"random set of clients is chosen with a specific probability for training." +" Each client performs local training on its own data. The update of each " +"client is then clipped by some value `S` (sensitivity `S`). This would " +"limit the impact of any individual client which is crucial for privacy " +"and often beneficial for robustness. A common approach to achieve this is" +" by restricting the `L2` norm of the clients' model updates, ensuring " +"that larger updates are scaled down to fit within the norm `S`." msgstr "" -#: ../../source/explanation-differential-privacy.rst:7 -msgid "" -"Please note that these components are still experimental; the correct " -"configuration of DP for a specific task is still an unsolved problem." +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "clipping" msgstr "" -#: ../../source/explanation-differential-privacy.rst:10 +#: ../../source/explanation-differential-privacy.rst:89 msgid "" -"The name DP-FedAvg is misleading since it can be applied on top of any FL" -" algorithm that conforms to the general structure prescribed by the " -"FedOpt family of algorithms." 
-msgstr "" - -#: ../../source/explanation-differential-privacy.rst:13 -msgid "DP-FedAvg" +"Afterwards, the Gaussian mechanism is used to add noise in order to " +"distort the sum of all clients' updates. The amount of noise is scaled to" +" the sensitivity value to obtain a privacy guarantee. The Gaussian " +"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " +"noise_scale * S ) / (number of sampled clients)`." msgstr "" -#: ../../source/explanation-differential-privacy.rst:15 -msgid "" -"DP-FedAvg, originally proposed by McMahan et al. [mcmahan]_ and extended " -"by Andrew et al. [andrew]_, is essentially FedAvg with the following " -"modifications." +#: ../../source/explanation-differential-privacy.rst:94 +msgid "Clipping" msgstr "" -#: ../../source/explanation-differential-privacy.rst:17 +#: ../../source/explanation-differential-privacy.rst:96 msgid "" -"**Clipping** : The influence of each client's update is bounded by " -"clipping it. This is achieved by enforcing a cap on the L2 norm of the " -"update, scaling it down if needed." +"There are two forms of clipping commonly used in Central DP: Fixed " +"Clipping and Adaptive Clipping." msgstr "" -#: ../../source/explanation-differential-privacy.rst:18 +#: ../../source/explanation-differential-privacy.rst:98 msgid "" -"**Noising** : Gaussian noise, calibrated to the clipping threshold, is " -"added to the average computed at the server." +"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " +"of clients' updates. Any update exceeding this threshold is clipped back " +"to the threshold value." msgstr "" -#: ../../source/explanation-differential-privacy.rst:20 +#: ../../source/explanation-differential-privacy.rst:100 msgid "" -"The distribution of the update norm has been shown to vary from task-to-" -"task and to evolve as training progresses. 
This variability is crucial in" -" understanding its impact on differential privacy guarantees, emphasizing" -" the need for an adaptive approach [andrew]_ that continuously adjusts " -"the clipping threshold to track a prespecified quantile of the update " +"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " +"on the observed update distribution [4]. It means that the clipping value" +" is tuned during the rounds with respect to the quantile of the update " "norm distribution." msgstr "" -#: ../../source/explanation-differential-privacy.rst:23 -msgid "Simplifying Assumptions" -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:25 -msgid "" -"We make (and attempt to enforce) a number of assumptions that must be " -"satisfied to ensure that the training process actually realizes the " -":math:`(\\epsilon, \\delta)` guarantees the user has in mind when " -"configuring the setup." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:27 -msgid "" -"**Fixed-size subsampling** :Fixed-size subsamples of the clients must be " -"taken at each round, as opposed to variable-sized Poisson subsamples." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:28 -msgid "" -"**Unweighted averaging** : The contributions from all the clients must " -"weighted equally in the aggregate to eliminate the requirement for the " -"server to know in advance the sum of the weights of all clients available" -" for selection." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:29 -msgid "" -"**No client failures** : The set of available clients must stay constant " -"across all rounds of training. In other words, clients cannot drop out or" -" fail." 
-msgstr "" - -#: ../../source/explanation-differential-privacy.rst:31 -msgid "" -"The first two are useful for eliminating a multitude of complications " -"associated with calibrating the noise to the clipping threshold, while " -"the third one is required to comply with the assumptions of the privacy " -"analysis." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:34 -msgid "" -"These restrictions are in line with constraints imposed by Andrew et al. " -"[andrew]_." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:37 -msgid "Customizable Responsibility for Noise injection" -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:38 -msgid "" -"In contrast to other implementations where the addition of noise is " -"performed at the server, you can configure the site of noise injection to" -" better match your threat model. We provide users with the flexibility to" -" set up the training such that each client independently adds a small " -"amount of noise to the clipped update, with the result that simply " -"aggregating the noisy updates is equivalent to the explicit addition of " -"noise to the non-noisy aggregate at the server." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:41 -msgid "" -"To be precise, if we let :math:`m` be the number of clients sampled each " -"round and :math:`\\sigma_\\Delta` be the scale of the total Gaussian " -"noise that needs to be added to the sum of the model updates, we can use " -"simple maths to show that this is equivalent to each client adding noise " -"with scale :math:`\\sigma_\\Delta/\\sqrt{m}`." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:44 -msgid "Wrapper-based approach" -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:46 -msgid "" -"Introducing DP to an existing workload can be thought of as adding an " -"extra layer of security around it. 
This inspired us to provide the " -"additional server and client-side logic needed to make the training " -"process differentially private as wrappers for instances of the " -":code:`Strategy` and :code:`NumPyClient` abstract classes respectively. " -"This wrapper-based approach has the advantage of being easily composable " -"with other wrappers that someone might contribute to the Flower library " -"in the future, e.g., for secure aggregation. Using Inheritance instead " -"can be tedious because that would require the creation of new sub- " -"classes every time a new class implementing :code:`Strategy` or " -":code:`NumPyClient` is defined." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:49 -msgid "Server-side logic" -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:51 -msgid "" -"The first version of our solution was to define a decorator whose " -"constructor accepted, among other things, a boolean-valued variable " -"indicating whether adaptive clipping was to be enabled or not. We quickly" -" realized that this would clutter its :code:`__init__()` function with " -"variables corresponding to hyperparameters of adaptive clipping that " -"would remain unused when it was disabled. A cleaner implementation could " -"be achieved by splitting the functionality into two decorators, " -":code:`DPFedAvgFixed` and :code:`DPFedAvgAdaptive`, with the latter sub- " -"classing the former. The constructors for both classes accept a boolean " -"parameter :code:`server_side_noising`, which, as the name suggests, " -"determines where noising is to be performed." 
-msgstr "" - -#: ../../source/explanation-differential-privacy.rst:54 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 -msgid "DPFedAvgFixed" -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:56 -msgid "" -"The server-side capabilities required for the original version of DP-" -"FedAvg, i.e., the one which performed fixed clipping, can be completely " -"captured with the help of wrapper logic for just the following two " -"methods of the :code:`Strategy` abstract class." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:58 -msgid "" -":code:`configure_fit()` : The config dictionary being sent by the wrapped" -" :code:`Strategy` to each client needs to be augmented with an additional" -" value equal to the clipping threshold (keyed under " -":code:`dpfedavg_clip_norm`) and, if :code:`server_side_noising=true`, " -"another one equal to the scale of the Gaussian noise that needs to be " -"added at the client (keyed under :code:`dpfedavg_noise_stddev`). This " -"entails *post*-processing of the results returned by the wrappee's " -"implementation of :code:`configure_fit()`." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:59 +#: ../../source/explanation-differential-privacy.rst:102 msgid "" -":code:`aggregate_fit()`: We check whether any of the sampled clients " -"dropped out or failed to upload an update before the round timed out. In " -"that case, we need to abort the current round, discarding any successful " -"updates that were received, and move on to the next one. On the other " -"hand, if all clients responded successfully, we must force the averaging " -"of the updates to happen in an unweighted manner by intercepting the " -":code:`parameters` field of :code:`FitRes` for each received update and " -"setting it to 1. 
Furthermore, if :code:`server_side_noising=true`, each " -"update is perturbed with an amount of noise equal to what it would have " -"been subjected to had client-side noising being enabled. This entails " -"*pre*-processing of the arguments to this method before passing them on " -"to the wrappee's implementation of :code:`aggregate_fit()`." +"The choice between fixed and adaptive clipping depends on various factors" +" such as privacy requirements, data distribution, model complexity, and " +"others." msgstr "" -#: ../../source/explanation-differential-privacy.rst:62 -msgid "" -"We can't directly change the aggregation function of the wrapped strategy" -" to force it to add noise to the aggregate, hence we simulate client-side" -" noising to implement server-side noising." +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:105 +#: ../../source/how-to-use-differential-privacy.rst:96 +msgid "Local Differential Privacy" msgstr "" -#: ../../source/explanation-differential-privacy.rst:64 +#: ../../source/explanation-differential-privacy.rst:107 msgid "" -"These changes have been put together into a class called " -":code:`DPFedAvgFixed`, whose constructor accepts the strategy being " -"decorated, the clipping threshold and the number of clients sampled every" -" round as compulsory arguments. The user is expected to specify the " -"clipping threshold since the order of magnitude of the update norms is " -"highly dependent on the model being trained and providing a default value" -" would be misleading. The number of clients sampled at every round is " -"required to calculate the amount of noise that must be added to each " -"individual update, either by the server or the clients." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:67 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 -msgid "DPFedAvgAdaptive" +"In this approach, each client is responsible for performing DP. 
Local DP " +"avoids the need for a fully trusted aggregator, but it should be noted " +"that local DP leads to a decrease in accuracy but better privacy in " +"comparison to central DP." msgstr "" -#: ../../source/explanation-differential-privacy.rst:69 -msgid "" -"The additional functionality required to facilitate adaptive clipping has" -" been provided in :code:`DPFedAvgAdaptive`, a subclass of " -":code:`DPFedAvgFixed`. It overrides the above-mentioned methods to do the" -" following." +#: ../../source/explanation-differential-privacy.rst:116 +msgid "In this explainer, we focus on two forms of achieving Local DP:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:71 +#: ../../source/explanation-differential-privacy.rst:118 msgid "" -":code:`configure_fit()` : It intercepts the config dict returned by " -":code:`super.configure_fit()` to add the key-value pair " -":code:`dpfedavg_adaptive_clip_enabled:True` to it, which the client " -"interprets as an instruction to include an indicator bit (1 if update " -"norm <= clipping threshold, 0 otherwise) in the results returned by it." +"Each client adds noise to the local updates before sending them to the " +"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " +"the sensitivity of the local model to be ∆, Gaussian noise is applied " +"with a noise scale of σ where:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:73 +#: ../../source/explanation-differential-privacy.rst:120 msgid "" -":code:`aggregate_fit()` : It follows a call to " -":code:`super.aggregate_fit()` with one to :code:`__update_clip_norm__()`," -" a procedure which adjusts the clipping threshold on the basis of the " -"indicator bits received from the sampled clients." 
-msgstr "" - -#: ../../source/explanation-differential-privacy.rst:77 -msgid "Client-side logic" +"\\small\n" +"\\frac{∆ \\times \\sqrt{2 \\times " +"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" +"\n" msgstr "" -#: ../../source/explanation-differential-privacy.rst:79 +#: ../../source/explanation-differential-privacy.rst:125 msgid "" -"The client-side capabilities required can be completely captured through " -"wrapper logic for just the :code:`fit()` method of the " -":code:`NumPyClient` abstract class. To be precise, we need to *post-" -"process* the update computed by the wrapped client to clip it, if " -"necessary, to the threshold value supplied by the server as part of the " -"config dictionary. In addition to this, it may need to perform some extra" -" work if either (or both) of the following keys are also present in the " -"dict." +"Each client adds noise to the gradients of the model during the local " +"training (DP-SGD). More specifically, in this approach, gradients are " +"clipped and an amount of calibrated noise is injected into the gradients." msgstr "" -#: ../../source/explanation-differential-privacy.rst:81 +#: ../../source/explanation-differential-privacy.rst:128 msgid "" -":code:`dpfedavg_noise_stddev` : Generate and add the specified amount of " -"noise to the clipped update." +"Please note that these two approaches are providing privacy at different " +"levels." msgstr "" -#: ../../source/explanation-differential-privacy.rst:82 -msgid "" -":code:`dpfedavg_adaptive_clip_enabled` : Augment the metrics dict in the " -":code:`FitRes` object being returned to the server with an indicator bit," -" calculated as described earlier." +#: ../../source/explanation-differential-privacy.rst:131 +msgid "**References:**" msgstr "" -#: ../../source/explanation-differential-privacy.rst:86 -msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" +#: ../../source/explanation-differential-privacy.rst:133 +msgid "[1] Dwork et al. 
The Algorithmic Foundations of Differential Privacy." msgstr "" -#: ../../source/explanation-differential-privacy.rst:88 +#: ../../source/explanation-differential-privacy.rst:135 msgid "" -"Assume you have trained for :math:`n` rounds with sampling fraction " -":math:`q` and noise multiplier :math:`z`. In order to calculate the " -":math:`\\epsilon` value this would result in for a particular " -":math:`\\delta`, the following script may be used." +"[2] McMahan et al. Learning Differentially Private Recurrent Language " +"Models." msgstr "" -#: ../../source/explanation-differential-privacy.rst:98 +#: ../../source/explanation-differential-privacy.rst:137 msgid "" -"McMahan et al. \"Learning Differentially Private Recurrent Language " -"Models.\" International Conference on Learning Representations (ICLR), " -"2017." +"[3] Geyer et al. Differentially Private Federated Learning: A Client " +"Level Perspective." msgstr "" -#: ../../source/explanation-differential-privacy.rst:100 -msgid "" -"Andrew, Galen, et al. \"Differentially Private Learning with Adaptive " -"Clipping.\" Advances in Neural Information Processing Systems (NeurIPS), " -"2021." +#: ../../source/explanation-differential-privacy.rst:139 +msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." msgstr "" #: ../../source/explanation-federated-evaluation.rst:2 @@ -3947,6 +3666,7 @@ msgid "As a reference, this document follows the above structure." msgstr "" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 +#: ../../source/ref-api/flwr.common.Metadata.rst:2 msgid "Metadata" msgstr "" @@ -4259,13 +3979,12 @@ msgstr "" #: ../../source/how-to-configure-clients.rst:89 msgid "" "This can be achieved by customizing an existing strategy or by " -"`implementing a custom strategy from scratch " -"`_. 
" -"Here's a nonsensical example that customizes :code:`FedAvg` by adding a " -"custom ``\"hello\": \"world\"`` configuration key/value pair to the " -"config dict of a *single client* (only the first client in the list, the " -"other clients in this round to not receive this \"special\" config " -"value):" +":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes :code:`FedAvg`" +" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" +" to the config dict of a *single client* (only the first client in the " +"list, the other clients in this round to not receive this \"special\" " +"config value):" msgstr "" #: ../../source/how-to-configure-logging.rst:2 @@ -4602,7 +4321,7 @@ msgid "" "More sophisticated implementations can use :code:`configure_fit` to " "implement custom client selection logic. A client will only participate " "in a round if the corresponding :code:`ClientProxy` is included in the " -"the list returned from :code:`configure_fit`." +"list returned from :code:`configure_fit`." msgstr "" #: ../../source/how-to-implement-strategies.rst:240 @@ -4673,7 +4392,7 @@ msgid "" "More sophisticated implementations can use :code:`configure_evaluate` to " "implement custom client selection logic. A client will only participate " "in a round if the corresponding :code:`ClientProxy` is included in the " -"the list returned from :code:`configure_evaluate`." +"list returned from :code:`configure_evaluate`." 
msgstr "" #: ../../source/how-to-implement-strategies.rst:287 @@ -4805,9 +4524,7 @@ msgid "Install via Docker" msgstr "" #: ../../source/how-to-install-flower.rst:60 -msgid "" -"`How to run Flower using Docker `_" +msgid ":doc:`How to run Flower using Docker `" msgstr "" #: ../../source/how-to-install-flower.rst:63 @@ -5069,14 +4786,12 @@ msgstr "" #: ../../source/how-to-monitor-simulation.rst:234 msgid "" -"Ray Dashboard: ``_" +"Ray Dashboard: ``_" msgstr "" #: ../../source/how-to-monitor-simulation.rst:236 -msgid "" -"Ray Metrics: ``_" +msgid "Ray Metrics: ``_" msgstr "" #: ../../source/how-to-run-flower-using-docker.rst:2 @@ -5954,7 +5669,8 @@ msgstr "" msgid "" "Remove \"placeholder\" methods from subclasses of ``Client`` or " "``NumPyClient``. If you, for example, use server-side evaluation, then " -"empty placeholder implementations of ``evaluate`` are no longer necessary." +"empty placeholder implementations of ``evaluate`` are no longer " +"necessary." msgstr "" #: ../../source/how-to-upgrade-to-flower-1.0.rst:85 @@ -6093,80 +5809,225 @@ msgid "" msgstr "" #: ../../source/how-to-use-built-in-mods.rst:89 -msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" +msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" msgstr "" -#: ../../source/how-to-use-strategies.rst:2 -msgid "Use strategies" +#: ../../source/how-to-use-differential-privacy.rst:2 +msgid "Use Differential Privacy" msgstr "" -#: ../../source/how-to-use-strategies.rst:4 +#: ../../source/how-to-use-differential-privacy.rst:3 msgid "" -"Flower allows full customization of the learning process through the " -":code:`Strategy` abstraction. A number of built-in strategies are " -"provided in the core framework." +"This guide explains how you can utilize differential privacy in the " +"Flower framework. If you are not yet familiar with differential privacy, " +"you can refer to :doc:`explanation-differential-privacy`." 
msgstr "" -#: ../../source/how-to-use-strategies.rst:6 +#: ../../source/how-to-use-differential-privacy.rst:7 msgid "" -"There are three ways to customize the way Flower orchestrates the " -"learning process on the server side:" +"Differential Privacy in Flower is in a preview phase. If you plan to use " +"these features in a production environment with sensitive data, feel free" +" contact us to discuss your requirements and to receive guidance on how " +"to best use these features." msgstr "" -#: ../../source/how-to-use-strategies.rst:8 -msgid "Use an existing strategy, for example, :code:`FedAvg`" +#: ../../source/how-to-use-differential-privacy.rst:12 +msgid "" +"This approach consists of two seprate phases: clipping of the updates and" +" adding noise to the aggregated model. For the clipping phase, Flower " +"framework has made it possible to decide whether to perform clipping on " +"the server side or the client side." msgstr "" -#: ../../source/how-to-use-strategies.rst:9 -#: ../../source/how-to-use-strategies.rst:40 -msgid "Customize an existing strategy with callback functions" +#: ../../source/how-to-use-differential-privacy.rst:15 +msgid "" +"**Server-side Clipping**: This approach has the advantage of the server " +"enforcing uniform clipping across all clients' updates and reducing the " +"communication overhead for clipping values. However, it also has the " +"disadvantage of increasing the computational load on the server due to " +"the need to perform the clipping operation for all clients." msgstr "" -#: ../../source/how-to-use-strategies.rst:10 -#: ../../source/how-to-use-strategies.rst:87 -msgid "Implement a novel strategy" +#: ../../source/how-to-use-differential-privacy.rst:16 +msgid "" +"**Client-side Clipping**: This approach has the advantage of reducing the" +" computational overhead on the server. However, it also has the " +"disadvantage of lacking centralized control, as the server has less " +"control over the clipping process." 
msgstr "" -#: ../../source/how-to-use-strategies.rst:14 -msgid "Use an existing strategy" +#: ../../source/how-to-use-differential-privacy.rst:21 +msgid "Server-side Clipping" msgstr "" -#: ../../source/how-to-use-strategies.rst:16 +#: ../../source/how-to-use-differential-privacy.rst:22 msgid "" -"Flower comes with a number of popular federated learning strategies " -"built-in. A built-in strategy can be instantiated as follows:" +"For central DP with server-side clipping, there are two :code:`Strategy` " +"classes that act as wrappers around the actual :code:`Strategy` instance " +"(for example, :code:`FedAvg`). The two wrapper classes are " +":code:`DifferentialPrivacyServerSideFixedClipping` and " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " +"adaptive clipping." msgstr "" -#: ../../source/how-to-use-strategies.rst:25 +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "server side clipping" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:31 msgid "" -"This creates a strategy with all parameters left at their default values " -"and passes it to the :code:`start_server` function. It is usually " -"recommended to adjust a few parameters during instantiation:" +"The code sample below enables the :code:`FedAvg` strategy to use server-" +"side fixed clipping using the " +":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " +"same approach can be used with " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " +"corresponding input parameters." msgstr "" -#: ../../source/how-to-use-strategies.rst:42 +#: ../../source/how-to-use-differential-privacy.rst:52 +msgid "Client-side Clipping" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:53 msgid "" -"Existing strategies provide several ways to customize their behaviour. " -"Callback functions allow strategies to call user-provided code during " -"execution." 
+"For central DP with client-side clipping, the server sends the clipping " +"value to selected clients on each round. Clients can use existing Flower " +":code:`Mods` to perform the clipping. Two mods are available for fixed " +"and adaptive client-side clipping: :code:`fixedclipping_mod` and " +":code:`adaptiveclipping_mod` with corresponding server-side wrappers " +":code:`DifferentialPrivacyClientSideFixedClipping` and " +":code:`DifferentialPrivacyClientSideAdaptiveClipping`." msgstr "" -#: ../../source/how-to-use-strategies.rst:45 -msgid "Configuring client fit and client evaluate" +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "client side clipping" msgstr "" -#: ../../source/how-to-use-strategies.rst:47 +#: ../../source/how-to-use-differential-privacy.rst:63 msgid "" -"The server can pass new configuration values to the client each round by " -"providing a function to :code:`on_fit_config_fn`. The provided function " -"will be called by the strategy and must return a dictionary of " -"configuration key values pairs that will be sent to the client. It must " -"return a dictionary of arbitrary configuration values :code:`client.fit`" -" and :code:`client.evaluate` functions during each round of federated " -"learning." 
+"The code sample below enables the :code:`FedAvg` strategy to use " +"differential privacy with client-side fixed clipping using both the " +":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " +"the client, :code:`fixedclipping_mod`:" msgstr "" -#: ../../source/how-to-use-strategies.rst:75 +#: ../../source/how-to-use-differential-privacy.rst:80 +msgid "" +"In addition to the server-side strategy wrapper, the :code:`ClientApp` " +"needs to configure the matching :code:`fixedclipping_mod` to perform the " +"client-side clipping:" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:97 +msgid "" +"To utilize local differential privacy (DP) and add noise to the client " +"model parameters before transmitting them to the server in Flower, you " +"can use the `LocalDpMod`. The following hyperparameters need to be set: " +"clipping norm value, sensitivity, epsilon, and delta." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "local DP mod" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:104 +msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:122 +msgid "" +"Please note that the order of mods, especially those that modify " +"parameters, is important when using multiple modifiers. Typically, " +"differential privacy (DP) modifiers should be the last to operate on " +"parameters." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:125 +msgid "Local Training using Privacy Engines" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:126 +msgid "" +"For ensuring data instance-level privacy during local model training on " +"the client side, consider leveraging privacy engines such as Opacus and " +"TensorFlow Privacy. For examples of using Flower with these engines, " +"please refer to the Flower examples directory (`Opacus " +"`_, `Tensorflow" +" Privacy `_)." 
+msgstr "" + +#: ../../source/how-to-use-strategies.rst:2 +msgid "Use strategies" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:4 +msgid "" +"Flower allows full customization of the learning process through the " +":code:`Strategy` abstraction. A number of built-in strategies are " +"provided in the core framework." +msgstr "" + +#: ../../source/how-to-use-strategies.rst:6 +msgid "" +"There are three ways to customize the way Flower orchestrates the " +"learning process on the server side:" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:8 +msgid "Use an existing strategy, for example, :code:`FedAvg`" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:9 +#: ../../source/how-to-use-strategies.rst:40 +msgid "Customize an existing strategy with callback functions" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:10 +#: ../../source/how-to-use-strategies.rst:87 +msgid "Implement a novel strategy" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:14 +msgid "Use an existing strategy" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:16 +msgid "" +"Flower comes with a number of popular federated learning strategies " +"built-in. A built-in strategy can be instantiated as follows:" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:25 +msgid "" +"This creates a strategy with all parameters left at their default values " +"and passes it to the :code:`start_server` function. It is usually " +"recommended to adjust a few parameters during instantiation:" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:42 +msgid "" +"Existing strategies provide several ways to customize their behaviour. " +"Callback functions allow strategies to call user-provided code during " +"execution." 
+msgstr "" + +#: ../../source/how-to-use-strategies.rst:45 +msgid "Configuring client fit and client evaluate" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:47 +msgid "" +"The server can pass new configuration values to the client each round by " +"providing a function to :code:`on_fit_config_fn`. The provided function " +"will be called by the strategy and must return a dictionary of " +"configuration key values pairs that will be sent to the client. It must " +"return a dictionary of arbitrary configuration values :code:`client.fit`" +" and :code:`client.evaluate` functions during each round of federated " +"learning." +msgstr "" + +#: ../../source/how-to-use-strategies.rst:75 msgid "" "The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " "values from server to client, and poetentially change these values each " @@ -6211,11 +6072,11 @@ msgstr "" msgid "How-to guides" msgstr "" -#: ../../source/index.rst:97 +#: ../../source/index.rst:98 msgid "Legacy example guides" msgstr "" -#: ../../source/index.rst:108 ../../source/index.rst:112 +#: ../../source/index.rst:109 ../../source/index.rst:113 msgid "Explanations" msgstr "" @@ -6223,23 +6084,23 @@ msgstr "" msgid "API reference" msgstr "" -#: ../../source/index.rst:137 +#: ../../source/index.rst:138 msgid "Reference docs" msgstr "" -#: ../../source/index.rst:153 +#: ../../source/index.rst:154 msgid "Contributor tutorials" msgstr "" -#: ../../source/index.rst:160 +#: ../../source/index.rst:161 msgid "Contributor how-to guides" msgstr "" -#: ../../source/index.rst:173 +#: ../../source/index.rst:174 msgid "Contributor explanations" msgstr "" -#: ../../source/index.rst:179 +#: ../../source/index.rst:180 msgid "Contributor references" msgstr "" @@ -6323,33 +6184,33 @@ msgid "" "specific goal." 
msgstr "" -#: ../../source/index.rst:110 +#: ../../source/index.rst:111 msgid "" "Understanding-oriented concept guides explain and discuss key topics and " "underlying ideas behind Flower and collaborative AI." msgstr "" -#: ../../source/index.rst:120 +#: ../../source/index.rst:121 msgid "References" msgstr "" -#: ../../source/index.rst:122 +#: ../../source/index.rst:123 msgid "Information-oriented API reference and other reference material." msgstr "" -#: ../../source/index.rst:131::1 +#: ../../source/index.rst:132::1 msgid ":py:obj:`flwr `\\" msgstr "" -#: ../../source/index.rst:131::1 flwr:1 of +#: ../../source/index.rst:132::1 flwr:1 of msgid "Flower main package." msgstr "" -#: ../../source/index.rst:148 +#: ../../source/index.rst:149 msgid "Contributor docs" msgstr "" -#: ../../source/index.rst:150 +#: ../../source/index.rst:151 msgid "" "The Flower community welcomes contributions. The following docs are " "intended to help along the way." @@ -6371,11 +6232,19 @@ msgstr "" msgid "flower-fleet-api" msgstr "" +#: ../../source/ref-api-cli.rst:37 +msgid "flower-client-app" +msgstr "" + +#: ../../source/ref-api-cli.rst:47 +msgid "flower-server-app" +msgstr "" + #: ../../source/ref-api/flwr.rst:2 msgid "flwr" msgstr "" -#: ../../source/ref-api/flwr.rst:25 ../../source/ref-api/flwr.server.rst:48 +#: ../../source/ref-api/flwr.rst:25 ../../source/ref-api/flwr.server.rst:52 msgid "Modules" msgstr "" @@ -6400,7 +6269,7 @@ msgid ":py:obj:`flwr.server `\\" msgstr "" #: ../../source/ref-api/flwr.rst:35::1 -#: ../../source/ref-api/flwr.server.rst:37::1 flwr.server:1 +#: ../../source/ref-api/flwr.server.rst:41::1 flwr.server:1 #: flwr.server.server.Server:1 of msgid "Flower server." 
msgstr "" @@ -6419,7 +6288,6 @@ msgstr "" #: ../../source/ref-api/flwr.client.rst:13 #: ../../source/ref-api/flwr.common.rst:13 -#: ../../source/ref-api/flwr.server.driver.rst:13 #: ../../source/ref-api/flwr.server.rst:13 #: ../../source/ref-api/flwr.simulation.rst:13 msgid "Functions" @@ -6457,10 +6325,10 @@ msgid "Start a Flower NumPyClient which connects to a gRPC server." msgstr "" #: ../../source/ref-api/flwr.client.rst:26 -#: ../../source/ref-api/flwr.common.rst:31 -#: ../../source/ref-api/flwr.server.driver.rst:24 -#: ../../source/ref-api/flwr.server.rst:28 +#: ../../source/ref-api/flwr.common.rst:32 +#: ../../source/ref-api/flwr.server.rst:29 #: ../../source/ref-api/flwr.server.strategy.rst:17 +#: ../../source/ref-api/flwr.server.workflow.rst:17 msgid "Classes" msgstr "" @@ -6475,7 +6343,7 @@ msgstr "" #: ../../source/ref-api/flwr.client.rst:33::1 msgid "" -":py:obj:`ClientApp `\\ \\(client\\_fn\\[\\, " +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " "mods\\]\\)" msgstr "" @@ -6502,8 +6370,12 @@ msgstr "" #: ../../source/ref-api/flwr.client.Client.rst:15 #: ../../source/ref-api/flwr.client.ClientApp.rst:15 #: ../../source/ref-api/flwr.client.NumPyClient.rst:15 +#: ../../source/ref-api/flwr.common.Array.rst:15 #: ../../source/ref-api/flwr.common.ClientMessage.rst:15 +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Context.rst:15 #: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 +#: ../../source/ref-api/flwr.common.Error.rst:15 #: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 #: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 #: ../../source/ref-api/flwr.common.FitIns.rst:15 @@ -6512,20 +6384,32 @@ msgstr "" #: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 #: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 #: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 +#: ../../source/ref-api/flwr.common.Message.rst:15 +#: ../../source/ref-api/flwr.common.MessageType.rst:15 +#: 
../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 +#: ../../source/ref-api/flwr.common.Metadata.rst:15 +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 #: ../../source/ref-api/flwr.common.Parameters.rst:15 +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 #: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 +#: ../../source/ref-api/flwr.common.RecordSet.rst:15 #: ../../source/ref-api/flwr.common.ServerMessage.rst:15 #: ../../source/ref-api/flwr.common.Status.rst:15 #: ../../source/ref-api/flwr.server.ClientManager.rst:15 +#: ../../source/ref-api/flwr.server.Driver.rst:15 #: ../../source/ref-api/flwr.server.History.rst:15 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 #: ../../source/ref-api/flwr.server.Server.rst:15 +#: ../../source/ref-api/flwr.server.ServerApp.rst:15 #: ../../source/ref-api/flwr.server.ServerConfig.rst:15 #: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 -#: ../../source/ref-api/flwr.server.driver.Driver.rst:15 -#: ../../source/ref-api/flwr.server.driver.GrpcDriver.rst:15 #: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 #: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 #: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 #: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 #: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 #: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 @@ -6543,6 +6427,9 @@ msgstr "" #: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 #: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 #: 
../../source/ref-api/flwr.server.strategy.Strategy.rst:15 +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 msgid "Methods" msgstr "" @@ -6619,9 +6506,12 @@ msgstr "" #: ../../source/ref-api/flwr.client.Client.rst:46 #: ../../source/ref-api/flwr.client.NumPyClient.rst:46 +#: ../../source/ref-api/flwr.common.Array.rst:28 #: ../../source/ref-api/flwr.common.ClientMessage.rst:25 #: ../../source/ref-api/flwr.common.Code.rst:19 +#: ../../source/ref-api/flwr.common.Context.rst:25 #: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 +#: ../../source/ref-api/flwr.common.Error.rst:25 #: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 #: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 #: ../../source/ref-api/flwr.common.EventType.rst:19 @@ -6631,10 +6521,16 @@ msgstr "" #: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 #: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 #: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 +#: ../../source/ref-api/flwr.common.Message.rst:37 +#: ../../source/ref-api/flwr.common.MessageType.rst:25 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 +#: ../../source/ref-api/flwr.common.Metadata.rst:25 #: ../../source/ref-api/flwr.common.Parameters.rst:25 #: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 +#: ../../source/ref-api/flwr.common.RecordSet.rst:25 #: ../../source/ref-api/flwr.common.ServerMessage.rst:25 #: ../../source/ref-api/flwr.common.Status.rst:25 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 #: ../../source/ref-api/flwr.server.ServerConfig.rst:25 msgid "Attributes" msgstr "" @@ -6652,14 +6548,25 @@ msgstr "" #: flwr.client.numpy_client.NumPyClient.fit #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.server.app.start_server +#: 
flwr.common.context.Context flwr.common.message.Error +#: flwr.common.message.Message flwr.common.message.Message.create_error_reply +#: flwr.common.message.Message.create_reply flwr.common.message.Metadata +#: flwr.common.record.parametersrecord.Array flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.register #: flwr.server.client_manager.ClientManager.unregister #: flwr.server.client_manager.SimpleClientManager.register #: flwr.server.client_manager.SimpleClientManager.unregister #: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.app.start_driver flwr.server.driver.driver.Driver +#: flwr.server.compat.app.start_driver flwr.server.driver.driver.Driver +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive #: flwr.server.strategy.bulyan.Bulyan +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit #: flwr.server.strategy.fedadagrad.FedAdagrad @@ -6675,7 +6582,10 @@ msgstr "" #: flwr.server.strategy.strategy.Strategy.configure_fit #: flwr.server.strategy.strategy.Strategy.evaluate #: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow +#: flwr.simulation.app.start_simulation +#: flwr.simulation.run_simulation.run_simulation 
of msgid "Parameters" msgstr "" @@ -6693,13 +6603,17 @@ msgstr "" #: flwr.client.numpy_client.NumPyClient.fit #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.server.app.start_server +#: flwr.common.message.Message.create_reply flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.num_available #: flwr.server.client_manager.ClientManager.register #: flwr.server.client_manager.SimpleClientManager.num_available #: flwr.server.client_manager.SimpleClientManager.register #: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.app.start_driver +#: flwr.server.compat.app.start_driver +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate @@ -6723,13 +6637,17 @@ msgstr "" #: flwr.client.client.Client.get_properties #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.server.app.start_server +#: flwr.common.message.Message.create_reply flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.num_available #: flwr.server.client_manager.ClientManager.register #: flwr.server.client_manager.SimpleClientManager.num_available #: flwr.server.client_manager.SimpleClientManager.register #: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.app.start_driver +#: flwr.server.compat.app.start_driver +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive #: 
flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate @@ -6779,23 +6697,38 @@ msgstr "" msgid "ClientApp" msgstr "" -#: flwr.client.client_app.ClientApp:1 flwr.common.typing.ClientMessage:1 +#: flwr.client.client_app.ClientApp:1 flwr.common.constant.MessageType:1 +#: flwr.common.constant.MessageTypeLegacy:1 flwr.common.context.Context:1 +#: flwr.common.message.Error:1 flwr.common.message.Message:1 +#: flwr.common.message.Metadata:1 flwr.common.record.parametersrecord.Array:1 +#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 #: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 #: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 #: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 #: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 #: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 #: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 -#: flwr.common.typing.Status:1 flwr.server.app.ServerConfig:1 -#: flwr.server.driver.driver.Driver:1 -#: flwr.server.driver.grpc_driver.GrpcDriver:1 flwr.server.history.History:1 -#: flwr.server.server.Server:1 of +#: flwr.common.typing.Status:1 flwr.server.driver.driver.Driver:1 +#: flwr.server.history.History:1 flwr.server.server.Server:1 +#: flwr.server.server_app.ServerApp:1 flwr.server.server_config.ServerConfig:1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of msgid "Bases: :py:class:`object`" msgstr "" -#: flwr.client.app.start_client:33 flwr.client.app.start_numpy_client:36 -#: flwr.client.client_app.ClientApp:4 flwr.server.app.start_server:41 -#: flwr.server.driver.app.start_driver:30 of +#: flwr.client.app.start_client:41 
flwr.client.app.start_numpy_client:36 +#: flwr.client.client_app.ClientApp:4 +#: flwr.client.client_app.ClientApp.evaluate:4 +#: flwr.client.client_app.ClientApp.query:4 +#: flwr.client.client_app.ClientApp.train:4 flwr.server.app.start_server:41 +#: flwr.server.compat.app.start_driver:32 flwr.server.server_app.ServerApp:4 +#: flwr.server.server_app.ServerApp.main:4 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 +#: of msgid "Examples" msgstr "" @@ -6818,6 +6751,33 @@ msgid "" "global attribute `app` that points to an object of type `ClientApp`." msgstr "" +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`evaluate `\\ \\(\\)" +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid "Return a decorator that registers the evaluate fn with the client app." +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`query `\\ \\(\\)" +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.query:1 of +msgid "Return a decorator that registers the query fn with the client app." +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`train `\\ \\(\\)" +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.train:1 of +msgid "Return a decorator that registers the train fn with the client app." 
+msgstr "" + #: ../../source/ref-api/flwr.client.NumPyClient.rst:2 msgid "NumPyClient" msgstr "" @@ -7015,7 +6975,7 @@ msgid "" msgstr "" #: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 -#: flwr.server.driver.app.start_driver:21 of +#: flwr.server.compat.app.start_driver:21 of msgid "" "The PEM-encoded root certificates as a byte string or a path string. If " "provided, a secure connection using the certificates will be established " @@ -7035,15 +6995,29 @@ msgid "" "(experimental) - 'rest': HTTP (experimental)" msgstr "" -#: flwr.client.app.start_client:34 flwr.client.app.start_numpy_client:37 of +#: flwr.client.app.start_client:31 of +msgid "" +"The maximum number of times the client will try to connect to the server " +"before giving up in case of a connection error. If set to None, there is " +"no limit to the number of tries." +msgstr "" + +#: flwr.client.app.start_client:35 of +msgid "" +"The maximum duration before the client stops trying to connect to the " +"server in case of connection error. If set to None, there is no limit to " +"the total time." 
+msgstr "" + +#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of msgid "Starting a gRPC client with an insecure server connection:" msgstr "" -#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:44 of +#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of msgid "Starting an SSL-enabled gRPC client using system certificates:" msgstr "" -#: flwr.client.app.start_client:52 flwr.client.app.start_numpy_client:52 of +#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of msgid "Starting an SSL-enabled gRPC client using provided certificates:" msgstr "" @@ -7067,73 +7041,82 @@ msgstr "" msgid "common" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.record.conversion_utils.array_from_numpy:1 of +msgid "Create Array from NumPy ndarray." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.bytes_to_ndarray:1 of msgid "Deserialize NumPy ndarray from bytes." msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`configure `\\ \\(identifier\\[\\, " "filename\\, host\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.logger.configure:1 of msgid "Configure logging to file and/or remote log server." 
msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`event `\\ \\(event\\_type\\[\\, " "event\\_details\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.telemetry.event:1 of msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " "\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 logging.Logger.log:1 +#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 #: of msgid "Log 'msg % args' with the integer severity 'level'." msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.ndarray_to_bytes:1 of msgid "Serialize NumPy ndarray to bytes." msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid ":py:obj:`now `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.date.now:1 of msgid "Construct a datetime from time.time() with time zone set to UTC." 
msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`ndarrays_to_parameters `\\ " "\\(ndarrays\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.ndarrays_to_parameters:1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 @@ -7141,187 +7124,358 @@ msgstr "" msgid "Convert NumPy ndarrays to parameters object." msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`parameters_to_ndarrays `\\ " "\\(parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.parameters_to_ndarrays:1 of msgid "Convert parameters object to NumPy ndarrays." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.parametersrecord.Array:1 of +msgid "Array type." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`ClientMessage `\\ " "\\(\\[get\\_properties\\_res\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.ClientMessage:1 of msgid "ClientMessage is a container used to hold one result message." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`Code `\\ \\(value\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.Code:1 of msgid "Client status codes." 
msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`ConfigsRecord `\\ " +"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +msgid "Configs record." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`Context `\\ \\(state\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.context.Context:1 of +msgid "State of your run." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.DisconnectRes:1 of msgid "DisconnectRes message from client to server." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`EvaluateIns `\\ \\(parameters\\, " "config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.EvaluateIns:1 of msgid "Evaluate instructions for a client." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " "num\\_examples\\, metrics\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.EvaluateRes:1 of msgid "Evaluate response from a client." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`EventType `\\ \\(value\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.telemetry.EventType:1 of msgid "Types of telemetry events." 
msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.FitIns:1 of msgid "Fit instructions for a client." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`FitRes `\\ \\(status\\, parameters\\, " "num\\_examples\\, metrics\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.FitRes:1 of msgid "Fit response from a client." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.message.Error:1 of +msgid "A dataclass that stores information about an error that occurred." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetParametersIns:1 of msgid "Parameters request for a client." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`GetParametersRes `\\ \\(status\\, " "parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetParametersRes:1 of msgid "Response when asked to return parameters." 
msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetPropertiesIns:1 of msgid "Properties request for a client." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`GetPropertiesRes `\\ \\(status\\, " "properties\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetPropertiesRes:1 of msgid "Properties response from a client." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.message.Message:1 of +msgid "State of your application from the viewpoint of the entity using it." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`MessageType `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.constant.MessageType:1 of +msgid "Message type." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.constant.MessageTypeLegacy:1 of +msgid "Legacy message type." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`Metadata `\\ \\(run\\_id\\, " +"message\\_id\\, src\\_node\\_id\\, ...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.message.Metadata:1 of +msgid "A dataclass holding metadata associated with the current message." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`MetricsRecord `\\ " +"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +msgid "Metrics record." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`NDArray `\\" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" "alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " ":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`Parameters `\\ \\(tensors\\, " "tensor\\_type\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.Parameters:1 of msgid "Model parameters." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.parametersrecord.ParametersRecord:1 of +msgid "Parameters record." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.ReconnectIns:1 of msgid "ReconnectIns message from server to client." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`RecordSet `\\ " +"\\(\\[parameters\\_records\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.recordset.RecordSet:1 of +msgid "RecordSet stores groups of parameters, metrics and configs." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`ServerMessage `\\ " "\\(\\[get\\_properties\\_ins\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.ServerMessage:1 of msgid "ServerMessage is a container used to hold one instruction message." msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`Status `\\ \\(code\\, message\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.Status:1 of msgid "Client status." msgstr "" +#: ../../source/ref-api/flwr.common.Array.rst:2 +msgid "Array" +msgstr "" + +#: flwr.common.record.parametersrecord.Array:3 of +msgid "" +"A dataclass containing serialized data from an array-like or tensor-like " +"object along with some metadata about it." +msgstr "" + +#: flwr.common.record.parametersrecord.Array:6 of +msgid "" +"A string representing the data type of the serialised object (e.g. " +"`np.float32`)" +msgstr "" + +#: flwr.common.record.parametersrecord.Array:8 of +msgid "" +"A list representing the shape of the unserialized array-like object. This" +" is used to deserialize the data (depending on the serialization method) " +"or simply as a metadata field." +msgstr "" + +#: flwr.common.record.parametersrecord.Array:12 of +msgid "" +"A string indicating the type of serialisation mechanism used to generate " +"the bytes in `data` from an array-like or tensor-like object." +msgstr "" + +#: flwr.common.record.parametersrecord.Array:15 of +msgid "A buffer of bytes containing the data." +msgstr "" + +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +msgid ":py:obj:`numpy `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#: flwr.common.record.parametersrecord.Array.numpy:1 of +msgid "Return the array as a NumPy array." 
+msgstr "" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`dtype `\\" +msgstr "" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`shape `\\" +msgstr "" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`stype `\\" +msgstr "" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`data `\\" +msgstr "" + #: ../../source/ref-api/flwr.common.ClientMessage.rst:2 msgid "ClientMessage" msgstr "" @@ -7380,6 +7534,104 @@ msgid "" "`\\" msgstr "" +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 +msgid "ConfigsRecord" +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " +":py:class:`float`, :py:class:`str`, :py:class:`bytes`, :py:class:`bool`, " +":py:class:`~typing.List`\\ [:py:class:`int`], :py:class:`~typing.List`\\ " +"[:py:class:`float`], :py:class:`~typing.List`\\ [:py:class:`str`], " +":py:class:`~typing.List`\\ [:py:class:`bytes`], " +":py:class:`~typing.List`\\ [:py:class:`bool`]]]" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1 +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "Remove all items from R." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 +#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "Return number of Bytes stored in this object." 
+msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 +#: flwr.common.record.typeddict.TypedDict.get:1 of +msgid "d defaults to None." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 +#: flwr.common.record.typeddict.TypedDict.pop:1 of +msgid "If key is not found, d is returned if given, otherwise KeyError is raised." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 +#: flwr.common.record.typeddict.TypedDict.update:1 of +msgid "Update R from dict/iterable E and F." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of +msgid "This function counts booleans as occupying 1 Byte." +msgstr "" + +#: ../../source/ref-api/flwr.common.Context.rst:2 +msgid "Context" +msgstr "" + +#: flwr.common.context.Context:3 of +msgid "" +"Holds records added by the entity in a given run and that will stay " +"local. This means that the data it holds will never leave the system it's" +" running from. This can be used as an intermediate storage or scratchpad " +"when executing mods. It can also be used as a memory to access at " +"different points during the lifecycle of this entity (e.g. 
across " +"multiple rounds)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Context.rst:28::1 +msgid ":py:obj:`state `\\" +msgstr "" + #: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 msgid "DisconnectRes" msgstr "" @@ -7388,6 +7640,34 @@ msgstr "" msgid ":py:obj:`reason `\\" msgstr "" +#: ../../source/ref-api/flwr.common.Error.rst:2 +msgid "Error" +msgstr "" + +#: flwr.common.message.Error:3 of +msgid "An identifier for the error." +msgstr "" + +#: flwr.common.message.Error:5 of +msgid "A reason for why the error arose (e.g. an exception stack-trace)" +msgstr "" + +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`code `\\" +msgstr "" + +#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of +msgid "Error code." +msgstr "" + +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`reason `\\" +msgstr "" + +#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of +msgid "Reason reported about the error." +msgstr "" + #: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 msgid "EvaluateIns" msgstr "" @@ -7608,11 +7888,283 @@ msgstr "" msgid ":py:obj:`properties `\\" msgstr "" -#: ../../source/ref-api/flwr.common.NDArray.rst:2 -msgid "NDArray" +#: ../../source/ref-api/flwr.common.Message.rst:2 +msgid "Message" msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 +#: flwr.common.message.Message:3 of +msgid "A dataclass including information about the message to be executed." +msgstr "" + +#: flwr.common.message.Message:5 of +msgid "" +"Holds records either sent by another entity (e.g. sent by the server-side" +" logic to a client, or vice-versa) or that will be sent to it." +msgstr "" + +#: flwr.common.message.Message:8 of +msgid "" +"A dataclass that captures information about an error that took place when" +" processing another message." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid "" +":py:obj:`create_error_reply `\\ " +"\\(error\\, ttl\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_error_reply:1 of +msgid "Construct a reply message indicating an error happened." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid "" +":py:obj:`create_reply `\\ \\(content\\," +" ttl\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_reply:1 of +msgid "Create a reply to this message with specified content and TTL." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_content `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_content:1 of +msgid "Return True if message has content, else False." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_error `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_error:1 of +msgid "Return True if message has an error, else False." +msgstr "" + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`content `\\" +msgstr "" + +#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 +#: of +msgid "The content of this message." +msgstr "" + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`error `\\" +msgstr "" + +#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of +msgid "Error captured by this message." +msgstr "" + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`metadata `\\" +msgstr "" + +#: flwr.common.message.Message.create_error_reply:3 of +msgid "The error that was encountered." 
+msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.ttl:1 flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 flwr.common.message.Metadata:16 +#: of +msgid "Time-to-live for this message." +msgstr "" + +#: flwr.common.message.Message.create_reply:3 of +msgid "" +"The method generates a new `Message` as a reply to this message. It " +"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " +"this message and sets 'reply_to_message' to the ID of this message." +msgstr "" + +#: flwr.common.message.Message.create_reply:7 of +msgid "The content for the reply message." +msgstr "" + +#: flwr.common.message.Message.create_reply:12 of +msgid "A new `Message` instance representing the reply." +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:2 +msgid "MessageType" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`EVALUATE `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`QUERY `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`TRAIN `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 +msgid "MessageTypeLegacy" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PARAMETERS `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PROPERTIES `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of +msgid "An identifier for the current run." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of +msgid "An identifier for the current message." 
+msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of +msgid "An identifier for the node sending this message." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1 +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.message.Metadata:9 of +msgid "An identifier for the node receiving this message." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of +msgid "An identifier for the message this message replies to." +msgstr "" + +#: flwr.common.message.Metadata:13 of +msgid "" +"An identifier for grouping messages. In some settings, this is used as " +"the FL round." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of +msgid "A string that encodes the action to be executed on the receiving end." +msgstr "" + +#: flwr.common.message.Metadata:21 of +msgid "" +"An identifier that can be used when loading a particular data partition " +"for a ClientApp. Making use of this identifier is more relevant when " +"conducting simulations." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`dst_node_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`group_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.group_id:1 of +msgid "An identifier for grouping messages." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`message_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`message_type `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`partition_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.partition_id:1 of +msgid "An identifier telling which data partition a ClientApp should use." 
+msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`reply_to_message `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`run_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`src_node_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`ttl `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 +msgid "MetricsRecord" +msgstr "" + +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " +":py:class:`float`, :py:class:`~typing.List`\\ [:py:class:`int`], " +":py:class:`~typing.List`\\ [:py:class:`float`]]]" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.NDArray.rst:2 +msgid "NDArray" +msgstr "" + +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 msgid ":py:obj:`tensors `\\" msgstr "" @@ -7620,6 +8172,65 @@ msgstr "" msgid ":py:obj:`tensor_type `\\" msgstr "" +#: 
../../source/ref-api/flwr.common.ParametersRecord.rst:2 +msgid "ParametersRecord" +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord:3 of +msgid "" +"A dataclass storing named Arrays in order. This means that it holds " +"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " +"viewed as an equivalent to PyTorch's state_dict, but holding serialised " +"tensors instead." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of +msgid "" +"Note that a small amount of Bytes might also be included in this counting" +" that correspond to metadata of the serialized object (e.g. of NumPy " +"array) needed for deseralization." 
+msgstr "" + #: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 msgid "ReconnectIns" msgstr "" @@ -7628,6 +8239,37 @@ msgstr "" msgid ":py:obj:`seconds `\\" msgstr "" +#: ../../source/ref-api/flwr.common.RecordSet.rst:2 +msgid "RecordSet" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`configs_records `\\" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1 +#: flwr.common.RecordSet.configs_records:1::1 of +msgid "Dictionary holding ConfigsRecord instances." +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`metrics_records `\\" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.metrics_records:1 of +msgid "Dictionary holding MetricsRecord instances." +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`parameters_records `\\" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.parameters_records:1 of +msgid "Dictionary holding ParametersRecord instances." +msgstr "" + #: ../../source/ref-api/flwr.common.ServerMessage.rst:2 msgid "ServerMessage" msgstr "" @@ -7664,6 +8306,10 @@ msgstr "" msgid ":py:obj:`message `\\" msgstr "" +#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 +msgid "array\\_from\\_numpy" +msgstr "" + #: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 msgid "bytes\\_to\\_ndarray" msgstr "" @@ -7711,113 +8357,157 @@ msgstr "" msgid "server" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_driver_api `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.run_driver_api:1 of msgid "Run Flower server (Driver API)." 
msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_fleet_api `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.run_fleet_api:1 of msgid "Run Flower server (Fleet API)." msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_server_app `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.app.run_server_app:1 of +#: ../../source/ref-api/flwr.server.rst:27::1 +#: flwr.server.run_serverapp.run_server_app:1 of msgid "Run Flower server app." msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_superlink `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.run_superlink:1 of msgid "Run Flower server (Driver API and Fleet API)." msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 +msgid "" +":py:obj:`start_driver `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:27::1 +#: flwr.server.compat.app.start_driver:1 of +msgid "Start a Flower Driver API server." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:27::1 msgid "" ":py:obj:`start_server `\\ \\(\\*\\[\\, " "server\\_address\\, server\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.start_server:1 of msgid "Start a Flower server using the gRPC transport layer." 
msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 msgid ":py:obj:`ClientManager `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.client_manager.ClientManager:1 of msgid "Abstract base class for managing Flower clients." msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 +msgid "" +":py:obj:`Driver `\\ " +"\\(\\[driver\\_service\\_address\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.driver.driver.Driver:1 of +msgid "`Driver` class provides an interface to the Driver API." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 msgid ":py:obj:`History `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.history.History:1 of msgid "History class for training and/or evaluation metrics collection." msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 +msgid "" +":py:obj:`LegacyContext `\\ \\(state\\[\\, " +"config\\, strategy\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Legacy Context." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 msgid "" ":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " "strategy\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 +msgid "" +":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " +"strategy\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.server_app.ServerApp:1 of +msgid "Flower ServerApp." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 msgid "" ":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," " round\\_timeout\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 -#: flwr.server.app.ServerConfig:1 of +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.server_config.ServerConfig:1 of msgid "Flower server config." msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.client_manager.SimpleClientManager:1 of msgid "Provides a pool of available clients." msgstr "" -#: ../../source/ref-api/flwr.server.rst:56::1 -msgid ":py:obj:`flwr.server.driver `\\" +#: ../../source/ref-api/flwr.server.rst:60::1 +msgid ":py:obj:`flwr.server.strategy `\\" msgstr "" -#: ../../source/ref-api/flwr.server.rst:56::1 flwr.server.driver:1 -#: of -msgid "Flower driver SDK." +#: ../../source/ref-api/flwr.server.rst:60::1 +#: flwr.server.strategy:1 of +msgid "Contains the strategy abstraction and different implementations." msgstr "" -#: ../../source/ref-api/flwr.server.rst:56::1 -msgid ":py:obj:`flwr.server.strategy `\\" +#: ../../source/ref-api/flwr.server.rst:60::1 +msgid ":py:obj:`flwr.server.workflow `\\" msgstr "" -#: ../../source/ref-api/flwr.server.rst:56::1 -#: flwr.server.strategy:1 of -msgid "Contains the strategy abstraction and different implementations." +#: ../../source/ref-api/flwr.server.rst:60::1 +#: flwr.server.workflow:1 of +msgid "Workflows." msgstr "" #: ../../source/ref-api/flwr.server.ClientManager.rst:2 @@ -7912,54 +8602,258 @@ msgstr "" msgid "This method is idempotent." 
msgstr "" -#: ../../source/ref-api/flwr.server.History.rst:2 -msgid "History" +#: ../../source/ref-api/flwr.server.Driver.rst:2 +msgid "Driver" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.driver.driver.Driver:3 of msgid "" -":py:obj:`add_loss_centralized " -"`\\ \\(server\\_round\\, " -"loss\\)" +"The IPv4 or IPv6 address of the Driver API server. Defaults to " +"`\"[::]:9091\"`." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1 -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "Add one loss entry (from centralized evaluation)." +#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +msgid "" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order: * CA certificate. * " +"server certificate. * server private key." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of msgid "" -":py:obj:`add_loss_distributed " -"`\\ \\(server\\_round\\, " -"loss\\)" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order:" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_loss_distributed:1 of -msgid "Add one loss entry (from distributed evaluation)." +#: flwr.server.app.start_server:32 flwr.server.driver.driver.Driver:10 of +msgid "CA certificate." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_metrics_centralized " -"`\\ \\(server\\_round\\, " -"metrics\\)" +#: flwr.server.app.start_server:33 flwr.server.driver.driver.Driver:11 of +msgid "server certificate." 
msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_centralized:1 of -msgid "Add metrics entries (from centralized evaluation)." +#: flwr.server.app.start_server:34 flwr.server.driver.driver.Driver:12 of +msgid "server private key." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_metrics_distributed " -"`\\ \\(server\\_round\\, " -"metrics\\)" +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid ":py:obj:`close `\\ \\(\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.driver.driver.Driver.close:1 +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "Disconnect from the SuperLink if connected." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "" +":py:obj:`create_message `\\ " +"\\(content\\, message\\_type\\, ...\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.create_message:1 of +msgid "Create a new message with specified parameters." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid ":py:obj:`get_node_ids `\\ \\(\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.get_node_ids:1 of +msgid "Get node IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "" +":py:obj:`pull_messages `\\ " +"\\(message\\_ids\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.pull_messages:1 of +msgid "Pull messages based on message IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "" +":py:obj:`push_messages `\\ " +"\\(messages\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.push_messages:1 of +msgid "Push messages to specified node IDs." 
+msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "" +":py:obj:`send_and_receive `\\ " +"\\(messages\\, \\*\\[\\, timeout\\]\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.send_and_receive:1 of +msgid "Push messages to specified node IDs and pull the reply messages." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:3 of +msgid "" +"This method constructs a new `Message` with given content and metadata. " +"The `run_id` and `src_node_id` will be set automatically." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:6 of +msgid "" +"The content for the new message. This holds records that are to be sent " +"to the destination node." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:9 of +msgid "" +"The type of the message, defining the action to be executed on the " +"receiving end." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:12 of +msgid "The ID of the destination node to which the message is being sent." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:14 of +msgid "" +"The ID of the group to which this message is associated. In some " +"settings, this is used as the FL round." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:17 of +msgid "" +"Time-to-live for the round trip of this message, i.e., the time from " +"sending this message to receiving a reply. It specifies the duration for " +"which the message and its potential reply are considered valid." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:22 of +msgid "" +"**message** -- A new `Message` instance with the specified content and " +"metadata." +msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:3 of +msgid "" +"This method is used to collect messages from the SuperLink that " +"correspond to a set of given message IDs." 
+msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:6 of +msgid "An iterable of message IDs for which reply messages are to be retrieved." +msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:9 of +msgid "**messages** -- An iterable of messages received." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:3 of +msgid "" +"This method takes an iterable of messages and sends each message to the " +"node specified in `dst_node_id`." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:6 +#: flwr.server.driver.driver.Driver.send_and_receive:7 of +msgid "An iterable of messages to be sent." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:9 of +msgid "" +"**message_ids** -- An iterable of IDs for the messages that were sent, " +"which can be used to pull replies." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:3 of +msgid "" +"This method sends a list of messages to their destination node IDs and " +"then waits for the replies. It continues to pull replies until either all" +" replies are received or the specified timeout duration is exceeded." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:9 of +msgid "" +"The timeout duration in seconds. If specified, the method will wait for " +"replies for this duration. If `None`, there is no time limit and the " +"method will wait until replies for all messages are received." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:14 of +msgid "**replies** -- An iterable of reply messages received from the SuperLink." 
+msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:18 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:53 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:60 +#: of +msgid "Notes" +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:19 of +msgid "" +"This method uses `push_messages` to send the messages and `pull_messages`" +" to collect the replies. If `timeout` is set, the method may not return " +"replies for all sent messages. A message remains valid until its TTL, " +"which is not affected by `timeout`." +msgstr "" + +#: ../../source/ref-api/flwr.server.History.rst:2 +msgid "History" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1 +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "Add one loss entry (from centralized evaluation)." +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_loss_distributed:1 of +msgid "Add one loss entry (from distributed evaluation)." +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_metrics_centralized " +"`\\ \\(server\\_round\\, " +"metrics\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_centralized:1 of +msgid "Add metrics entries (from centralized evaluation)." 
+msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_metrics_distributed " +"`\\ \\(server\\_round\\, " +"metrics\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 #: flwr.server.history.History.add_metrics_distributed:1 of msgid "Add metrics entries (from distributed evaluation)." msgstr "" @@ -7976,6 +8870,34 @@ msgstr "" msgid "Add metrics entries (from distributed fit)." msgstr "" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 +msgid "LegacyContext" +msgstr "" + +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Bases: :py:class:`~flwr.common.context.Context`" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +msgid ":py:obj:`config `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +msgid ":py:obj:`strategy `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +msgid ":py:obj:`client_manager `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +msgid ":py:obj:`history `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +msgid ":py:obj:`state `\\" +msgstr "" + #: flwr.server.server.Server.client_manager:1::1 of msgid ":py:obj:`client_manager `\\ \\(\\)" msgstr "" @@ -8047,11 +8969,32 @@ msgstr "" msgid "Replace server strategy." msgstr "" +#: ../../source/ref-api/flwr.server.ServerApp.rst:2 +msgid "ServerApp" +msgstr "" + +#: flwr.server.server_app.ServerApp:5 of +msgid "Use the `ServerApp` with an existing `Strategy`:" +msgstr "" + +#: flwr.server.server_app.ServerApp:15 of +msgid "Use the `ServerApp` with a custom main function:" +msgstr "" + +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid ":py:obj:`main `\\ \\(\\)" +msgstr "" + +#: flwr.server.server_app.ServerApp.main:1 +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid "Return a decorator that registers the main fn with the server app." 
+msgstr "" + #: ../../source/ref-api/flwr.server.ServerConfig.rst:2 msgid "ServerConfig" msgstr "" -#: flwr.server.app.ServerConfig:3 of +#: flwr.server.server_config.ServerConfig:3 of msgid "" "All attributes have default values which allows users to configure just " "the ones they care about." @@ -8125,488 +9068,381 @@ msgstr "" msgid "**success**" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:2 -msgid "driver" +#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 +msgid "run\\_driver\\_api" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:22::1 -msgid "" -":py:obj:`start_driver `\\ \\(\\*\\[\\, " -"server\\_address\\, server\\, ...\\]\\)" +#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 +msgid "run\\_fleet\\_api" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:22::1 -#: flwr.server.driver.app.start_driver:1 of -msgid "Start a Flower Driver API server." +#: ../../source/ref-api/flwr.server.run_server_app.rst:2 +msgid "run\\_server\\_app" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:30::1 -msgid "" -":py:obj:`Driver `\\ " -"\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#: ../../source/ref-api/flwr.server.run_superlink.rst:2 +msgid "run\\_superlink" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:30::1 -#: flwr.server.driver.driver.Driver:1 of -msgid "`Driver` class provides an interface to the Driver API." +#: ../../source/ref-api/flwr.server.start_driver.rst:2 +msgid "start\\_driver" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:30::1 +#: flwr.server.compat.app.start_driver:3 of msgid "" -":py:obj:`GrpcDriver `\\ " -"\\(\\[driver\\_service\\_address\\, ...\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.driver.rst:30::1 -#: flwr.server.driver.grpc_driver.GrpcDriver:1 of -msgid "`GrpcDriver` provides access to the gRPC Driver API/service." +"The IPv4 or IPv6 address of the Driver API server. Defaults to " +"`\"[::]:8080\"`." 
msgstr "" -#: ../../source/ref-api/flwr.server.driver.Driver.rst:2 -msgid "Driver" +#: flwr.server.compat.app.start_driver:6 of +msgid "" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_driver` will create one." msgstr "" -#: flwr.server.driver.driver.Driver:3 of +#: flwr.server.app.start_server:9 flwr.server.compat.app.start_driver:10 +#: flwr.simulation.app.start_simulation:28 of msgid "" -"The IPv4 or IPv6 address of the Driver API server. Defaults to " -"`\"[::]:9091\"`." +"Currently supported values are `num_rounds` (int, default: 1) and " +"`round_timeout` in seconds (float, default: None)." msgstr "" -#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +#: flwr.server.app.start_server:12 flwr.server.compat.app.start_driver:13 of msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order: * CA certificate. * " -"server certificate. * server private key." +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`start_server` will use `flwr.server.strategy.FedAvg`." msgstr "" -#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +#: flwr.server.compat.app.start_driver:17 of msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order:" +"An implementation of the class `flwr.server.ClientManager`. If no " +"implementation is provided, then `start_driver` will use " +"`flwr.server.SimpleClientManager`." msgstr "" -#: flwr.server.app.start_server:32 flwr.server.driver.driver.Driver:10 of -msgid "CA certificate." +#: flwr.server.compat.app.start_driver:25 of +msgid "The Driver object to use." 
msgstr "" -#: flwr.server.app.start_server:33 flwr.server.driver.driver.Driver:11 of -msgid "server certificate." +#: flwr.server.app.start_server:37 flwr.server.compat.app.start_driver:28 of +msgid "**hist** -- Object containing training and evaluation metrics." msgstr "" -#: flwr.server.app.start_server:34 flwr.server.driver.driver.Driver:12 of -msgid "server private key." +#: flwr.server.compat.app.start_driver:33 of +msgid "Starting a driver that connects to an insecure server:" msgstr "" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of -msgid ":py:obj:`get_nodes `\\ \\(\\)" +#: flwr.server.compat.app.start_driver:37 of +msgid "Starting a driver that connects to an SSL-enabled server:" msgstr "" -#: flwr.server.driver.driver.Driver.get_nodes:1 -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of -msgid "Get node IDs." +#: ../../source/ref-api/flwr.server.start_server.rst:2 +msgid "start\\_server" msgstr "" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of -msgid "" -":py:obj:`pull_task_res `\\ " -"\\(task\\_ids\\)" +#: flwr.server.app.start_server:3 of +msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." msgstr "" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 -#: flwr.server.driver.driver.Driver.pull_task_res:1 -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.pull_task_res:1 of -msgid "Get task results." +#: flwr.server.app.start_server:5 of +msgid "" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_server` will create one." msgstr "" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of +#: flwr.server.app.start_server:16 of msgid "" -":py:obj:`push_task_ins `\\ " -"\\(task\\_ins\\_list\\)" +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_server` will use " +"`flwr.server.client_manager.SimpleClientManager`." 
msgstr "" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 -#: flwr.server.driver.driver.Driver.push_task_ins:1 -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.push_task_ins:1 of -msgid "Schedule tasks." +#: flwr.server.app.start_server:21 of +msgid "" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" clients. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower clients need to be started with the same value (see " +"`flwr.client.start_client`), otherwise clients will not know about the " +"increased limit and block larger messages." msgstr "" -#: ../../source/ref-api/flwr.server.driver.GrpcDriver.rst:2 -msgid "GrpcDriver" +#: flwr.server.app.start_server:42 of +msgid "Starting an insecure server:" msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid ":py:obj:`connect `\\ \\(\\)" +#: flwr.server.app.start_server:46 of +msgid "Starting an SSL-enabled server:" msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1 -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid "Connect to the Driver API." +#: ../../source/ref-api/flwr.server.strategy.rst:2 +msgid "strategy" msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`create_run `\\ " -"\\(req\\)" +":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.create_run:1 of -msgid "Request for run ID." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.bulyan.Bulyan:1 of +msgid "Bulyan strategy." 
msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid ":py:obj:`disconnect `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.disconnect:1 of -msgid "Disconnect from the Driver API." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid ":py:obj:`get_nodes `\\ \\(req\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.get_nodes:1 of -msgid "Get client IDs." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of +msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`pull_task_res `\\ " -"\\(req\\)" +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " +"`\\ " +"\\(...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: of +msgid "Strategy wrapper for central DP with client-side adaptive clipping." 
msgstr "" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`push_task_ins `\\ " -"\\(req\\)" +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.driver.start_driver.rst:2 -msgid "start\\_driver" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: of +msgid "Strategy wrapper for central DP with server-side adaptive clipping." msgstr "" -#: flwr.server.driver.app.start_driver:3 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"The IPv4 or IPv6 address of the Driver API server. Defaults to " -"`\"[::]:8080\"`." +":py:obj:`DifferentialPrivacyClientSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.driver.app.start_driver:6 of -msgid "" -"A server implementation, either `flwr.server.Server` or a subclass " -"thereof. If no instance is provided, then `start_driver` will create one." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: of +msgid "Strategy wrapper for central DP with client-side fixed clipping." msgstr "" -#: flwr.server.app.start_server:9 flwr.server.driver.app.start_driver:10 -#: flwr.simulation.app.start_simulation:28 of -msgid "" -"Currently supported values are `num_rounds` (int, default: 1) and " -"`round_timeout` in seconds (float, default: None)." -msgstr "" - -#: flwr.server.app.start_server:12 flwr.server.driver.app.start_driver:13 of -msgid "" -"An implementation of the abstract base class " -"`flwr.server.strategy.Strategy`. If no strategy is provided, then " -"`start_server` will use `flwr.server.strategy.FedAvg`." -msgstr "" - -#: flwr.server.driver.app.start_driver:17 of -msgid "" -"An implementation of the class `flwr.server.ClientManager`. 
If no " -"implementation is provided, then `start_driver` will use " -"`flwr.server.SimpleClientManager`." -msgstr "" - -#: flwr.server.app.start_server:37 flwr.server.driver.app.start_driver:26 of -msgid "**hist** -- Object containing training and evaluation metrics." -msgstr "" - -#: flwr.server.driver.app.start_driver:31 of -msgid "Starting a driver that connects to an insecure server:" -msgstr "" - -#: flwr.server.driver.app.start_driver:35 of -msgid "Starting a driver that connects to an SSL-enabled server:" -msgstr "" - -#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 -msgid "run\\_driver\\_api" -msgstr "" - -#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 -msgid "run\\_fleet\\_api" -msgstr "" - -#: ../../source/ref-api/flwr.server.run_server_app.rst:2 -msgid "run\\_server\\_app" -msgstr "" - -#: ../../source/ref-api/flwr.server.run_superlink.rst:2 -msgid "run\\_superlink" -msgstr "" - -#: ../../source/ref-api/flwr.server.start_server.rst:2 -msgid "start\\_server" -msgstr "" - -#: flwr.server.app.start_server:3 of -msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." -msgstr "" - -#: flwr.server.app.start_server:5 of -msgid "" -"A server implementation, either `flwr.server.Server` or a subclass " -"thereof. If no instance is provided, then `start_server` will create one." -msgstr "" - -#: flwr.server.app.start_server:16 of -msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_server` will use " -"`flwr.server.client_manager.SimpleClientManager`." -msgstr "" - -#: flwr.server.app.start_server:21 of -msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" clients. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. 
Note that the " -"Flower clients need to be started with the same value (see " -"`flwr.client.start_client`), otherwise clients will not know about the " -"increased limit and block larger messages." -msgstr "" - -#: flwr.server.app.start_server:42 of -msgid "Starting an insecure server:" -msgstr "" - -#: flwr.server.app.start_server:46 of -msgid "Starting an SSL-enabled server:" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:2 -msgid "strategy" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FaultTolerantFedAvg " -"`\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`DifferentialPrivacyServerSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of -msgid "Configurable fault-tolerant FedAvg strategy implementation." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: of +msgid "Strategy wrapper for central DP with server-side fixed clipping." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedadagrad.FedAdagrad:1 of msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAdam `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedadam.FedAdam:1 of msgid "FedAdam - Adaptive Federated Optimization using Adam." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAvg `\\ \\(\\*\\[\\, " "fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedavg.FedAvg:1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of msgid "Federated Averaging strategy." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -msgid "" -":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of -msgid "Configurable FedXgbNnAvg strategy implementation." -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -msgid "" -":py:obj:`FedXgbBagging `\\ " -"\\(\\[evaluate\\_function\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of -msgid "Configurable FedXgbBagging strategy implementation." -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -msgid "" -":py:obj:`FedXgbCyclic `\\ " -"\\(\\*\\*kwargs\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of -msgid "Configurable FedXgbCyclic strategy implementation." 
-msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAvgAndroid `\\ " "\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedavgm.FedAvgM:1 of msgid "Federated Averaging with Momentum strategy." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedmedian.FedMedian:1 of +msgid "Configurable FedMedian strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedOpt `\\ \\(\\*\\[\\, " "fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedopt.FedOpt:1 of msgid "Federated Optim strategy." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedProx `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedprox.FedProx:1 of msgid "Federated Optimization strategy." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FedYogi `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "FedYogi [Reddi et al., 2020] strategy." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of +msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " -"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Configurable QFedAvg strategy implementation." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of +msgid "Configurable FedXgbBagging strategy implementation." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FedMedian `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedmedian.FedMedian:1 of -msgid "Configurable FedMedian strategy implementation." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of +msgid "Configurable FedXgbCyclic strategy implementation." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FedTrimmedAvg `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of -msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of +msgid "Configurable FedXgbNnAvg strategy implementation." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`Krum `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.krum.Krum:1 of -msgid "Krum [Blanchard et al., 2017] strategy." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "FedYogi [Reddi et al., 2020] strategy." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`Bulyan `\\ \\(\\*\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.bulyan.Bulyan:1 of -msgid "Bulyan strategy." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of +msgid "Configurable fault-tolerant FedAvg strategy implementation." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`DPFedAvgAdaptive `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\)" +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.krum.Krum:1 of +msgid "Krum [Blanchard et al., 2017] strategy." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`DPFedAvgFixed `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of -msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Configurable QFedAvg strategy implementation." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid ":py:obj:`Strategy `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.strategy.Strategy:1 of msgid "Abstract base class for server strategy implementations." 
msgstr "" @@ -8806,6 +9642,14 @@ msgid "" "parameters\\, ...\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 @@ -8827,6 +9671,14 @@ msgid "" "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 +#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 #: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 @@ -8920,6 +9772,10 @@ msgstr "" msgid "Return the sample size and the required number of available clients." msgstr "" +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 +msgid "DPFedAvgAdaptive" +msgstr "" + #: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" msgstr "" @@ -8937,6 +9793,14 @@ msgid "" "\\(server\\_round\\, results\\, ...\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of @@ -8985,6 +9849,14 @@ msgid "" 
"\\(server\\_round\\, parameters\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of msgid "Evaluate model parameters using an evaluation function from the strategy." 
@@ -8998,6 +9870,14 @@ msgid "" "\\(client\\_manager\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of msgid "Initialize global model parameters using given strategy." @@ -9031,6 +9911,14 @@ msgid "" "round of federated evaluation." msgstr "" +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 +msgid "DPFedAvgFixed" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 #: flwr.server.strategy.fedavg.FedAvg:1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of @@ -9112,9270 +10000,11353 @@ msgid "" "round of federated learning." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 -msgid "FaultTolerantFedAvg" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyClientSideAdaptiveClipping" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 #: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid "Use `adaptiveclipping_mod` modifier at the client side." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 #: of msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " +"happen on the client-side, usually by using the built-in " +"`adaptiveclipping_mod`." 
msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 -#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 -#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 -#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using weighted average." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 +#: of +msgid "The strategy to which DP functionalities will be added by this wrapper." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 #: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "The noise multiplier for the Gaussian mechanism for model updates." 
msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 #: of -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "The number of clients that are sampled on each round." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 #: of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " +"recommends to set to 0.1." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 #: of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 #: of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"The learning rate for the clipping norm adaptation. 
Defaults to 0.2. " +"Andrew et al. recommends to set to 0.2." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 #: of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"The stddev of the noise added to the count of updates currently below the" +" estimate. Andrew et al. recommends to set to `expected_num_records/20`" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 -#: ../../source/ref-changelog.md:839 -msgid "FedAdagrad" -msgstr "" - -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 -#: flwr.server.strategy.fedadam.FedAdam:1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" -msgstr "" - -#: flwr.server.strategy.fedadagrad.FedAdagrad:3 -#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 -#: flwr.server.strategy.fedyogi.FedYogi:3 of -msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" -msgstr "" - -#: flwr.server.strategy.fedadagrad.FedAdagrad:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:23 -#: flwr.server.strategy.fedadam.FedAdam:25 -#: flwr.server.strategy.fedadam.FedAdam:27 -#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 -#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 -#: flwr.server.strategy.fedprox.FedProx:61 -#: flwr.server.strategy.fedprox.FedProx:63 -#: flwr.server.strategy.fedyogi.FedYogi:28 -#: flwr.server.strategy.fedyogi.FedYogi:30 of -msgid "Metrics aggregation function, optional." -msgstr "" - -#: flwr.server.strategy.fedadagrad.FedAdagrad:29 -#: flwr.server.strategy.fedadam.FedAdam:29 -#: flwr.server.strategy.fedopt.FedOpt:29 of -msgid "Server-side learning rate. Defaults to 1e-1." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 +#: of +msgid "Create a strategy:" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:31 -#: flwr.server.strategy.fedadam.FedAdam:31 -#: flwr.server.strategy.fedopt.FedOpt:31 of -msgid "Client-side learning rate. Defaults to 1e-1." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 +#: of +msgid "" +"Wrap the strategy with the " +"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:33 -#: flwr.server.strategy.fedadam.FedAdam:37 -#: flwr.server.strategy.fedopt.FedOpt:37 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 +#: of +msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`aggregate_fit `\\" +":py:obj:`aggregate_fit " +"`\\" " \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 +#: of +msgid "Aggregate training results and update clip norms." 
+msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`configure_fit `\\" +":py:obj:`configure_fit " +"`\\" " \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 +msgid "DifferentialPrivacyClientSideFixedClipping" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 +#: of +msgid "Use `fixedclipping_mod` modifier at the client side." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 -msgid "FedAdam" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 +#: of +msgid "" +"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " +"on the client-side, usually by using the built-in `fixedclipping_mod`." msgstr "" -#: flwr.server.strategy.fedadam.FedAdam:33 -#: flwr.server.strategy.fedyogi.FedYogi:36 of -msgid "Momentum parameter. Defaults to 0.9." +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 +#: of +msgid "" +"The noise multiplier for the Gaussian mechanism for model updates. A " +"value of 1.0 or higher is recommended for strong privacy." msgstr "" -#: flwr.server.strategy.fedadam.FedAdam:35 -#: flwr.server.strategy.fedyogi.FedYogi:38 of -msgid "Second moment parameter. Defaults to 0.99." +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 +#: of +msgid "The value of the clipping norm." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 +#: of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " +"wrapper:" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 +#: of +msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 +#: of +msgid "Add noise to the aggregated parameters." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 -msgid "FedAvg" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:3 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of -msgid "Implementation based on https://arxiv.org/abs/1602.05629" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyServerSideAdaptiveClipping" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 #: of msgid "" -"Fraction of clients used during training. 
In case `min_fit_clients` is " -"larger than `fraction_fit * available_clients`, `min_fit_clients` will " -"still be sampled. Defaults to 1.0." +"The standard deviation of the noise added to the count of updates below " +"the estimate. Andrew et al. recommends to set to " +"`expected_num_records/20`" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 #: of msgid "" -"Fraction of clients used during validation. In case " -"`min_evaluate_clients` is larger than `fraction_evaluate * " -"available_clients`, `min_evaluate_clients` will still be sampled. " -"Defaults to 1.0." +"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " +"wrapper" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: 
flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 +msgid "DifferentialPrivacyServerSideFixedClipping" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 +#: of msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 -msgid "FedAvgAndroid" +"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " +"wrapper" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"`\\" +" 
\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of msgid "" ":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 #: of -msgid "" -":py:obj:`bytes_to_ndarray " -"`\\ \\(tensor\\)" -msgstr "" - -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of -msgid "Deserialize NumPy array from bytes." +msgid "Compute the updates, clip, and pass them for aggregation." 
msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of msgid "" ":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of msgid "" ":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of msgid "" ":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 #: of -msgid "" -":py:obj:`ndarray_to_bytes " -"`\\ \\(ndarray\\)" +msgid "Afterward, add noise to the aggregated parameters." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of -msgid "Serialize NumPy array to bytes." 
+#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 +msgid "FaultTolerantFedAvg" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of msgid "" -":py:obj:`ndarrays_to_parameters " -"`\\ " -"\\(ndarrays\\)" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 +#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 +#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 +#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using weighted average." 
+msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of msgid "" -":py:obj:`parameters_to_ndarrays " -"`\\ " -"\\(parameters\\)" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of -msgid "Convert parameters object to NumPy weights." +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 -msgid "FedAvgM" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:3 of -msgid "Implementation based on https://arxiv.org/abs/1909.06335" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:25 of +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"Server-side learning rate used in server-side optimization. Defaults to " -"1.0." 
+":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:28 of -msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 +#: ../../source/ref-changelog.md:839 +msgid "FedAdagrad" +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 +#: flwr.server.strategy.fedadam.FedAdam:1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:3 +#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 +#: flwr.server.strategy.fedyogi.FedYogi:3 of +msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:23 +#: flwr.server.strategy.fedadam.FedAdam:25 +#: flwr.server.strategy.fedadam.FedAdam:27 +#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 +#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 +#: flwr.server.strategy.fedprox.FedProx:61 +#: flwr.server.strategy.fedprox.FedProx:63 +#: flwr.server.strategy.fedyogi.FedYogi:28 +#: flwr.server.strategy.fedyogi.FedYogi:30 of +msgid "Metrics aggregation function, optional." +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:29 +#: flwr.server.strategy.fedadam.FedAdam:29 +#: flwr.server.strategy.fedopt.FedOpt:29 of +msgid "Server-side learning rate. Defaults to 1e-1." +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:31 +#: flwr.server.strategy.fedadam.FedAdam:31 +#: flwr.server.strategy.fedopt.FedOpt:31 of +msgid "Client-side learning rate. Defaults to 1e-1." 
+msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:33 +#: flwr.server.strategy.fedadam.FedAdam:37 +#: flwr.server.strategy.fedopt.FedOpt:37 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit `\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit `\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_evaluation_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_fit_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 -msgid "FedMedian" +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 +msgid "FedAdam" +msgstr "" + +#: 
flwr.server.strategy.fedadam.FedAdam:33 +#: flwr.server.strategy.fedyogi.FedYogi:36 of +msgid "Momentum parameter. Defaults to 0.9." +msgstr "" + +#: flwr.server.strategy.fedadam.FedAdam:35 +#: flwr.server.strategy.fedyogi.FedYogi:38 of +msgid "Second moment parameter. Defaults to 0.99." msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`aggregate_fit `\\ " +":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of -msgid "Aggregate fit results using median." -msgstr "" - #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`configure_fit `\\ " +":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_evaluation_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_fit_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: 
../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 -msgid "FedOpt" +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 +msgid "FedAvg" msgstr "" -#: flwr.server.strategy.fedopt.FedOpt:33 of -msgid "Momentum parameter. Defaults to 0.0." +#: flwr.server.strategy.fedavg.FedAvg:3 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of +msgid "Implementation based on https://arxiv.org/abs/1602.05629" msgstr "" -#: flwr.server.strategy.fedopt.FedOpt:35 of -msgid "Second moment parameter. Defaults to 0.0." +#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 +#: of +msgid "" +"Fraction of clients used during training. In case `min_fit_clients` is " +"larger than `fraction_fit * available_clients`, `min_fit_clients` will " +"still be sampled. Defaults to 1.0." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 +#: of +msgid "" +"Fraction of clients used during validation. In case " +"`min_evaluate_clients` is larger than `fraction_evaluate * " +"available_clients`, `min_evaluate_clients` will still be sampled. " +"Defaults to 1.0." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg:33 of +msgid "Enable (True) or disable (False) in-place aggregation of model updates." 
msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " +"`\\ \\(server\\_round\\, " "results\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`aggregate_fit `\\ " +":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " +"`\\ \\(server\\_round\\, " "parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`configure_fit `\\ " +":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_evaluation_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`num_fit_clients `\\" +":py:obj:`num_fit_clients `\\" " \\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 -msgid "FedProx" -msgstr "" - -#: flwr.server.strategy.fedprox.FedProx:3 of -msgid "Implementation based on https://arxiv.org/abs/1812.06127" +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 +msgid "FedAvgAndroid" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:5 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"The strategy in itself will not be different than FedAvg, the client " -"needs to be 
adjusted. A proximal term needs to be added to the loss " -"function during the training:" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:9 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" -"\n" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:12 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"Where $w^t$ are the global parameters and $w$ are the local weights the " -"function will be optimized with." +":py:obj:`bytes_to_ndarray " +"`\\ \\(tensor\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:15 of -msgid "In PyTorch, for example, the loss would go from:" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of +msgid "Deserialize NumPy array from bytes." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:21 of -msgid "To:" -msgstr "" - -#: flwr.server.strategy.fedprox.FedProx:30 of -msgid "" -"With `global_params` being a copy of the parameters before the training " -"takes place." -msgstr "" - -#: flwr.server.strategy.fedprox.FedProx:65 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"The weight of the proximal term used in the optimization. 0.0 makes this " -"strategy equivalent to FedAvg, and the higher the coefficient, the more " -"regularization will be used (that is, the client parameters will need to " -"be closer to the server parameters during training)." 
+":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`ndarray_to_bytes " +"`\\ \\(ndarray\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of +msgid "Serialize NumPy array to bytes." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`ndarrays_to_parameters " +"`\\ " +"\\(ndarrays\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`num_evaluation_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`num_fit_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of -msgid "Sends the proximal factor mu to the clients" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`parameters_to_ndarrays " +"`\\ " +"\\(parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 -msgid "FedTrimmedAvg" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 +#: of +msgid "Convert parameters object to NumPy weights." msgstr "" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of -msgid "Implemented based on: https://arxiv.org/abs/1803.01498" +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 +msgid "FedAvgM" msgstr "" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of -msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." 
+#: flwr.server.strategy.fedavgm.FedAvgM:3 of +msgid "Implementation based on https://arxiv.org/abs/1909.06335" +msgstr "" + +#: flwr.server.strategy.fedavgm.FedAvgM:25 of +msgid "" +"Server-side learning rate used in server-side optimization. Defaults to " +"1.0." +msgstr "" + +#: flwr.server.strategy.fedavgm.FedAvgM:28 of +msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`aggregate_fit " -"`\\ " +":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using trimmed average." 
-msgstr "" - #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`configure_fit " -"`\\ " +":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_evaluation_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_fit_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 -msgid "FedXgbBagging" +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 +msgid "FedMedian" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ " +"`\\ " "\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation metrics using average." 
-msgstr "" - -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`aggregate_fit " -"`\\ " +":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of -msgid "Aggregate fit results using bagging." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of +msgid "Aggregate fit results using median." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`configure_evaluate " -"`\\ " +"`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`configure_fit " -"`\\ " +":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: 
flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_evaluation_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_fit_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 -msgid "FedXgbCyclic" +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 +msgid "FedOpt" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedopt.FedOpt:33 of +msgid "Momentum parameter. Defaults to 0.0." +msgstr "" + +#: flwr.server.strategy.fedopt.FedOpt:35 of +msgid "Second moment parameter. Defaults to 0.0." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\," -" results\\, failures\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`configure_fit `\\ " 
+"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_evaluation_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 -msgid "FedXgbNnAvg" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of -msgid "" -"This strategy is deprecated, but a copy of it is available in Flower " -"Baselines: " -"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." 
+#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 +msgid "FedProx" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: flwr.server.strategy.fedprox.FedProx:3 of +msgid "Implementation based on https://arxiv.org/abs/1812.06127" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedprox.FedProx:5 of msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\, " -"results\\, failures\\)" +"The strategy in itself will not be different than FedAvg, the client " +"needs to be adjusted. A proximal term needs to be added to the loss " +"function during the training:" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedprox.FedProx:9 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +"\n" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedprox.FedProx:12 of msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"Where $w^t$ are the global parameters and $w$ are the local weights the " +"function will be optimized with." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.server.strategy.fedprox.FedProx:15 of +msgid "In PyTorch, for example, the loss would go from:" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.strategy.fedprox.FedProx:21 of +msgid "To:" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedprox.FedProx:30 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"With `global_params` being a copy of the parameters before the training " +"takes place." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedprox.FedProx:65 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 -msgid "FedYogi" -msgstr "" - -#: flwr.server.strategy.fedyogi.FedYogi:32 of -msgid "Server-side learning rate. Defaults to 1e-2." -msgstr "" - -#: flwr.server.strategy.fedyogi.FedYogi:34 of -msgid "Client-side learning rate. Defaults to 0.0316." -msgstr "" - -#: flwr.server.strategy.fedyogi.FedYogi:40 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." +"The weight of the proximal term used in the optimization. 0.0 makes this " +"strategy equivalent to FedAvg, and the higher the coefficient, the more " +"regularization will be used (that is, the client parameters will need to " +"be closer to the server parameters during training)." 
msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," +"`\\ \\(server\\_round\\," " results\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`aggregate_fit `\\ " +":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," +"`\\ \\(server\\_round\\," " parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`configure_fit `\\ " +":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_evaluation_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_fit_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 -msgid "Krum" +#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of +msgid "Sends the proximal factor mu to the clients" msgstr "" -#: flwr.server.strategy.krum.Krum:3 of -msgid "Implementation based on https://arxiv.org/abs/1703.02757" +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 +msgid "FedTrimmedAvg" msgstr "" -#: flwr.server.strategy.krum.Krum:17 of -msgid "" -"Number of clients to keep before averaging (MultiKrum). 
Defaults to 0, in" -" that case classical Krum is applied." +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of +msgid "Implemented based on: https://arxiv.org/abs/1803.01498" +msgstr "" + +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of +msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`aggregate_fit `\\ " +":py:obj:`aggregate_fit " +"`\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of -msgid "Aggregate fit results using Krum." +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using trimmed average." 
msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`configure_fit `\\ " +":py:obj:`configure_fit " +"`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`num_evaluation_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`num_fit_clients `\\ " +":py:obj:`num_fit_clients " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 -msgid "QFedAvg" +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 +msgid "FedXgbBagging" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation 
metrics using average." +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`aggregate_fit `\\ " +":py:obj:`aggregate_fit " +"`\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of +msgid "Aggregate fit results using bagging." +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`configure_fit `\\ " +":py:obj:`configure_fit " +"`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`num_evaluation_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: 
flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" ":py:obj:`num_fit_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 -msgid "Strategy" +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 +msgid "FedXgbCyclic" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 #: of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ " +"`\\ " "\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation results." -msgstr "" - -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 #: of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" - -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of -msgid "Aggregate training results." 
+":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\," +" results\\, failures\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 #: of msgid "" ":py:obj:`configure_evaluate " -"`\\ " +"`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 #: of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 #: of msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.evaluate:1 of -msgid "Evaluate the current model parameters." -msgstr "" - -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 #: of msgid "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of -msgid "Initialize the (global) model parameters." +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"Successful updates from the previously selected and configured clients. 
" -"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " -"one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of -msgid "Exceptions that occurred while the server was waiting for client updates." +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 +msgid "FedXgbNnAvg" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of msgid "" -"**aggregation_result** -- The aggregated evaluation result. Aggregation " -"typically uses some variant of a weighted average." +"This strategy is deprecated, but a copy of it is available in Flower " +"Baselines: " +"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" -" one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." 
+":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the new global model parameters (i.e., it will replace the " -"previous parameters with the ones returned from this method). If `None` " -"is returned (e.g., because there were only failures and no viable " -"results) then the server will no update the previous model parameters, " -"the updates received in this round are discarded, and the global model " -"parameters remain the same." +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\, " +"results\\, failures\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.evaluate:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"This function can be used to perform centralized (i.e., server-side) " -"evaluation of model parameters." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.evaluate:11 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**evaluation_result** -- The evaluation result, usually a Tuple " -"containing loss and a dictionary containing task-specific metrics (e.g., " -"accuracy)." +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the initial global model parameters." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:2 -msgid "simulation" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:17::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`start_simulation `\\ \\(\\*\\," -" client\\_fn\\[\\, ...\\]\\)" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:17::1 -#: flwr.simulation.app.start_simulation:1 of -msgid "Start a Ray-based Flower simulation server." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 -msgid "start\\_simulation" +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 +msgid "FedYogi" msgstr "" -#: flwr.simulation.app.start_simulation:3 of -msgid "" -"A function creating client instances. The function must take a single " -"`str` argument called `cid`. It should return a single client instance of" -" type Client. Note that the created client instances are ephemeral and " -"will often be destroyed after a single method invocation. Since client " -"instances are not long-lived, they should not attempt to carry state over" -" method invocations. Any state required by the instance (model, dataset, " -"hyperparameters, ...) should be (re-)created in either the call to " -"`client_fn` or the call to any of the client methods (e.g., load " -"evaluation data in the `evaluate` method itself)." +#: flwr.server.strategy.fedyogi.FedYogi:32 of +msgid "Server-side learning rate. Defaults to 1e-2." 
msgstr "" -#: flwr.simulation.app.start_simulation:13 of +#: flwr.server.strategy.fedyogi.FedYogi:34 of +msgid "Client-side learning rate. Defaults to 0.0316." +msgstr "" + +#: flwr.server.strategy.fedyogi.FedYogi:40 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The total number of clients in this simulation. This must be set if " -"`clients_ids` is not set and vice-versa." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:16 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"List `client_id`s for each client. This is only required if `num_clients`" -" is not set. Setting both `num_clients` and `clients_ids` with " -"`len(clients_ids)` not equal to `num_clients` generates an error." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.simulation.app.start_simulation:20 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"CPU and GPU resources for a single client. Supported keys are `num_cpus` " -"and `num_gpus`. To understand the GPU utilization caused by `num_gpus`, " -"as well as using custom resources, please consult the Ray documentation." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:25 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"An implementation of the abstract base class `flwr.server.Server`. If no " -"instance is provided, then `start_server` will create one." 
+":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:31 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"An implementation of the abstract base class `flwr.server.Strategy`. If " -"no strategy is provided, then `start_server` will use " -"`flwr.server.strategy.FedAvg`." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.simulation.app.start_simulation:35 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_simulation` will use " -"`flwr.server.client_manager.SimpleClientManager`." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.simulation.app.start_simulation:39 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args: { \"ignore_reinit_error\": True, " -"\"include_dashboard\": False } An empty dictionary can be used " -"(ray_init_args={}) to prevent any arguments from being passed to " -"ray.init." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.simulation.app.start_simulation:39 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. 
If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args:" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.simulation.app.start_simulation:43 of -msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 +msgid "Krum" msgstr "" -#: flwr.simulation.app.start_simulation:45 of -msgid "" -"An empty dictionary can be used (ray_init_args={}) to prevent any " -"arguments from being passed to ray.init." +#: flwr.server.strategy.krum.Krum:3 of +msgid "Implementation based on https://arxiv.org/abs/1703.02757" msgstr "" -#: flwr.simulation.app.start_simulation:48 of +#: flwr.server.strategy.krum.Krum:17 of msgid "" -"Set to True to prevent `ray.shutdown()` in case " -"`ray.is_initialized()=True`." +"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" +" that case classical Krum is applied." msgstr "" -#: flwr.simulation.app.start_simulation:50 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Optionally specify the type of actor to use. The actor object, which " -"persists throughout the simulation, will be the process in charge of " -"running the clients' jobs (i.e. their `fit()` method)." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:54 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"If you want to create your own Actor classes, you might need to pass some" -" input argument. You can use this dictionary for such purpose." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.simulation.app.start_simulation:57 of -msgid "" -"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " -"the VCE to choose in which node the actor is placed. 
If you are an " -"advanced user needed more control you can use lower-level scheduling " -"strategies to pin actors to specific compute nodes (e.g. via " -"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." -" For all details, please refer to the Ray documentation: " -"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of +msgid "Aggregate fit results using Krum." msgstr "" -#: flwr.simulation.app.start_simulation:66 of -msgid "**hist** -- Object containing metrics from training." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:1 -msgid "Changelog" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:3 -msgid "Unreleased" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:17 -#: ../../source/ref-changelog.md:110 ../../source/ref-changelog.md:210 -#: ../../source/ref-changelog.md:294 ../../source/ref-changelog.md:358 -#: ../../source/ref-changelog.md:416 ../../source/ref-changelog.md:485 -#: ../../source/ref-changelog.md:614 ../../source/ref-changelog.md:656 -#: ../../source/ref-changelog.md:723 ../../source/ref-changelog.md:789 -#: ../../source/ref-changelog.md:834 ../../source/ref-changelog.md:873 -#: ../../source/ref-changelog.md:906 ../../source/ref-changelog.md:956 -msgid "What's new?" 
+#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:80 -#: ../../source/ref-changelog.md:192 ../../source/ref-changelog.md:282 -#: ../../source/ref-changelog.md:346 ../../source/ref-changelog.md:404 -#: ../../source/ref-changelog.md:473 ../../source/ref-changelog.md:535 -#: ../../source/ref-changelog.md:554 ../../source/ref-changelog.md:710 -#: ../../source/ref-changelog.md:781 ../../source/ref-changelog.md:818 -#: ../../source/ref-changelog.md:861 -msgid "Incompatible changes" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:9 -msgid "v1.7.0 (2024-02-05)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:11 ../../source/ref-changelog.md:104 -#: ../../source/ref-changelog.md:204 ../../source/ref-changelog.md:288 -#: ../../source/ref-changelog.md:352 ../../source/ref-changelog.md:410 -#: ../../source/ref-changelog.md:479 ../../source/ref-changelog.md:548 -msgid "Thanks to our contributors" +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 +msgid "QFedAvg" msgstr "" -#: ../../source/ref-changelog.md:13 ../../source/ref-changelog.md:106 -#: ../../source/ref-changelog.md:206 ../../source/ref-changelog.md:290 -#: ../../source/ref-changelog.md:354 ../../source/ref-changelog.md:412 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"We would like to give our special thanks to all the contributors who made" -" the new version of Flower possible (in `git shortlog` order):" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: 
../../source/ref-changelog.md:15 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " -"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " -"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " -"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " -"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " -"Shaaban`, `Yan Gao`, `Yasar Abbas` " +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:19 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce stateful clients (experimental)** " -"([#2770](https://github.com/adap/flower/pull/2770), " -"[#2686](https://github.com/adap/flower/pull/2686), " -"[#2696](https://github.com/adap/flower/pull/2696), " -"[#2643](https://github.com/adap/flower/pull/2643), " -"[#2769](https://github.com/adap/flower/pull/2769))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:21 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"Subclasses of `Client` and `NumPyClient` can now store local state that " -"remains on the client. Let's start with the highlight first: this new " -"feature is compatible with both simulated clients (via " -"`start_simulation`) and networked clients (via `start_client`). It's also" -" the first preview of new abstractions like `Context` and `RecordSet`. " -"Clients can access state of type `RecordSet` via `state: RecordSet = " -"self.context.state`. Changes to this `RecordSet` are preserved across " -"different rounds of execution to enable stateful computations in a " -"unified way across simulation and deployment." 
+":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:23 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**Improve performance** " -"([#2293](https://github.com/adap/flower/pull/2293))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:25 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" -"place aggregation to reduce memory consumption. The Flower client " -"serialization/deserialization has been rewritten from the ground up, " -"which results in significant speedups, especially when the client-side " -"training time is short." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:27 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**Support Federated Learning with Apple MLX and Flower** " -"([#2693](https://github.com/adap/flower/pull/2693))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:29 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"Flower has official support for federated learning using [Apple " -"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " -"example." 
+":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:31 -msgid "" -"**Introduce new XGBoost cyclic strategy** " -"([#2666](https://github.com/adap/flower/pull/2666), " -"[#2668](https://github.com/adap/flower/pull/2668))" +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 +msgid "Strategy" msgstr "" -#: ../../source/ref-changelog.md:33 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"A new strategy called `FedXgbCyclic` supports a client-by-client style of" -" training (often called cyclic). The `xgboost-comprehensive` code example" -" shows how to use it in a full project. In addition to that, `xgboost-" -"comprehensive` now also supports simulation mode. With this, Flower " -"offers best-in-class XGBoost support." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:35 -msgid "" -"**Support Python 3.11** " -"([#2394](https://github.com/adap/flower/pull/2394))" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation results." msgstr "" -#: ../../source/ref-changelog.md:37 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " -"ensure better support for users using more recent Python versions." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:39 -msgid "" -"**Update gRPC and ProtoBuf dependencies** " -"([#2814](https://github.com/adap/flower/pull/2814))" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of +msgid "Aggregate training results." 
msgstr "" -#: ../../source/ref-changelog.md:41 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"The `grpcio` and `protobuf` dependencies were updated to their latest " -"versions for improved security and performance." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:43 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce Docker image for Flower server** " -"([#2700](https://github.com/adap/flower/pull/2700), " -"[#2688](https://github.com/adap/flower/pull/2688), " -"[#2705](https://github.com/adap/flower/pull/2705), " -"[#2695](https://github.com/adap/flower/pull/2695), " -"[#2747](https://github.com/adap/flower/pull/2747), " -"[#2746](https://github.com/adap/flower/pull/2746), " -"[#2680](https://github.com/adap/flower/pull/2680), " -"[#2682](https://github.com/adap/flower/pull/2682), " -"[#2701](https://github.com/adap/flower/pull/2701))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:45 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"The Flower server can now be run using an official Docker image. A new " -"how-to guide explains [how to run Flower using " -"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html). An official Flower client Docker image will follow." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:47 -msgid "" -"**Introduce** `flower-via-docker-compose` **example** " -"([#2626](https://github.com/adap/flower/pull/2626))" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.evaluate:1 of +msgid "Evaluate the current model parameters." 
msgstr "" -#: ../../source/ref-changelog.md:49 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce** `quickstart-sklearn-tabular` **example** " -"([#2719](https://github.com/adap/flower/pull/2719))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:51 -msgid "" -"**Introduce** `custom-metrics` **example** " -"([#1958](https://github.com/adap/flower/pull/1958))" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of +msgid "Initialize the (global) model parameters." msgstr "" -#: ../../source/ref-changelog.md:53 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of msgid "" -"**Update code examples to use Flower Datasets** " -"([#2450](https://github.com/adap/flower/pull/2450), " -"[#2456](https://github.com/adap/flower/pull/2456), " -"[#2318](https://github.com/adap/flower/pull/2318), " -"[#2712](https://github.com/adap/flower/pull/2712))" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from " +"one of the previously selected clients. Note that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." msgstr "" -#: ../../source/ref-changelog.md:55 -msgid "" -"Several code examples were updated to use [Flower " -"Datasets](https://flower.ai/docs/datasets/)." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of +msgid "Exceptions that occurred while the server was waiting for client updates."
msgstr "" -#: ../../source/ref-changelog.md:57 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of msgid "" -"**General updates to Flower Examples** " -"([#2381](https://github.com/adap/flower/pull/2381), " -"[#2805](https://github.com/adap/flower/pull/2805), " -"[#2782](https://github.com/adap/flower/pull/2782), " -"[#2806](https://github.com/adap/flower/pull/2806), " -"[#2829](https://github.com/adap/flower/pull/2829), " -"[#2825](https://github.com/adap/flower/pull/2825), " -"[#2816](https://github.com/adap/flower/pull/2816), " -"[#2726](https://github.com/adap/flower/pull/2726), " -"[#2659](https://github.com/adap/flower/pull/2659), " -"[#2655](https://github.com/adap/flower/pull/2655))" -msgstr "" - -#: ../../source/ref-changelog.md:59 -msgid "Many Flower code examples received substantial updates." +"**aggregation_result** -- The aggregated evaluation result. Aggregation " +"typically uses some variant of a weighted average." msgstr "" -#: ../../source/ref-changelog.md:61 ../../source/ref-changelog.md:154 -msgid "**Update Flower Baselines**" +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of msgid "" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" +" one of the previously selected clients. Note that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`."
msgstr "" -#: ../../source/ref-changelog.md:63 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of msgid "" -"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " -"[#2771](https://github.com/adap/flower/pull/2771))" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the new global model parameters (i.e., it will replace the " +"previous parameters with the ones returned from this method). If `None` " +"is returned (e.g., because there were only failures and no viable " +"results) then the server will not update the previous model parameters, " +"the updates received in this round are discarded, and the global model " +"parameters remain the same." msgstr "" -#: ../../source/ref-changelog.md:64 -msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" +#: flwr.server.strategy.strategy.Strategy.evaluate:3 of msgid "" +"This function can be used to perform centralized (i.e., server-side) " +"evaluation of model parameters." msgstr "" -#: ../../source/ref-changelog.md:65 -msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" +#: flwr.server.strategy.strategy.Strategy.evaluate:11 of msgid "" +"**evaluation_result** -- The evaluation result, usually a Tuple " +"containing loss and a dictionary containing task-specific metrics (e.g., " +"accuracy)." msgstr "" -#: ../../source/ref-changelog.md:66 -msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of msgid "" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the initial global model parameters."
msgstr "" -#: ../../source/ref-changelog.md:67 -msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" +#: ../../source/ref-api/flwr.server.workflow.rst:2 +msgid "workflow" msgstr "" -#: ../../source/ref-changelog.md:68 -msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +msgid "" +":py:obj:`DefaultWorkflow `\\ " +"\\(\\[fit\\_workflow\\, ...\\]\\)" msgstr "" -#: ../../source/ref-changelog.md:70 -msgid "" -"**Improve documentation** " -"([#2674](https://github.com/adap/flower/pull/2674), " -"[#2480](https://github.com/adap/flower/pull/2480), " -"[#2826](https://github.com/adap/flower/pull/2826), " -"[#2727](https://github.com/adap/flower/pull/2727), " -"[#2761](https://github.com/adap/flower/pull/2761), " -"[#2900](https://github.com/adap/flower/pull/2900))" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of +msgid "Default workflow in Flower." 
msgstr "" -#: ../../source/ref-changelog.md:72 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -"**Improved testing and development infrastructure** " -"([#2797](https://github.com/adap/flower/pull/2797), " -"[#2676](https://github.com/adap/flower/pull/2676), " -"[#2644](https://github.com/adap/flower/pull/2644), " -"[#2656](https://github.com/adap/flower/pull/2656), " -"[#2848](https://github.com/adap/flower/pull/2848), " -"[#2675](https://github.com/adap/flower/pull/2675), " -"[#2735](https://github.com/adap/flower/pull/2735), " -"[#2767](https://github.com/adap/flower/pull/2767), " -"[#2732](https://github.com/adap/flower/pull/2732), " -"[#2744](https://github.com/adap/flower/pull/2744), " -"[#2681](https://github.com/adap/flower/pull/2681), " -"[#2699](https://github.com/adap/flower/pull/2699), " -"[#2745](https://github.com/adap/flower/pull/2745), " -"[#2734](https://github.com/adap/flower/pull/2734), " -"[#2731](https://github.com/adap/flower/pull/2731), " -"[#2652](https://github.com/adap/flower/pull/2652), " -"[#2720](https://github.com/adap/flower/pull/2720), " -"[#2721](https://github.com/adap/flower/pull/2721), " -"[#2717](https://github.com/adap/flower/pull/2717), " -"[#2864](https://github.com/adap/flower/pull/2864), " -"[#2694](https://github.com/adap/flower/pull/2694), " -"[#2709](https://github.com/adap/flower/pull/2709), " -"[#2658](https://github.com/adap/flower/pull/2658), " -"[#2796](https://github.com/adap/flower/pull/2796), " -"[#2692](https://github.com/adap/flower/pull/2692), " -"[#2657](https://github.com/adap/flower/pull/2657), " -"[#2813](https://github.com/adap/flower/pull/2813), " -"[#2661](https://github.com/adap/flower/pull/2661), " -"[#2398](https://github.com/adap/flower/pull/2398))" +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" msgstr "" -#: ../../source/ref-changelog.md:74 -msgid "" -"The Flower testing and development infrastructure has received " -"substantial updates. 
This makes Flower 1.7 the most tested release ever." +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "The workflow for the SecAgg+ protocol." msgstr "" -#: ../../source/ref-changelog.md:76 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -"**Update dependencies** " -"([#2753](https://github.com/adap/flower/pull/2753), " -"[#2651](https://github.com/adap/flower/pull/2651), " -"[#2739](https://github.com/adap/flower/pull/2739), " -"[#2837](https://github.com/adap/flower/pull/2837), " -"[#2788](https://github.com/adap/flower/pull/2788), " -"[#2811](https://github.com/adap/flower/pull/2811), " -"[#2774](https://github.com/adap/flower/pull/2774), " -"[#2790](https://github.com/adap/flower/pull/2790), " -"[#2751](https://github.com/adap/flower/pull/2751), " -"[#2850](https://github.com/adap/flower/pull/2850), " -"[#2812](https://github.com/adap/flower/pull/2812), " -"[#2872](https://github.com/adap/flower/pull/2872), " -"[#2736](https://github.com/adap/flower/pull/2736), " -"[#2756](https://github.com/adap/flower/pull/2756), " -"[#2857](https://github.com/adap/flower/pull/2857), " -"[#2757](https://github.com/adap/flower/pull/2757), " -"[#2810](https://github.com/adap/flower/pull/2810), " -"[#2740](https://github.com/adap/flower/pull/2740), " -"[#2789](https://github.com/adap/flower/pull/2789))" +":py:obj:`SecAggWorkflow `\\ " +"\\(reconstruction\\_threshold\\, \\*\\)" msgstr "" -#: ../../source/ref-changelog.md:78 -msgid "" -"**General improvements** " -"([#2803](https://github.com/adap/flower/pull/2803), " -"[#2847](https://github.com/adap/flower/pull/2847), " -"[#2877](https://github.com/adap/flower/pull/2877), " -"[#2690](https://github.com/adap/flower/pull/2690), " -"[#2889](https://github.com/adap/flower/pull/2889), " -"[#2874](https://github.com/adap/flower/pull/2874), " -"[#2819](https://github.com/adap/flower/pull/2819), " 
-"[#2689](https://github.com/adap/flower/pull/2689), " -"[#2457](https://github.com/adap/flower/pull/2457), " -"[#2870](https://github.com/adap/flower/pull/2870), " -"[#2669](https://github.com/adap/flower/pull/2669), " -"[#2876](https://github.com/adap/flower/pull/2876), " -"[#2885](https://github.com/adap/flower/pull/2885), " -"[#2858](https://github.com/adap/flower/pull/2858), " -"[#2867](https://github.com/adap/flower/pull/2867), " -"[#2351](https://github.com/adap/flower/pull/2351), " -"[#2886](https://github.com/adap/flower/pull/2886), " -"[#2860](https://github.com/adap/flower/pull/2860), " -"[#2828](https://github.com/adap/flower/pull/2828), " -"[#2869](https://github.com/adap/flower/pull/2869), " -"[#2875](https://github.com/adap/flower/pull/2875), " -"[#2733](https://github.com/adap/flower/pull/2733), " -"[#2488](https://github.com/adap/flower/pull/2488), " -"[#2646](https://github.com/adap/flower/pull/2646), " -"[#2879](https://github.com/adap/flower/pull/2879), " -"[#2821](https://github.com/adap/flower/pull/2821), " -"[#2855](https://github.com/adap/flower/pull/2855), " -"[#2800](https://github.com/adap/flower/pull/2800), " -"[#2807](https://github.com/adap/flower/pull/2807), " -"[#2801](https://github.com/adap/flower/pull/2801), " -"[#2804](https://github.com/adap/flower/pull/2804), " -"[#2851](https://github.com/adap/flower/pull/2851), " -"[#2787](https://github.com/adap/flower/pull/2787), " -"[#2852](https://github.com/adap/flower/pull/2852), " -"[#2672](https://github.com/adap/flower/pull/2672), " -"[#2759](https://github.com/adap/flower/pull/2759))" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "The workflow for the SecAgg protocol." 
msgstr "" -#: ../../source/ref-changelog.md:82 -msgid "" -"**Deprecate** `start_numpy_client` " -"([#2563](https://github.com/adap/flower/pull/2563), " -"[#2718](https://github.com/adap/flower/pull/2718))" +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 +msgid "DefaultWorkflow" msgstr "" -#: ../../source/ref-changelog.md:84 -msgid "" -"Until now, clients of type `NumPyClient` needed to be started via " -"`start_numpy_client`. In our efforts to consolidate framework APIs, we " -"have introduced changes, and now all client types should start via " -"`start_client`. To continue using `NumPyClient` clients, you simply need " -"to first call the `.to_client()` method and then pass returned `Client` " -"object to `start_client`. The examples and the documentation have been " -"updated accordingly." +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 +msgid "SecAggPlusWorkflow" msgstr "" -#: ../../source/ref-changelog.md:86 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 +#: of msgid "" -"**Deprecate legacy DP wrappers** " -"([#2749](https://github.com/adap/flower/pull/2749))" +"The SecAgg+ protocol ensures the secure summation of integer vectors " +"owned by multiple parties, without accessing any individual integer " +"vector. This workflow allows the server to compute the weighted average " +"of model parameters across all clients, ensuring individual contributions" +" remain private. This is achieved by clients sending both, a weighting " +"factor and a weighted version of the locally updated parameters, both of " +"which are masked for privacy. Specifically, each client uploads \"[w, w *" +" params]\" with masks, where weighting factor 'w' is the number of " +"examples ('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." 
msgstr "" -#: ../../source/ref-changelog.md:88 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 +#: of msgid "" -"Legacy DP wrapper classes are deprecated, but still functional. This is " -"in preparation for an all-new pluggable version of differential privacy " -"support in Flower." +"The protocol involves four main stages: - 'setup': Send SecAgg+ " +"configuration to clients and collect their public keys. - 'share keys': " +"Broadcast public keys among clients and collect encrypted secret" msgstr "" -#: ../../source/ref-changelog.md:90 -msgid "" -"**Make optional arg** `--callable` **in** `flower-client` **a required " -"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 +#: of +msgid "key shares." msgstr "" -#: ../../source/ref-changelog.md:92 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:18 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:18 +#: of msgid "" -"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " -"([#2890](https://github.com/adap/flower/pull/2890))" +"'collect masked vectors': Forward encrypted secret key shares to target " +"clients and collect masked model parameters." msgstr "" -#: ../../source/ref-changelog.md:94 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:20 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:20 +#: of msgid "" -"**Drop experimental** `Task` **fields** " -"([#2866](https://github.com/adap/flower/pull/2866), " -"[#2865](https://github.com/adap/flower/pull/2865))" +"'unmask': Collect secret key shares to decrypt and aggregate the model " +"parameters." 
msgstr "" -#: ../../source/ref-changelog.md:96 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:22 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:22 +#: of msgid "" -"Experimental fields `sa`, `legacy_server_message` and " -"`legacy_client_message` were removed from `Task` message. The removed " -"fields are superseded by the new `RecordSet` abstraction." +"Only the aggregated model parameters are exposed and passed to " +"`Strategy.aggregate_fit`, ensuring individual data privacy." msgstr "" -#: ../../source/ref-changelog.md:98 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:25 +#: of msgid "" -"**Retire MXNet examples** " -"([#2724](https://github.com/adap/flower/pull/2724))" +"The number of shares into which each client's private key is split under " +"the SecAgg+ protocol. If specified as a float, it represents the " +"proportion of all selected clients, and the number of shares will be set " +"dynamically in the run time. A private key can be reconstructed from " +"these shares, allowing for the secure aggregation of model updates. Each " +"client sends one share to each of its neighbors while retaining one." msgstr "" -#: ../../source/ref-changelog.md:100 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:25 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:32 +#: of msgid "" -"The development of the MXNet fremework has ended and the project is now " -"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " -"examples won't receive updates." +"The minimum number of shares required to reconstruct a client's private " +"key, or, if specified as a float, it represents the proportion of the " +"total number of shares needed for reconstruction. 
This threshold ensures " +"privacy by allowing for the recovery of contributions from dropped " +"clients during aggregation, without compromising individual client data." msgstr "" -#: ../../source/ref-changelog.md:102 -msgid "v1.6.0 (2023-11-28)" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:31 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:38 +#: of +msgid "" +"The maximum value of the weight that can be assigned to any single " +"client's update during the weighted average calculation on the server " +"side, e.g., in the FedAvg algorithm." msgstr "" -#: ../../source/ref-changelog.md:108 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:35 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:42 +#: of msgid "" -"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " -"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " -"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," -" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " -"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " -"`cnxdeveloper`, `k3nfalt` " +"The range within which model parameters are clipped before quantization. " +"This parameter ensures each model parameter is bounded within " +"[-clipping_range, clipping_range], facilitating quantization." msgstr "" -#: ../../source/ref-changelog.md:112 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:39 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:46 +#: of msgid "" -"**Add experimental support for Python 3.12** " -"([#2565](https://github.com/adap/flower/pull/2565))" +"The size of the range into which floating-point model parameters are " +"quantized, mapping each parameter to an integer in [0, " +"quantization_range-1]. 
This facilitates cryptographic operations on the " +"model updates." msgstr "" -#: ../../source/ref-changelog.md:114 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:43 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:50 +#: of msgid "" -"**Add new XGBoost examples** " -"([#2612](https://github.com/adap/flower/pull/2612), " -"[#2554](https://github.com/adap/flower/pull/2554), " -"[#2617](https://github.com/adap/flower/pull/2617), " -"[#2618](https://github.com/adap/flower/pull/2618), " -"[#2619](https://github.com/adap/flower/pull/2619), " -"[#2567](https://github.com/adap/flower/pull/2567))" +"The range of values from which random mask entries are uniformly sampled " +"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " +"Please use 2**n values for `modulus_range` to prevent overflow issues." msgstr "" -#: ../../source/ref-changelog.md:116 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:47 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:54 +#: of msgid "" -"We have added a new `xgboost-quickstart` example alongside a new " -"`xgboost-comprehensive` example that goes more in-depth." +"The timeout duration in seconds. If specified, the workflow will wait for" +" replies for this duration each time. If `None`, there is no time limit " +"and the workflow will wait until replies for all messages are received." msgstr "" -#: ../../source/ref-changelog.md:118 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 +#: of msgid "" -"**Add Vertical FL example** " -"([#2598](https://github.com/adap/flower/pull/2598))" +"Generally, higher `num_shares` means more robust to dropouts while " +"increasing the computational costs; higher `reconstruction_threshold` " +"means better privacy guarantees but less tolerance to dropouts." 
msgstr "" -#: ../../source/ref-changelog.md:120 -msgid "" -"We had many questions about Vertical Federated Learning using Flower, so " -"we decided to add an simple example for it on the [Titanic " -"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " -"tutorial (in the README)." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:58 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:64 +#: of +msgid "Too large `max_weight` may compromise the precision of the quantization." msgstr "" -#: ../../source/ref-changelog.md:122 -msgid "" -"**Support custom** `ClientManager` **in** `start_driver()` " -"([#2292](https://github.com/adap/flower/pull/2292))" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 +#: of +msgid "`modulus_range` must be 2**n and larger than `quantization_range`." msgstr "" -#: ../../source/ref-changelog.md:124 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 +#: of msgid "" -"**Update REST API to support create and delete nodes** " -"([#2283](https://github.com/adap/flower/pull/2283))" +"When `num_shares` is a float, it is interpreted as the proportion of all " +"selected clients, and hence the number of shares will be determined in " +"the runtime. This allows for dynamic adjustment based on the total number" +" of participating clients." msgstr "" -#: ../../source/ref-changelog.md:126 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:69 +#: of msgid "" -"**Update the Android SDK** " -"([#2187](https://github.com/adap/flower/pull/2187))" +"Similarly, when `reconstruction_threshold` is a float, it is interpreted " +"as the proportion of the number of shares needed for the reconstruction " +"of a private key. 
This feature enables flexibility in setting the " +"security threshold relative to the number of distributed shares." msgstr "" -#: ../../source/ref-changelog.md:128 -msgid "Add gRPC request-response capability to the Android SDK." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:73 +#: of +msgid "" +"`num_shares`, `reconstruction_threshold`, and the quantization parameters" +" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg+" +" protocol." msgstr "" -#: ../../source/ref-changelog.md:130 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Update the C++ SDK** " -"([#2537](https://github.com/adap/flower/pull/2537), " -"[#2528](https://github.com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" +":py:obj:`collect_masked_vectors_stage " +"`\\" +" \\(driver\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:132 -msgid "Add gRPC request-response capability to the C++ SDK." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "Execute the 'collect masked vectors' stage." msgstr "" -#: ../../source/ref-changelog.md:134 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Make HTTPS the new default** " -"([#2591](https://github.com/adap/flower/pull/2591), " -"[#2636](https://github.com/adap/flower/pull/2636))" +":py:obj:`setup_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:136 -msgid "" -"Flower is moving to HTTPS by default. 
The new `flower-server` requires " -"passing `--certificates`, but users can enable `--insecure` to use HTTP " -"for prototyping. The same applies to `flower-client`, which can either " -"use user-provided credentials or gRPC-bundled certificates to connect to " -"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " -"enable insecure HTTP connections." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 +#: of +msgid "Execute the 'setup' stage." msgstr "" -#: ../../source/ref-changelog.md:138 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"For backward compatibility, `start_client()` and `start_numpy_client()` " -"will still start in insecure mode by default. In a future release, " -"insecure connections will require user opt-in by passing `insecure=True`." +":py:obj:`share_keys_stage " +"`\\ " +"\\(driver\\, context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:140 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 +#: of +msgid "Execute the 'share keys' stage." 
+msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:142 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 +#: of +msgid "Execute the 'unmask' stage." +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 +msgid "SecAggWorkflow" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of msgid "" -"Using the `client_fn`, Flower clients can interchangeably run as " -"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" -" `start_simulation`) without requiring changes to how the client class is" -" defined and instantiated. The `to_client()` function is introduced to " -"convert a `NumPyClient` to a `Client`." +"Bases: " +":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" msgstr "" -#: ../../source/ref-changelog.md:144 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of msgid "" -"**Add new** `Bulyan` **strategy** " -"([#1817](https://github.com/adap/flower/pull/1817), " -"[#1891](https://github.com/adap/flower/pull/1891))" +"The SecAgg protocol ensures the secure summation of integer vectors owned" +" by multiple parties, without accessing any individual integer vector. " +"This workflow allows the server to compute the weighted average of model " +"parameters across all clients, ensuring individual contributions remain " +"private. 
This is achieved by clients sending both, a weighting factor and" +" a weighted version of the locally updated parameters, both of which are " +"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" +" with masks, where weighting factor 'w' is the number of examples " +"('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." msgstr "" -#: ../../source/ref-changelog.md:146 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 of msgid "" -"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " -"2018](https://arxiv.org/abs/1802.07927)" +"The protocol involves four main stages: - 'setup': Send SecAgg " +"configuration to clients and collect their public keys. - 'share keys': " +"Broadcast public keys among clients and collect encrypted secret" msgstr "" -#: ../../source/ref-changelog.md:148 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 of msgid "" -"**Add new** `XGB Bagging` **strategy** " -"([#2611](https://github.com/adap/flower/pull/2611))" +"Each client's private key is split into N shares under the SecAgg " +"protocol, where N is the number of selected clients." msgstr "" -#: ../../source/ref-changelog.md:150 ../../source/ref-changelog.md:152 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:56 of msgid "" -"**Introduce `WorkloadState`** " -"([#2564](https://github.com/adap/flower/pull/2564), " -"[#2632](https://github.com/adap/flower/pull/2632))" +"Generally, higher `reconstruction_threshold` means better privacy " +"guarantees but less tolerance to dropouts." 
msgstr "" -#: ../../source/ref-changelog.md:156 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 of msgid "" -"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " -"[#2286](https://github.com/adap/flower/pull/2286), " -"[#2509](https://github.com/adap/flower/pull/2509))" +"When `reconstruction_threshold` is a float, it is interpreted as the " +"proportion of the number of all selected clients needed for the " +"reconstruction of a private key. This feature enables flexibility in " +"setting the security threshold relative to the number of selected " +"clients." msgstr "" -#: ../../source/ref-changelog.md:158 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:64 of msgid "" -"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400))" +"`reconstruction_threshold`, and the quantization parameters " +"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg " +"protocol." 
msgstr "" -#: ../../source/ref-changelog.md:160 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " -"[#2507](https://github.com/adap/flower/pull/2507))" +":py:obj:`collect_masked_vectors_stage " +"`\\ " +"\\(driver\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:162 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" +":py:obj:`setup_stage `\\" +" \\(driver\\, context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:164 -msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`share_keys_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:166 -msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:168 -msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" +#: ../../source/ref-api/flwr.simulation.rst:2 +msgid "simulation" msgstr "" -#: ../../source/ref-changelog.md:170 -msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" +#: ../../source/ref-api/flwr.simulation.rst:19::1 +msgid "" +":py:obj:`start_simulation `\\ \\(\\*\\," +" client\\_fn\\[\\, ...\\]\\)" msgstr "" -#: ../../source/ref-changelog.md:172 -msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +#: ../../source/ref-api/flwr.simulation.rst:19::1 +#: 
flwr.simulation.app.start_simulation:1 of +msgid "Start a Ray-based Flower simulation server." msgstr "" -#: ../../source/ref-changelog.md:174 -msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +#: ../../source/ref-api/flwr.simulation.rst:19::1 +msgid "" +":py:obj:`run_simulation_from_cli " +"`\\ \\(\\)" msgstr "" -#: ../../source/ref-changelog.md:176 -msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +#: ../../source/ref-api/flwr.simulation.rst:19::1 +#: flwr.simulation.run_simulation.run_simulation_from_cli:1 of +msgid "Run Simulation Engine from the CLI." msgstr "" -#: ../../source/ref-changelog.md:178 +#: ../../source/ref-api/flwr.simulation.rst:19::1 msgid "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +":py:obj:`run_simulation `\\ " +"\\(server\\_app\\, client\\_app\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:180 -msgid "" -"**General updates to Flower Examples** " -"([#2384](https://github.com/adap/flower/pull/2384), " -"[#2425](https://github.com/adap/flower/pull/2425), " -"[#2526](https://github.com/adap/flower/pull/2526), " -"[#2302](https://github.com/adap/flower/pull/2302), " -"[#2545](https://github.com/adap/flower/pull/2545))" +#: ../../source/ref-api/flwr.simulation.rst:19::1 +#: flwr.simulation.run_simulation.run_simulation:1 of +msgid "Run a Flower App using the Simulation Engine." 
msgstr "" -#: ../../source/ref-changelog.md:182 -msgid "" -"**General updates to Flower Baselines** " -"([#2301](https://github.com/adap/flower/pull/2301), " -"[#2305](https://github.com/adap/flower/pull/2305), " -"[#2307](https://github.com/adap/flower/pull/2307), " -"[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435), " -"[#2462](https://github.com/adap/flower/pull/2462), " -"[#2463](https://github.com/adap/flower/pull/2463), " -"[#2461](https://github.com/adap/flower/pull/2461), " -"[#2469](https://github.com/adap/flower/pull/2469), " -"[#2466](https://github.com/adap/flower/pull/2466), " -"[#2471](https://github.com/adap/flower/pull/2471), " -"[#2472](https://github.com/adap/flower/pull/2472), " -"[#2470](https://github.com/adap/flower/pull/2470))" +#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 +msgid "run\\_simulation" msgstr "" -#: ../../source/ref-changelog.md:184 +#: flwr.simulation.run_simulation.run_simulation:3 of msgid "" -"**General updates to the simulation engine** " -"([#2331](https://github.com/adap/flower/pull/2331), " -"[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448), " -"[#2294](https://github.com/adap/flower/pull/2294))" +"The `ServerApp` to be executed. It will send messages to different " +"`ClientApp` instances running on different (virtual) SuperNodes." 
msgstr "" -#: ../../source/ref-changelog.md:186 +#: flwr.simulation.run_simulation.run_simulation:6 of msgid "" -"**General updates to Flower SDKs** " -"([#2288](https://github.com/adap/flower/pull/2288), " -"[#2429](https://github.com/adap/flower/pull/2429), " -"[#2555](https://github.com/adap/flower/pull/2555), " -"[#2543](https://github.com/adap/flower/pull/2543), " -"[#2544](https://github.com/adap/flower/pull/2544), " -"[#2597](https://github.com/adap/flower/pull/2597), " -"[#2623](https://github.com/adap/flower/pull/2623))" +"The `ClientApp` to be executed by each of the SuperNodes. It will receive" +" messages sent by the `ServerApp`." msgstr "" -#: ../../source/ref-changelog.md:188 +#: flwr.simulation.run_simulation.run_simulation:9 of msgid "" -"**General improvements** " -"([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[#2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " -"[#2317](https://github.com/adap/flower/pull/2317), " -"[#2349](https://github.com/adap/flower/pull/2349), " -"[#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446), " -"[#2561](https://github.com/adap/flower/pull/2561), " -"[#2273](https://github.com/adap/flower/pull/2273), " -"[#2267](https://github.com/adap/flower/pull/2267), " -"[#2274](https://github.com/adap/flower/pull/2274), " -"[#2275](https://github.com/adap/flower/pull/2275), " -"[#2432](https://github.com/adap/flower/pull/2432), " -"[#2251](https://github.com/adap/flower/pull/2251), " -"[#2321](https://github.com/adap/flower/pull/2321), " -"[#1936](https://github.com/adap/flower/pull/1936), " -"[#2408](https://github.com/adap/flower/pull/2408), " -"[#2413](https://github.com/adap/flower/pull/2413), " -"[#2401](https://github.com/adap/flower/pull/2401), " 
-"[#2531](https://github.com/adap/flower/pull/2531), " -"[#2534](https://github.com/adap/flower/pull/2534), " -"[#2535](https://github.com/adap/flower/pull/2535), " -"[#2521](https://github.com/adap/flower/pull/2521), " -"[#2553](https://github.com/adap/flower/pull/2553), " -"[#2596](https://github.com/adap/flower/pull/2596))" +"Number of nodes that run a ClientApp. They can be sampled by a Driver in " +"the ServerApp and receive a Message describing what the ClientApp should " +"perform." msgstr "" -#: ../../source/ref-changelog.md:190 ../../source/ref-changelog.md:280 -#: ../../source/ref-changelog.md:344 ../../source/ref-changelog.md:398 -#: ../../source/ref-changelog.md:465 -msgid "Flower received many improvements under the hood, too many to list here." +#: flwr.simulation.run_simulation.run_simulation:13 of +msgid "A simulation backend that runs `ClientApp`s." msgstr "" -#: ../../source/ref-changelog.md:194 +#: flwr.simulation.run_simulation.run_simulation:15 of msgid "" -"**Remove support for Python 3.7** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" +"'A dictionary, e.g {\"\": , \"\": } to " +"configure a backend. Values supported in are those included by " +"`flwr.common.typing.ConfigsRecordValues`." msgstr "" -#: ../../source/ref-changelog.md:196 +#: flwr.simulation.run_simulation.run_simulation:19 of msgid "" -"Python 3.7 support was deprecated in Flower 1.5, and this release removes" -" support. Flower now requires Python 3.8." +"A boolean to indicate whether to enable GPU growth on the main thread. " +"This is desirable if you make use of a TensorFlow model on your " +"`ServerApp` while having your `ClientApp` running on the same GPU. 
" +"Without enabling this, you might encounter an out-of-memory error because" +" TensorFlow, by default, allocates all GPU memory. Read more about how " +"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " +"documentation: https://www.tensorflow.org/api/stable." msgstr "" -#: ../../source/ref-changelog.md:198 +#: flwr.simulation.run_simulation.run_simulation:26 of msgid "" -"**Remove experimental argument** `rest` **from** `start_client` " -"([#2324](https://github.com/adap/flower/pull/2324))" +"When diabled, only INFO, WARNING and ERROR log messages will be shown. If" +" enabled, DEBUG-level logs will be displayed." msgstr "" -#: ../../source/ref-changelog.md:200 +#: ../../source/ref-api/flwr.simulation.run_simulation_from_cli.rst:2 +msgid "run\\_simulation\\_from\\_cli" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +msgid "start\\_simulation" +msgstr "" + +#: flwr.simulation.app.start_simulation:3 of msgid "" -"The (still experimental) argument `rest` was removed from `start_client` " -"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " -"experimental REST API instead." +"A function creating client instances. The function must take a single " +"`str` argument called `cid`. It should return a single client instance of" +" type Client. Note that the created client instances are ephemeral and " +"will often be destroyed after a single method invocation. Since client " +"instances are not long-lived, they should not attempt to carry state over" +" method invocations. Any state required by the instance (model, dataset, " +"hyperparameters, ...) should be (re-)created in either the call to " +"`client_fn` or the call to any of the client methods (e.g., load " +"evaluation data in the `evaluate` method itself)." msgstr "" -#: ../../source/ref-changelog.md:202 -msgid "v1.5.0 (2023-08-31)" +#: flwr.simulation.app.start_simulation:13 of +msgid "" +"The total number of clients in this simulation. 
This must be set if " +"`clients_ids` is not set and vice-versa." msgstr "" -#: ../../source/ref-changelog.md:208 +#: flwr.simulation.app.start_simulation:16 of msgid "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +"List `client_id`s for each client. This is only required if `num_clients`" +" is not set. Setting both `num_clients` and `clients_ids` with " +"`len(clients_ids)` not equal to `num_clients` generates an error." msgstr "" -#: ../../source/ref-changelog.md:212 +#: flwr.simulation.app.start_simulation:20 of msgid "" -"**Introduce new simulation engine** " -"([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +"CPU and GPU resources for a single client. Supported keys are `num_cpus` " +"and `num_gpus`. To understand the GPU utilization caused by `num_gpus`, " +"as well as using custom resources, please consult the Ray documentation." msgstr "" -#: ../../source/ref-changelog.md:214 +#: flwr.simulation.app.start_simulation:25 of msgid "" -"The new simulation engine has been rewritten from the ground up, yet it " -"remains fully backwards compatible. It offers much improved stability and" -" memory handling, especially when working with GPUs. Simulations " -"transparently adapt to different settings to scale simulation in CPU-" -"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." +"An implementation of the abstract base class `flwr.server.Server`. If no " +"instance is provided, then `start_server` will create one." 
msgstr "" -#: ../../source/ref-changelog.md:216 +#: flwr.simulation.app.start_simulation:31 of msgid "" -"Comprehensive documentation includes a new [how-to run " -"simulations](https://flower.ai/docs/framework/how-to-run-" -"simulations.html) guide, new [simulation-" -"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial " -"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." +"An implementation of the abstract base class `flwr.server.Strategy`. If " +"no strategy is provided, then `start_server` will use " +"`flwr.server.strategy.FedAvg`." msgstr "" -#: ../../source/ref-changelog.md:218 +#: flwr.simulation.app.start_simulation:35 of msgid "" -"**Restructure Flower Docs** " -"([#1824](https://github.com/adap/flower/pull/1824), " -"[#1865](https://github.com/adap/flower/pull/1865), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1887](https://github.com/adap/flower/pull/1887), " -"[#1919](https://github.com/adap/flower/pull/1919), " -"[#1922](https://github.com/adap/flower/pull/1922), " -"[#1920](https://github.com/adap/flower/pull/1920), " -"[#1923](https://github.com/adap/flower/pull/1923), " -"[#1924](https://github.com/adap/flower/pull/1924), " -"[#1962](https://github.com/adap/flower/pull/1962), " -"[#2006](https://github.com/adap/flower/pull/2006), " -"[#2133](https://github.com/adap/flower/pull/2133), " -"[#2203](https://github.com/adap/flower/pull/2203), " -"[#2215](https://github.com/adap/flower/pull/2215), " -"[#2122](https://github.com/adap/flower/pull/2122), " -"[#2223](https://github.com/adap/flower/pull/2223), " -"[#2219](https://github.com/adap/flower/pull/2219), " -"[#2232](https://github.com/adap/flower/pull/2232), " -"[#2233](https://github.com/adap/flower/pull/2233), " -"[#2234](https://github.com/adap/flower/pull/2234), " 
-"[#2235](https://github.com/adap/flower/pull/2235), " -"[#2237](https://github.com/adap/flower/pull/2237), " -"[#2238](https://github.com/adap/flower/pull/2238), " -"[#2242](https://github.com/adap/flower/pull/2242), " -"[#2231](https://github.com/adap/flower/pull/2231), " -"[#2243](https://github.com/adap/flower/pull/2243), " -"[#2227](https://github.com/adap/flower/pull/2227))" +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_simulation` will use " +"`flwr.server.client_manager.SimpleClientManager`." msgstr "" -#: ../../source/ref-changelog.md:220 +#: flwr.simulation.app.start_simulation:39 of msgid "" -"Much effort went into a completely restructured Flower docs experience. " -"The documentation on [flower.ai/docs](flower.ai/docs) is now divided " -"into Flower Framework, Flower Baselines, Flower Android SDK, Flower iOS " -"SDK, and code example projects." +"Optional dictionary containing arguments for the call to `ray.init`. If " +"ray_init_args is None (the default), Ray will be initialized with the " +"following default args: { \"ignore_reinit_error\": True, " +"\"include_dashboard\": False } An empty dictionary can be used " +"(ray_init_args={}) to prevent any arguments from being passed to " +"ray.init." msgstr "" -#: ../../source/ref-changelog.md:222 +#: flwr.simulation.app.start_simulation:39 of msgid "" -"**Introduce Flower Swift SDK** " -"([#1858](https://github.com/adap/flower/pull/1858), " -"[#1897](https://github.com/adap/flower/pull/1897))" +"Optional dictionary containing arguments for the call to `ray.init`. 
If " +"ray_init_args is None (the default), Ray will be initialized with the " +"following default args:" msgstr "" -#: ../../source/ref-changelog.md:224 +#: flwr.simulation.app.start_simulation:43 of +msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +msgstr "" + +#: flwr.simulation.app.start_simulation:45 of msgid "" -"This is the first preview release of the Flower Swift SDK. Flower support" -" on iOS is improving, and alongside the Swift SDK and code example, there" -" is now also an iOS quickstart tutorial." +"An empty dictionary can be used (ray_init_args={}) to prevent any " +"arguments from being passed to ray.init." msgstr "" -#: ../../source/ref-changelog.md:226 +#: flwr.simulation.app.start_simulation:48 of msgid "" -"**Introduce Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +"Set to True to prevent `ray.shutdown()` in case " +"`ray.is_initialized()=True`." msgstr "" -#: ../../source/ref-changelog.md:228 +#: flwr.simulation.app.start_simulation:50 of msgid "" -"This is the first preview release of the Flower Kotlin SDK. Flower " -"support on Android is improving, and alongside the Kotlin SDK and code " -"example, there is now also an Android quickstart tutorial." +"Optionally specify the type of actor to use. The actor object, which " +"persists throughout the simulation, will be the process in charge of " +"executing a ClientApp wrapping input argument `client_fn`." 
msgstr "" -#: ../../source/ref-changelog.md:230 +#: flwr.simulation.app.start_simulation:54 of msgid "" -"**Introduce new end-to-end testing infrastructure** " -"([#1842](https://github.com/adap/flower/pull/1842), " -"[#2071](https://github.com/adap/flower/pull/2071), " -"[#2072](https://github.com/adap/flower/pull/2072), " -"[#2068](https://github.com/adap/flower/pull/2068), " -"[#2067](https://github.com/adap/flower/pull/2067), " -"[#2069](https://github.com/adap/flower/pull/2069), " -"[#2073](https://github.com/adap/flower/pull/2073), " -"[#2070](https://github.com/adap/flower/pull/2070), " -"[#2074](https://github.com/adap/flower/pull/2074), " -"[#2082](https://github.com/adap/flower/pull/2082), " -"[#2084](https://github.com/adap/flower/pull/2084), " -"[#2093](https://github.com/adap/flower/pull/2093), " -"[#2109](https://github.com/adap/flower/pull/2109), " -"[#2095](https://github.com/adap/flower/pull/2095), " -"[#2140](https://github.com/adap/flower/pull/2140), " -"[#2137](https://github.com/adap/flower/pull/2137), " -"[#2165](https://github.com/adap/flower/pull/2165))" +"If you want to create your own Actor classes, you might need to pass some" +" input argument. You can use this dictionary for such purpose." msgstr "" -#: ../../source/ref-changelog.md:232 +#: flwr.simulation.app.start_simulation:57 of msgid "" -"A new testing infrastructure ensures that new changes stay compatible " -"with existing framework integrations or strategies." +"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " +"the VCE to choose in which node the actor is placed. If you are an " +"advanced user needed more control you can use lower-level scheduling " +"strategies to pin actors to specific compute nodes (e.g. via " +"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." 
+" For all details, please refer to the Ray documentation: " +"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" msgstr "" -#: ../../source/ref-changelog.md:234 -msgid "**Deprecate Python 3.7**" +#: flwr.simulation.app.start_simulation:66 of +msgid "**hist** -- Object containing metrics from training." msgstr "" -#: ../../source/ref-changelog.md:236 -msgid "" -"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" -" Python 3.7 is now deprecated and will be removed in an upcoming release." +#: ../../source/ref-changelog.md:1 +msgid "Changelog" msgstr "" -#: ../../source/ref-changelog.md:238 -msgid "" -"**Add new** `FedTrimmedAvg` **strategy** " -"([#1769](https://github.com/adap/flower/pull/1769), " -"[#1853](https://github.com/adap/flower/pull/1853))" +#: ../../source/ref-changelog.md:3 +msgid "Unreleased" msgstr "" -#: ../../source/ref-changelog.md:240 -msgid "" -"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " -"2018](https://arxiv.org/abs/1803.01498)." +#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:17 +#: ../../source/ref-changelog.md:110 ../../source/ref-changelog.md:210 +#: ../../source/ref-changelog.md:294 ../../source/ref-changelog.md:358 +#: ../../source/ref-changelog.md:416 ../../source/ref-changelog.md:485 +#: ../../source/ref-changelog.md:614 ../../source/ref-changelog.md:656 +#: ../../source/ref-changelog.md:723 ../../source/ref-changelog.md:789 +#: ../../source/ref-changelog.md:834 ../../source/ref-changelog.md:873 +#: ../../source/ref-changelog.md:906 ../../source/ref-changelog.md:956 +msgid "What's new?" 
msgstr "" -#: ../../source/ref-changelog.md:242 -msgid "" -"**Introduce start_driver** " -"([#1697](https://github.com/adap/flower/pull/1697))" +#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:80 +#: ../../source/ref-changelog.md:192 ../../source/ref-changelog.md:282 +#: ../../source/ref-changelog.md:346 ../../source/ref-changelog.md:404 +#: ../../source/ref-changelog.md:473 ../../source/ref-changelog.md:535 +#: ../../source/ref-changelog.md:554 ../../source/ref-changelog.md:710 +#: ../../source/ref-changelog.md:781 ../../source/ref-changelog.md:818 +#: ../../source/ref-changelog.md:861 +msgid "Incompatible changes" msgstr "" -#: ../../source/ref-changelog.md:244 -msgid "" -"In addition to `start_server` and using the raw Driver API, there is a " -"new `start_driver` function that allows for running `start_server` " -"scripts as a Flower driver with only a single-line code change. Check out" -" the `mt-pytorch` code example to see a working example using " -"`start_driver`." +#: ../../source/ref-changelog.md:9 +msgid "v1.7.0 (2024-02-05)" msgstr "" -#: ../../source/ref-changelog.md:246 -msgid "" -"**Add parameter aggregation to** `mt-pytorch` **code example** " -"([#1785](https://github.com/adap/flower/pull/1785))" +#: ../../source/ref-changelog.md:11 ../../source/ref-changelog.md:104 +#: ../../source/ref-changelog.md:204 ../../source/ref-changelog.md:288 +#: ../../source/ref-changelog.md:352 ../../source/ref-changelog.md:410 +#: ../../source/ref-changelog.md:479 ../../source/ref-changelog.md:548 +msgid "Thanks to our contributors" msgstr "" -#: ../../source/ref-changelog.md:248 +#: ../../source/ref-changelog.md:13 ../../source/ref-changelog.md:106 +#: ../../source/ref-changelog.md:206 ../../source/ref-changelog.md:290 +#: ../../source/ref-changelog.md:354 ../../source/ref-changelog.md:412 msgid "" -"The `mt-pytorch` example shows how to aggregate parameters when writing a" -" driver script. 
The included `driver.py` and `server.py` have been " -"aligned to demonstrate both the low-level way and the high-level way of " -"building server-side logic." +"We would like to give our special thanks to all the contributors who made" +" the new version of Flower possible (in `git shortlog` order):" msgstr "" -#: ../../source/ref-changelog.md:250 +#: ../../source/ref-changelog.md:15 msgid "" -"**Migrate experimental REST API to Starlette** " -"([2171](https://github.com/adap/flower/pull/2171))" +"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " +"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " +"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " +"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " +"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " +"Shaaban`, `Yan Gao`, `Yasar Abbas` " msgstr "" -#: ../../source/ref-changelog.md:252 +#: ../../source/ref-changelog.md:19 msgid "" -"The (experimental) REST API used to be implemented in " -"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" -" use [Starlette](https://www.starlette.io/) directly." +"**Introduce stateful clients (experimental)** " +"([#2770](https://github.com/adap/flower/pull/2770), " +"[#2686](https://github.com/adap/flower/pull/2686), " +"[#2696](https://github.com/adap/flower/pull/2696), " +"[#2643](https://github.com/adap/flower/pull/2643), " +"[#2769](https://github.com/adap/flower/pull/2769))" msgstr "" -#: ../../source/ref-changelog.md:254 +#: ../../source/ref-changelog.md:21 msgid "" -"Please note: The REST request-response API is still experimental and will" -" likely change significantly over time." +"Subclasses of `Client` and `NumPyClient` can now store local state that " +"remains on the client. Let's start with the highlight first: this new " +"feature is compatible with both simulated clients (via " +"`start_simulation`) and networked clients (via `start_client`). 
It's also" +" the first preview of new abstractions like `Context` and `RecordSet`. " +"Clients can access state of type `RecordSet` via `state: RecordSet = " +"self.context.state`. Changes to this `RecordSet` are preserved across " +"different rounds of execution to enable stateful computations in a " +"unified way across simulation and deployment." msgstr "" -#: ../../source/ref-changelog.md:256 +#: ../../source/ref-changelog.md:23 msgid "" -"**Introduce experimental gRPC request-response API** " -"([#1867](https://github.com/adap/flower/pull/1867), " -"[#1901](https://github.com/adap/flower/pull/1901))" +"**Improve performance** " +"([#2293](https://github.com/adap/flower/pull/2293))" msgstr "" -#: ../../source/ref-changelog.md:258 +#: ../../source/ref-changelog.md:25 msgid "" -"In addition to the existing gRPC API (based on bidirectional streaming) " -"and the experimental REST API, there is now a new gRPC API that uses a " -"request-response model to communicate with client nodes." +"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" +"place aggregation to reduce memory consumption. The Flower client " +"serialization/deserialization has been rewritten from the ground up, " +"which results in significant speedups, especially when the client-side " +"training time is short." msgstr "" -#: ../../source/ref-changelog.md:260 +#: ../../source/ref-changelog.md:27 msgid "" -"Please note: The gRPC request-response API is still experimental and will" -" likely change significantly over time." 
+"**Support Federated Learning with Apple MLX and Flower** " +"([#2693](https://github.com/adap/flower/pull/2693))" msgstr "" -#: ../../source/ref-changelog.md:262 +#: ../../source/ref-changelog.md:29 msgid "" -"**Replace the experimental** `start_client(rest=True)` **with the new** " -"`start_client(transport=\"rest\")` " -"([#1880](https://github.com/adap/flower/pull/1880))" +"Flower has official support for federated learning using [Apple " +"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " +"example." msgstr "" -#: ../../source/ref-changelog.md:264 +#: ../../source/ref-changelog.md:31 msgid "" -"The (experimental) `start_client` argument `rest` was deprecated in " -"favour of a new argument `transport`. `start_client(transport=\"rest\")` " -"will yield the same behaviour as `start_client(rest=True)` did before. " -"All code should migrate to the new argument `transport`. The deprecated " -"argument `rest` will be removed in a future release." +"**Introduce new XGBoost cyclic strategy** " +"([#2666](https://github.com/adap/flower/pull/2666), " +"[#2668](https://github.com/adap/flower/pull/2668))" msgstr "" -#: ../../source/ref-changelog.md:266 +#: ../../source/ref-changelog.md:33 msgid "" -"**Add a new gRPC option** " -"([#2197](https://github.com/adap/flower/pull/2197))" +"A new strategy called `FedXgbCyclic` supports a client-by-client style of" +" training (often called cyclic). The `xgboost-comprehensive` code example" +" shows how to use it in a full project. In addition to that, `xgboost-" +"comprehensive` now also supports simulation mode. With this, Flower " +"offers best-in-class XGBoost support." msgstr "" -#: ../../source/ref-changelog.md:268 +#: ../../source/ref-changelog.md:35 msgid "" -"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" -" option set to 0 by default. This prevents the clients from sending " -"keepalive pings when there is no outstanding stream." 
+"**Support Python 3.11** " +"([#2394](https://github.com/adap/flower/pull/2394))" msgstr "" -#: ../../source/ref-changelog.md:270 +#: ../../source/ref-changelog.md:37 msgid "" -"**Improve example notebooks** " -"([#2005](https://github.com/adap/flower/pull/2005))" +"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " +"ensure better support for users using more recent Python versions." msgstr "" -#: ../../source/ref-changelog.md:272 -msgid "There's a new 30min Federated Learning PyTorch tutorial!" +#: ../../source/ref-changelog.md:39 +msgid "" +"**Update gRPC and ProtoBuf dependencies** " +"([#2814](https://github.com/adap/flower/pull/2814))" msgstr "" -#: ../../source/ref-changelog.md:274 +#: ../../source/ref-changelog.md:41 msgid "" -"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +"The `grpcio` and `protobuf` dependencies were updated to their latest " +"versions for improved security and performance." msgstr "" -#: ../../source/ref-changelog.md:276 +#: ../../source/ref-changelog.md:43 msgid "" -"Many examples have received significant updates, including simplified " -"advanced-tensorflow and advanced-pytorch examples, improved macOS " -"compatibility of TensorFlow examples, and code examples for simulation. A" -" major upgrade is that all code examples now have a `requirements.txt` " -"(in addition to `pyproject.toml`)." 
+"**Introduce Docker image for Flower server** " +"([#2700](https://github.com/adap/flower/pull/2700), " +"[#2688](https://github.com/adap/flower/pull/2688), " +"[#2705](https://github.com/adap/flower/pull/2705), " +"[#2695](https://github.com/adap/flower/pull/2695), " +"[#2747](https://github.com/adap/flower/pull/2747), " +"[#2746](https://github.com/adap/flower/pull/2746), " +"[#2680](https://github.com/adap/flower/pull/2680), " +"[#2682](https://github.com/adap/flower/pull/2682), " +"[#2701](https://github.com/adap/flower/pull/2701))" msgstr "" -#: ../../source/ref-changelog.md:278 +#: ../../source/ref-changelog.md:45 msgid "" -"**General improvements** " -"([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +"The Flower server can now be run using an official Docker image. A new " +"how-to guide explains [how to run Flower using " +"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html). An official Flower client Docker image will follow." 
msgstr "" -#: ../../source/ref-changelog.md:284 ../../source/ref-changelog.md:348 -#: ../../source/ref-changelog.md:406 ../../source/ref-changelog.md:475 -#: ../../source/ref-changelog.md:537 -msgid "None" +#: ../../source/ref-changelog.md:47 +msgid "" +"**Introduce** `flower-via-docker-compose` **example** " +"([#2626](https://github.com/adap/flower/pull/2626))" msgstr "" -#: ../../source/ref-changelog.md:286 -msgid "v1.4.0 (2023-04-21)" +#: ../../source/ref-changelog.md:49 +msgid "" +"**Introduce** `quickstart-sklearn-tabular` **example** " +"([#2719](https://github.com/adap/flower/pull/2719))" msgstr "" -#: ../../source/ref-changelog.md:292 +#: ../../source/ref-changelog.md:51 msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +"**Introduce** `custom-metrics` **example** " +"([#1958](https://github.com/adap/flower/pull/1958))" msgstr "" -#: ../../source/ref-changelog.md:296 +#: ../../source/ref-changelog.md:53 msgid "" -"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " -"example)** ([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" +"**Update code examples to use Flower Datasets** " +"([#2450](https://github.com/adap/flower/pull/2450), " +"[#2456](https://github.com/adap/flower/pull/2456), " +"[#2318](https://github.com/adap/flower/pull/2318), " 
+"[#2712](https://github.com/adap/flower/pull/2712))" msgstr "" -#: ../../source/ref-changelog.md:298 +#: ../../source/ref-changelog.md:55 msgid "" -"XGBoost is a tree-based ensemble machine learning algorithm that uses " -"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" -" " -"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" and a [code " -"example](https://github.com/adap/flower/tree/main/examples/xgboost-quickstart)" -" that demonstrates the usage of this new strategy in an XGBoost project." +"Several code examples were updated to use [Flower " +"Datasets](https://flower.ai/docs/datasets/)." msgstr "" -#: ../../source/ref-changelog.md:300 +#: ../../source/ref-changelog.md:57 msgid "" -"**Introduce iOS SDK (preview)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +"**General updates to Flower Examples** " +"([#2381](https://github.com/adap/flower/pull/2381), " +"[#2805](https://github.com/adap/flower/pull/2805), " +"[#2782](https://github.com/adap/flower/pull/2782), " +"[#2806](https://github.com/adap/flower/pull/2806), " +"[#2829](https://github.com/adap/flower/pull/2829), " +"[#2825](https://github.com/adap/flower/pull/2825), " +"[#2816](https://github.com/adap/flower/pull/2816), " +"[#2726](https://github.com/adap/flower/pull/2726), " +"[#2659](https://github.com/adap/flower/pull/2659), " +"[#2655](https://github.com/adap/flower/pull/2655))" msgstr "" -#: ../../source/ref-changelog.md:302 -msgid "" -"This is a major update for anyone wanting to implement Federated Learning" -" on iOS mobile devices. We now have a swift iOS SDK present under " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" that will facilitate greatly the app creating process. To showcase its " -"use, the [iOS " -"example](https://github.com/adap/flower/tree/main/examples/ios) has also " -"been updated!" 
+#: ../../source/ref-changelog.md:59 +msgid "Many Flower code examples received substantial updates." msgstr "" -#: ../../source/ref-changelog.md:304 -msgid "" -"**Introduce new \"What is Federated Learning?\" tutorial** " -"([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721))" +#: ../../source/ref-changelog.md:61 ../../source/ref-changelog.md:154 +msgid "**Update Flower Baselines**" msgstr "" -#: ../../source/ref-changelog.md:306 +#: ../../source/ref-changelog.md:63 msgid "" -"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" -"what-is-federated-learning.html) in our documentation explains the basics" -" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" -" Learning to start their journey with Flower. Forward it to anyone who's " -"interested in Federated Learning!" +"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " +"[#2771](https://github.com/adap/flower/pull/2771))" msgstr "" -#: ../../source/ref-changelog.md:308 -msgid "" -"**Introduce new Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" +#: ../../source/ref-changelog.md:64 +msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" msgstr "" -#: ../../source/ref-changelog.md:310 -msgid "" -"This new baseline replicates the MNIST+CNN task from the paper [Federated" -" Optimization in Heterogeneous Networks (Li et al., " -"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," -" which aims at making convergence more robust in heterogeneous settings." 
+#: ../../source/ref-changelog.md:65 +msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" msgstr "" -#: ../../source/ref-changelog.md:312 -msgid "" -"**Introduce new Flower Baseline: FedAvg FEMNIST** " -"([#1655](https://github.com/adap/flower/pull/1655))" +#: ../../source/ref-changelog.md:66 +msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" msgstr "" -#: ../../source/ref-changelog.md:314 -msgid "" -"This new baseline replicates an experiment evaluating the performance of " -"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " -"Benchmark for Federated Settings (Caldas et al., " -"2018)](https://arxiv.org/abs/1812.01097)." +#: ../../source/ref-changelog.md:67 +msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" msgstr "" -#: ../../source/ref-changelog.md:316 -msgid "" -"**Introduce (experimental) REST API** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +#: ../../source/ref-changelog.md:68 +msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" msgstr "" -#: ../../source/ref-changelog.md:318 +#: ../../source/ref-changelog.md:70 msgid "" -"A new REST API has been introduced as an alternative to the gRPC-based " -"communication stack. In this initial version, the REST API only supports " -"anonymous clients." 
+"**Improve documentation** " +"([#2674](https://github.com/adap/flower/pull/2674), " +"[#2480](https://github.com/adap/flower/pull/2480), " +"[#2826](https://github.com/adap/flower/pull/2826), " +"[#2727](https://github.com/adap/flower/pull/2727), " +"[#2761](https://github.com/adap/flower/pull/2761), " +"[#2900](https://github.com/adap/flower/pull/2900))" msgstr "" -#: ../../source/ref-changelog.md:320 +#: ../../source/ref-changelog.md:72 msgid "" -"Please note: The REST API is still experimental and will likely change " -"significantly over time." +"**Improved testing and development infrastructure** " +"([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " 
+"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" msgstr "" -#: ../../source/ref-changelog.md:322 +#: ../../source/ref-changelog.md:74 msgid "" -"**Improve the (experimental) Driver API** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +"The Flower testing and development infrastructure has received " +"substantial updates. This makes Flower 1.7 the most tested release ever." msgstr "" -#: ../../source/ref-changelog.md:324 +#: ../../source/ref-changelog.md:76 msgid "" -"The Driver API is still an experimental feature, but this release " -"introduces some major upgrades. One of the main improvements is the " -"introduction of an SQLite database to store server state on disk (instead" -" of in-memory). Another improvement is that tasks (instructions or " -"results) that have been delivered will now be deleted. This greatly " -"improves the memory efficiency of a long-running Flower server." 
+"**Update dependencies** " +"([#2753](https://github.com/adap/flower/pull/2753), " +"[#2651](https://github.com/adap/flower/pull/2651), " +"[#2739](https://github.com/adap/flower/pull/2739), " +"[#2837](https://github.com/adap/flower/pull/2837), " +"[#2788](https://github.com/adap/flower/pull/2788), " +"[#2811](https://github.com/adap/flower/pull/2811), " +"[#2774](https://github.com/adap/flower/pull/2774), " +"[#2790](https://github.com/adap/flower/pull/2790), " +"[#2751](https://github.com/adap/flower/pull/2751), " +"[#2850](https://github.com/adap/flower/pull/2850), " +"[#2812](https://github.com/adap/flower/pull/2812), " +"[#2872](https://github.com/adap/flower/pull/2872), " +"[#2736](https://github.com/adap/flower/pull/2736), " +"[#2756](https://github.com/adap/flower/pull/2756), " +"[#2857](https://github.com/adap/flower/pull/2857), " +"[#2757](https://github.com/adap/flower/pull/2757), " +"[#2810](https://github.com/adap/flower/pull/2810), " +"[#2740](https://github.com/adap/flower/pull/2740), " +"[#2789](https://github.com/adap/flower/pull/2789))" msgstr "" -#: ../../source/ref-changelog.md:326 +#: ../../source/ref-changelog.md:78 msgid "" -"**Fix spilling issues related to Ray during simulations** " -"([#1698](https://github.com/adap/flower/pull/1698))" +"**General improvements** " +"([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " 
+"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" msgstr "" -#: ../../source/ref-changelog.md:328 +#: ../../source/ref-changelog.md:82 msgid "" -"While running long simulations, `ray` was sometimes spilling huge amounts" -" of data that would make the training unable to continue. This is now " -"fixed! 
🎉" +"**Deprecate** `start_numpy_client` " +"([#2563](https://github.com/adap/flower/pull/2563), " +"[#2718](https://github.com/adap/flower/pull/2718))" msgstr "" -#: ../../source/ref-changelog.md:330 +#: ../../source/ref-changelog.md:84 msgid "" -"**Add new example using** `TabNet` **and Flower** " -"([#1725](https://github.com/adap/flower/pull/1725))" +"Until now, clients of type `NumPyClient` needed to be started via " +"`start_numpy_client`. In our efforts to consolidate framework APIs, we " +"have introduced changes, and now all client types should start via " +"`start_client`. To continue using `NumPyClient` clients, you simply need " +"to first call the `.to_client()` method and then pass returned `Client` " +"object to `start_client`. The examples and the documentation have been " +"updated accordingly." msgstr "" -#: ../../source/ref-changelog.md:332 +#: ../../source/ref-changelog.md:86 msgid "" -"TabNet is a powerful and flexible framework for training machine learning" -" models on tabular data. We now have a federated example using Flower: " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-tabnet)." +"**Deprecate legacy DP wrappers** " +"([#2749](https://github.com/adap/flower/pull/2749))" msgstr "" -#: ../../source/ref-changelog.md:334 +#: ../../source/ref-changelog.md:88 msgid "" -"**Add new how-to guide for monitoring simulations** " -"([#1649](https://github.com/adap/flower/pull/1649))" +"Legacy DP wrapper classes are deprecated, but still functional. This is " +"in preparation for an all-new pluggable version of differential privacy " +"support in Flower." msgstr "" -#: ../../source/ref-changelog.md:336 +#: ../../source/ref-changelog.md:90 msgid "" -"We now have a documentation guide to help users monitor their performance" -" during simulations." 
+"**Make optional arg** `--callable` **in** `flower-client` **a required " +"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" msgstr "" -#: ../../source/ref-changelog.md:338 +#: ../../source/ref-changelog.md:92 msgid "" -"**Add training metrics to** `History` **object during simulations** " -"([#1696](https://github.com/adap/flower/pull/1696))" +"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " +"([#2890](https://github.com/adap/flower/pull/2890))" msgstr "" -#: ../../source/ref-changelog.md:340 +#: ../../source/ref-changelog.md:94 msgid "" -"The `fit_metrics_aggregation_fn` can be used to aggregate training " -"metrics, but previous releases did not save the results in the `History` " -"object. This is now the case!" +"**Drop experimental** `Task` **fields** " +"([#2866](https://github.com/adap/flower/pull/2866), " +"[#2865](https://github.com/adap/flower/pull/2865))" msgstr "" -#: ../../source/ref-changelog.md:342 +#: ../../source/ref-changelog.md:96 msgid "" -"**General improvements** " -"([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " 
-"[#1685](https://github.com/adap/flower/pull/1685), " -"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/adap/flower/pull/1705), " -"[#1708](https://github.com/adap/flower/pull/1708), " -"[#1711](https://github.com/adap/flower/pull/1711), " -"[#1713](https://github.com/adap/flower/pull/1713), " -"[#1714](https://github.com/adap/flower/pull/1714), " -"[#1718](https://github.com/adap/flower/pull/1718), " -"[#1716](https://github.com/adap/flower/pull/1716), " -"[#1723](https://github.com/adap/flower/pull/1723), " -"[#1735](https://github.com/adap/flower/pull/1735), " -"[#1678](https://github.com/adap/flower/pull/1678), " -"[#1750](https://github.com/adap/flower/pull/1750), " -"[#1753](https://github.com/adap/flower/pull/1753), " -"[#1736](https://github.com/adap/flower/pull/1736), " -"[#1766](https://github.com/adap/flower/pull/1766), " -"[#1760](https://github.com/adap/flower/pull/1760), " -"[#1775](https://github.com/adap/flower/pull/1775), " -"[#1776](https://github.com/adap/flower/pull/1776), " -"[#1777](https://github.com/adap/flower/pull/1777), " -"[#1779](https://github.com/adap/flower/pull/1779), " -"[#1784](https://github.com/adap/flower/pull/1784), " -"[#1773](https://github.com/adap/flower/pull/1773), " -"[#1755](https://github.com/adap/flower/pull/1755), " -"[#1789](https://github.com/adap/flower/pull/1789), " -"[#1788](https://github.com/adap/flower/pull/1788), " -"[#1798](https://github.com/adap/flower/pull/1798), " -"[#1799](https://github.com/adap/flower/pull/1799), " -"[#1739](https://github.com/adap/flower/pull/1739), " -"[#1800](https://github.com/adap/flower/pull/1800), " -"[#1804](https://github.com/adap/flower/pull/1804), " -"[#1805](https://github.com/adap/flower/pull/1805))" +"Experimental fields `sa`, `legacy_server_message` and " +"`legacy_client_message` were removed from `Task` message. The removed " +"fields are superseded by the new `RecordSet` abstraction." 
msgstr "" -#: ../../source/ref-changelog.md:350 -msgid "v1.3.0 (2023-02-06)" +#: ../../source/ref-changelog.md:98 +msgid "" +"**Retire MXNet examples** " +"([#2724](https://github.com/adap/flower/pull/2724))" msgstr "" -#: ../../source/ref-changelog.md:356 +#: ../../source/ref-changelog.md:100 msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +"The development of the MXNet fremework has ended and the project is now " +"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " +"examples won't receive updates." msgstr "" -#: ../../source/ref-changelog.md:360 -msgid "" -"**Add support for** `workload_id` **and** `group_id` **in Driver API** " -"([#1595](https://github.com/adap/flower/pull/1595))" +#: ../../source/ref-changelog.md:102 +msgid "v1.6.0 (2023-11-28)" msgstr "" -#: ../../source/ref-changelog.md:362 +#: ../../source/ref-changelog.md:108 msgid "" -"The (experimental) Driver API now supports a `workload_id` that can be " -"used to identify which workload a task belongs to. It also supports a new" -" `group_id` that can be used, for example, to indicate the current " -"training round. Both the `workload_id` and `group_id` enable client nodes" -" to decide whether they want to handle a task or not." +"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " msgstr "" -#: ../../source/ref-changelog.md:364 +#: ../../source/ref-changelog.md:112 msgid "" -"**Make Driver API and Fleet API address configurable** " -"([#1637](https://github.com/adap/flower/pull/1637))" +"**Add experimental support for Python 3.12** " +"([#2565](https://github.com/adap/flower/pull/2565))" msgstr "" -#: ../../source/ref-changelog.md:366 +#: ../../source/ref-changelog.md:114 msgid "" -"The (experimental) long-running Flower server (Driver API and Fleet API) " -"can now configure the server address of both Driver API (via `--driver-" -"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" +"**Add new XGBoost examples** " +"([#2612](https://github.com/adap/flower/pull/2612), " +"[#2554](https://github.com/adap/flower/pull/2554), " +"[#2617](https://github.com/adap/flower/pull/2617), " +"[#2618](https://github.com/adap/flower/pull/2618), " +"[#2619](https://github.com/adap/flower/pull/2619), " +"[#2567](https://github.com/adap/flower/pull/2567))" msgstr "" -#: ../../source/ref-changelog.md:368 +#: ../../source/ref-changelog.md:116 msgid "" -"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " -"\"0.0.0.0:8086\"`" +"We have added a new `xgboost-quickstart` example alongside a new " +"`xgboost-comprehensive` example that goes more in-depth." msgstr "" -#: ../../source/ref-changelog.md:370 -msgid "Both IPv4 and IPv6 addresses are supported." 
+#: ../../source/ref-changelog.md:118 +msgid "" +"**Add Vertical FL example** " +"([#2598](https://github.com/adap/flower/pull/2598))" msgstr "" -#: ../../source/ref-changelog.md:372 +#: ../../source/ref-changelog.md:120 msgid "" -"**Add new example of Federated Learning using fastai and Flower** " -"([#1598](https://github.com/adap/flower/pull/1598))" +"We had many questions about Vertical Federated Learning using Flower, so " +"we decided to add an simple example for it on the [Titanic " +"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " +"tutorial (in the README)." msgstr "" -#: ../../source/ref-changelog.md:374 +#: ../../source/ref-changelog.md:122 msgid "" -"A new code example (`quickstart-fastai`) demonstrates federated learning " -"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples/quickstart-fastai)." +"**Support custom** `ClientManager` **in** `start_driver()` " +"([#2292](https://github.com/adap/flower/pull/2292))" msgstr "" -#: ../../source/ref-changelog.md:376 +#: ../../source/ref-changelog.md:124 msgid "" -"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" -" versions of Android** " -"([#1603](https://github.com/adap/flower/pull/1603))" +"**Update REST API to support create and delete nodes** " +"([#2283](https://github.com/adap/flower/pull/2283))" msgstr "" -#: ../../source/ref-changelog.md:378 +#: ../../source/ref-changelog.md:126 msgid "" -"The Android code example has received a substantial update: the project " -"is compatible with Flower 1.0 (and later), the UI received a full " -"refresh, and the project is updated to be compatible with newer Android " -"tooling." 
+"**Update the Android SDK** " +"([#2187](https://github.com/adap/flower/pull/2187))" msgstr "" -#: ../../source/ref-changelog.md:380 -msgid "" -"**Add new `FedProx` strategy** " -"([#1619](https://github.com/adap/flower/pull/1619))" +#: ../../source/ref-changelog.md:128 +msgid "Add gRPC request-response capability to the Android SDK." msgstr "" -#: ../../source/ref-changelog.md:382 +#: ../../source/ref-changelog.md:130 msgid "" -"This " -"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" -" is almost identical to " -"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," -" but helps users replicate what is described in this " -"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " -"parameter called `proximal_mu` to regularize the local models with " -"respect to the global models." +"**Update the C++ SDK** " +"([#2537](https://github.com/adap/flower/pull/2537), " +"[#2528](https://github.com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" msgstr "" -#: ../../source/ref-changelog.md:384 -msgid "" -"**Add new metrics to telemetry events** " -"([#1640](https://github.com/adap/flower/pull/1640))" +#: ../../source/ref-changelog.md:132 +msgid "Add gRPC request-response capability to the C++ SDK." msgstr "" -#: ../../source/ref-changelog.md:386 +#: ../../source/ref-changelog.md:134 msgid "" -"An updated event structure allows, for example, the clustering of events " -"within the same workload." +"**Make HTTPS the new default** " +"([#2591](https://github.com/adap/flower/pull/2591), " +"[#2636](https://github.com/adap/flower/pull/2636))" msgstr "" -#: ../../source/ref-changelog.md:388 +#: ../../source/ref-changelog.md:136 msgid "" -"**Add new custom strategy tutorial section** " -"[#1623](https://github.com/adap/flower/pull/1623)" +"Flower is moving to HTTPS by default. 
The new `flower-server` requires " +"passing `--certificates`, but users can enable `--insecure` to use HTTP " +"for prototyping. The same applies to `flower-client`, which can either " +"use user-provided credentials or gRPC-bundled certificates to connect to " +"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " +"enable insecure HTTP connections." msgstr "" -#: ../../source/ref-changelog.md:390 +#: ../../source/ref-changelog.md:138 msgid "" -"The Flower tutorial now has a new section that covers implementing a " -"custom strategy from scratch: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +"For backward compatibility, `start_client()` and `start_numpy_client()` " +"will still start in insecure mode by default. In a future release, " +"insecure connections will require user opt-in by passing `insecure=True`." msgstr "" -#: ../../source/ref-changelog.md:392 +#: ../../source/ref-changelog.md:140 msgid "" -"**Add new custom serialization tutorial section** " -"([#1622](https://github.com/adap/flower/pull/1622))" +"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" msgstr "" -#: ../../source/ref-changelog.md:394 +#: ../../source/ref-changelog.md:142 msgid "" -"The Flower tutorial now has a new section that covers custom " -"serialization: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-customize-the-client-pytorch.ipynb)" +"Using the `client_fn`, Flower clients can interchangeably run as " +"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" +" `start_simulation`) without requiring changes to how the client class is" +" defined and instantiated. 
The `to_client()` function is introduced to " +"convert a `NumPyClient` to a `Client`." msgstr "" -#: ../../source/ref-changelog.md:396 +#: ../../source/ref-changelog.md:144 msgid "" -"**General improvements** " -"([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " -"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github.com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " -"[#1590](https://github.com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/adap/flower/pull/1600), " -"[#1601](https://github.com/adap/flower/pull/1601), " -"[#1597](https://github.com/adap/flower/pull/1597), " -"[#1595](https://github.com/adap/flower/pull/1595), " -"[#1591](https://github.com/adap/flower/pull/1591), " -"[#1588](https://github.com/adap/flower/pull/1588), " -"[#1589](https://github.com/adap/flower/pull/1589), " -"[#1587](https://github.com/adap/flower/pull/1587), " -"[#1573](https://github.com/adap/flower/pull/1573), " -"[#1581](https://github.com/adap/flower/pull/1581), " -"[#1578](https://github.com/adap/flower/pull/1578), " -"[#1574](https://github.com/adap/flower/pull/1574), " -"[#1572](https://github.com/adap/flower/pull/1572), " 
-"[#1586](https://github.com/adap/flower/pull/1586))" +"**Add new** `Bulyan` **strategy** " +"([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891))" msgstr "" -#: ../../source/ref-changelog.md:400 +#: ../../source/ref-changelog.md:146 msgid "" -"**Updated documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" +"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " +"2018](https://arxiv.org/abs/1802.07927)" msgstr "" -#: ../../source/ref-changelog.md:402 ../../source/ref-changelog.md:469 +#: ../../source/ref-changelog.md:148 msgid "" -"As usual, the documentation has improved quite a bit. It is another step " -"in our effort to make the Flower documentation the best documentation of " -"any project. Stay tuned and as always, feel free to provide feedback!" -msgstr "" - -#: ../../source/ref-changelog.md:408 -msgid "v1.2.0 (2023-01-13)" +"**Add new** `XGB Bagging` **strategy** " +"([#2611](https://github.com/adap/flower/pull/2611))" msgstr "" -#: ../../source/ref-changelog.md:414 +#: ../../source/ref-changelog.md:150 ../../source/ref-changelog.md:152 msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." 
-" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +"**Introduce `WorkloadState`** " +"([#2564](https://github.com/adap/flower/pull/2564), " +"[#2632](https://github.com/adap/flower/pull/2632))" msgstr "" -#: ../../source/ref-changelog.md:418 +#: ../../source/ref-changelog.md:156 msgid "" -"**Introduce new Flower Baseline: FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" msgstr "" -#: ../../source/ref-changelog.md:420 +#: ../../source/ref-changelog.md:158 msgid "" -"Over the coming weeks, we will be releasing a number of new reference " -"implementations useful especially to FL newcomers. They will typically " -"revisit well known papers from the literature, and be suitable for " -"integration in your own application or for experimentation, in order to " -"deepen your knowledge of FL in general. Today's release is the first in " -"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" -"pack-fedavg-mnist-cnn/)" +"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400))" msgstr "" -#: ../../source/ref-changelog.md:422 +#: ../../source/ref-changelog.md:160 msgid "" -"**Improve GPU support in simulations** " -"([#1555](https://github.com/adap/flower/pull/1555))" +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" msgstr "" -#: ../../source/ref-changelog.md:424 +#: ../../source/ref-changelog.md:162 msgid "" -"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" -" to improve GPU support. The update includes some of the hard-earned " -"lessons from scaling simulations in GPU cluster environments. 
New " -"defaults make running GPU-based simulations substantially more robust." +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" msgstr "" -#: ../../source/ref-changelog.md:426 -msgid "" -"**Improve GPU support in Jupyter Notebook tutorials** " -"([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" +#: ../../source/ref-changelog.md:164 +msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" msgstr "" -#: ../../source/ref-changelog.md:428 -msgid "" -"Some users reported that Jupyter Notebooks have not always been easy to " -"use on GPU instances. We listened and made improvements to all of our " -"Jupyter notebooks! Check out the updated notebooks here:" +#: ../../source/ref-changelog.md:166 +msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" msgstr "" -#: ../../source/ref-changelog.md:430 -msgid "" -"[An Introduction to Federated Learning](https://flower.ai/docs/framework" -"/tutorial-get-started-with-flower-pytorch.html)" +#: ../../source/ref-changelog.md:168 +msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" msgstr "" -#: ../../source/ref-changelog.md:431 -msgid "" -"[Strategies in Federated Learning](https://flower.ai/docs/framework" -"/tutorial-use-a-federated-learning-strategy-pytorch.html)" +#: ../../source/ref-changelog.md:170 +msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" msgstr "" -#: ../../source/ref-changelog.md:432 -msgid "" -"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" -"-strategy-from-scratch-pytorch.html)" +#: ../../source/ref-changelog.md:172 +msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" msgstr "" -#: ../../source/ref-changelog.md:433 -msgid "" -"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" -"customize-the-client-pytorch.html)" +#: ../../source/ref-changelog.md:174 +msgid "FedWav2vec 
[#2551](https://github.com/adap/flower/pull/2551)" msgstr "" -#: ../../source/ref-changelog.md:435 -msgid "" -"**Introduce optional telemetry** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" +#: ../../source/ref-changelog.md:176 +msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" msgstr "" -#: ../../source/ref-changelog.md:437 +#: ../../source/ref-changelog.md:178 msgid "" -"After a [request for " -"feedback](https://github.com/adap/flower/issues/1534) from the community," -" the Flower open-source project introduces optional collection of " -"*anonymous* usage metrics to make well-informed decisions to improve " -"Flower. Doing this enables the Flower team to understand how Flower is " -"used and what challenges users might face." +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" msgstr "" -#: ../../source/ref-changelog.md:439 +#: ../../source/ref-changelog.md:180 msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users who do not want to share anonymous usage metrics. " -"[Read more.](https://flower.ai/docs/telemetry.html)." 
+"**General updates to Flower Examples** " +"([#2384](https://github.com/adap/flower/pull/2384), " +"[#2425](https://github.com/adap/flower/pull/2425), " +"[#2526](https://github.com/adap/flower/pull/2526), " +"[#2302](https://github.com/adap/flower/pull/2302), " +"[#2545](https://github.com/adap/flower/pull/2545))" msgstr "" -#: ../../source/ref-changelog.md:441 +#: ../../source/ref-changelog.md:182 msgid "" -"**Introduce (experimental) Driver API** " -"([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +"**General updates to Flower Baselines** " +"([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " +"[#2307](https://github.com/adap/flower/pull/2307), " +"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435), " +"[#2462](https://github.com/adap/flower/pull/2462), " +"[#2463](https://github.com/adap/flower/pull/2463), " +"[#2461](https://github.com/adap/flower/pull/2461), " +"[#2469](https://github.com/adap/flower/pull/2469), " +"[#2466](https://github.com/adap/flower/pull/2466), " +"[#2471](https://github.com/adap/flower/pull/2471), " +"[#2472](https://github.com/adap/flower/pull/2472), " +"[#2470](https://github.com/adap/flower/pull/2470))" msgstr "" -#: ../../source/ref-changelog.md:443 +#: ../../source/ref-changelog.md:184 msgid "" -"Flower now has a new (experimental) Driver API which will enable fully " -"programmable, async, and multi-tenant Federated Learning and Federated " -"Analytics applications. Phew, that's a lot! 
Going forward, the Driver API" -" will be the abstraction that many upcoming features will be built on - " -"and you can start building those things now, too." +"**General updates to the simulation engine** " +"([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448), " +"[#2294](https://github.com/adap/flower/pull/2294))" msgstr "" -#: ../../source/ref-changelog.md:445 +#: ../../source/ref-changelog.md:186 msgid "" -"The Driver API also enables a new execution mode in which the server runs" -" indefinitely. Multiple individual workloads can run concurrently and " -"start and stop their execution independent of the server. This is " -"especially useful for users who want to deploy Flower in production." +"**General updates to Flower SDKs** " +"([#2288](https://github.com/adap/flower/pull/2288), " +"[#2429](https://github.com/adap/flower/pull/2429), " +"[#2555](https://github.com/adap/flower/pull/2555), " +"[#2543](https://github.com/adap/flower/pull/2543), " +"[#2544](https://github.com/adap/flower/pull/2544), " +"[#2597](https://github.com/adap/flower/pull/2597), " +"[#2623](https://github.com/adap/flower/pull/2623))" msgstr "" -#: ../../source/ref-changelog.md:447 +#: ../../source/ref-changelog.md:188 msgid "" -"To learn more, check out the `mt-pytorch` code example. We look forward " -"to you feedback!" 
+"**General improvements** " +"([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" msgstr "" -#: ../../source/ref-changelog.md:449 -msgid "" -"Please note: *The Driver API is still experimental and will likely change" -" significantly over time.*" +#: ../../source/ref-changelog.md:190 ../../source/ref-changelog.md:280 +#: ../../source/ref-changelog.md:344 ../../source/ref-changelog.md:398 +#: ../../source/ref-changelog.md:465 +msgid "Flower received many improvements under the hood, too many to list here." 
msgstr "" -#: ../../source/ref-changelog.md:451 +#: ../../source/ref-changelog.md:194 msgid "" -"**Add new Federated Analytics with Pandas example** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" +"**Remove support for Python 3.7** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" msgstr "" -#: ../../source/ref-changelog.md:453 +#: ../../source/ref-changelog.md:196 msgid "" -"A new code example (`quickstart-pandas`) demonstrates federated analytics" -" with Pandas and Flower. You can find it here: " -"[quickstart-pandas](https://github.com/adap/flower/tree/main/examples/quickstart-pandas)." +"Python 3.7 support was deprecated in Flower 1.5, and this release removes" +" support. Flower now requires Python 3.8." msgstr "" -#: ../../source/ref-changelog.md:455 +#: ../../source/ref-changelog.md:198 msgid "" -"**Add new strategies: Krum and MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" +"**Remove experimental argument** `rest` **from** `start_client` " +"([#2324](https://github.com/adap/flower/pull/2324))" msgstr "" -#: ../../source/ref-changelog.md:457 +#: ../../source/ref-changelog.md:200 msgid "" -"Edoardo, a computer science student at the Sapienza University of Rome, " -"contributed a new `Krum` strategy that enables users to easily use Krum " -"and MultiKrum in their workloads." +"The (still experimental) argument `rest` was removed from `start_client` " +"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " +"experimental REST API instead." 
msgstr "" -#: ../../source/ref-changelog.md:459 -msgid "" -"**Update C++ example to be compatible with Flower v1.2.0** " -"([#1495](https://github.com/adap/flower/pull/1495))" +#: ../../source/ref-changelog.md:202 +msgid "v1.5.0 (2023-08-31)" msgstr "" -#: ../../source/ref-changelog.md:461 +#: ../../source/ref-changelog.md:208 msgid "" -"The C++ code example has received a substantial update to make it " -"compatible with the latest version of Flower." +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " msgstr "" -#: ../../source/ref-changelog.md:463 +#: ../../source/ref-changelog.md:212 msgid "" -"**General improvements** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" +"**Introduce new simulation engine** " +"([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" msgstr "" -#: ../../source/ref-changelog.md:467 +#: ../../source/ref-changelog.md:214 msgid "" -"**Updated documentation** " -"([#1494](https://github.com/adap/flower/pull/1494), " 
-"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +"The new simulation engine has been rewritten from the ground up, yet it " +"remains fully backwards compatible. It offers much improved stability and" +" memory handling, especially when working with GPUs. Simulations " +"transparently adapt to different settings to scale simulation in CPU-" +"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." msgstr "" -#: ../../source/ref-changelog.md:471 +#: ../../source/ref-changelog.md:216 msgid "" -"One highlight is the new [first time contributor " -"guide](https://flower.ai/docs/first-time-contributors.html): if you've " -"never contributed on GitHub before, this is the perfect place to start!" +"Comprehensive documentation includes a new [how-to run " +"simulations](https://flower.ai/docs/framework/how-to-run-" +"simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." 
msgstr "" -#: ../../source/ref-changelog.md:477 -msgid "v1.1.0 (2022-10-31)" +#: ../../source/ref-changelog.md:218 +msgid "" +"**Restructure Flower Docs** " +"([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " +"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" msgstr "" -#: ../../source/ref-changelog.md:481 +#: ../../source/ref-changelog.md:220 msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made the new version of Flower possible (in `git shortlog` order):" +"Much effort went into a completely restructured Flower docs experience. 
" +"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " +"divided into Flower Framework, Flower Baselines, Flower Android SDK, " +"Flower iOS SDK, and code example projects." msgstr "" -#: ../../source/ref-changelog.md:483 +#: ../../source/ref-changelog.md:222 msgid "" -"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" +"**Introduce Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" msgstr "" -#: ../../source/ref-changelog.md:487 +#: ../../source/ref-changelog.md:224 msgid "" -"**Introduce Differential Privacy wrappers (preview)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" +"This is the first preview release of the Flower Swift SDK. Flower support" +" on iOS is improving, and alongside the Swift SDK and code example, there" +" is now also an iOS quickstart tutorial." msgstr "" -#: ../../source/ref-changelog.md:489 +#: ../../source/ref-changelog.md:226 msgid "" -"The first (experimental) preview of pluggable Differential Privacy " -"wrappers enables easy configuration and usage of differential privacy " -"(DP). The pluggable DP wrappers enable framework-agnostic **and** " -"strategy-agnostic usage of both client-side DP and server-side DP. Head " -"over to the Flower docs, a new explainer goes into more detail." +"**Introduce Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" msgstr "" -#: ../../source/ref-changelog.md:491 +#: ../../source/ref-changelog.md:228 msgid "" -"**New iOS CoreML code example** " -"([#1289](https://github.com/adap/flower/pull/1289))" +"This is the first preview release of the Flower Kotlin SDK. 
Flower " +"support on Android is improving, and alongside the Kotlin SDK and code " +"example, there is now also an Android quickstart tutorial." msgstr "" -#: ../../source/ref-changelog.md:493 +#: ../../source/ref-changelog.md:230 msgid "" -"Flower goes iOS! A massive new code example shows how Flower clients can " -"be built for iOS. The code example contains both Flower iOS SDK " -"components that can be used for many tasks, and one task example running " -"on CoreML." +"**Introduce new end-to-end testing infrastructure** " +"([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " +"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" msgstr "" -#: ../../source/ref-changelog.md:495 +#: ../../source/ref-changelog.md:232 msgid "" -"**New FedMedian strategy** " -"([#1461](https://github.com/adap/flower/pull/1461))" +"A new testing infrastructure ensures that new changes stay compatible " +"with existing framework integrations or strategies." msgstr "" -#: ../../source/ref-changelog.md:497 -msgid "" -"The new `FedMedian` strategy implements Federated Median (FedMedian) by " -"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." 
+#: ../../source/ref-changelog.md:234 +msgid "**Deprecate Python 3.7**" msgstr "" -#: ../../source/ref-changelog.md:499 +#: ../../source/ref-changelog.md:236 msgid "" -"**Log** `Client` **exceptions in Virtual Client Engine** " -"([#1493](https://github.com/adap/flower/pull/1493))" +"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" +" Python 3.7 is now deprecated and will be removed in an upcoming release." msgstr "" -#: ../../source/ref-changelog.md:501 +#: ../../source/ref-changelog.md:238 msgid "" -"All `Client` exceptions happening in the VCE are now logged by default " -"and not just exposed to the configured `Strategy` (via the `failures` " -"argument)." +"**Add new** `FedTrimmedAvg` **strategy** " +"([#1769](https://github.com/adap/flower/pull/1769), " +"[#1853](https://github.com/adap/flower/pull/1853))" msgstr "" -#: ../../source/ref-changelog.md:503 +#: ../../source/ref-changelog.md:240 msgid "" -"**Improve Virtual Client Engine internals** " -"([#1401](https://github.com/adap/flower/pull/1401), " -"[#1453](https://github.com/adap/flower/pull/1453))" +"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)." msgstr "" -#: ../../source/ref-changelog.md:505 +#: ../../source/ref-changelog.md:242 msgid "" -"Some internals of the Virtual Client Engine have been revamped. The VCE " -"now uses Ray 2.0 under the hood, the value type of the `client_resources`" -" dictionary changed to `float` to allow fractions of resources to be " -"allocated." 
+"**Introduce start_driver** " +"([#1697](https://github.com/adap/flower/pull/1697))" msgstr "" -#: ../../source/ref-changelog.md:507 +#: ../../source/ref-changelog.md:244 msgid "" -"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " -"Client Engine**" +"In addition to `start_server` and using the raw Driver API, there is a " +"new `start_driver` function that allows for running `start_server` " +"scripts as a Flower driver with only a single-line code change. Check out" +" the `mt-pytorch` code example to see a working example using " +"`start_driver`." msgstr "" -#: ../../source/ref-changelog.md:509 +#: ../../source/ref-changelog.md:246 msgid "" -"The Virtual Client Engine now has full support for optional `Client` (and" -" `NumPyClient`) methods." +"**Add parameter aggregation to** `mt-pytorch` **code example** " +"([#1785](https://github.com/adap/flower/pull/1785))" msgstr "" -#: ../../source/ref-changelog.md:511 +#: ../../source/ref-changelog.md:248 msgid "" -"**Provide type information to packages using** `flwr` " -"([#1377](https://github.com/adap/flower/pull/1377))" +"The `mt-pytorch` example shows how to aggregate parameters when writing a" +" driver script. The included `driver.py` and `server.py` have been " +"aligned to demonstrate both the low-level way and the high-level way of " +"building server-side logic." msgstr "" -#: ../../source/ref-changelog.md:513 +#: ../../source/ref-changelog.md:250 msgid "" -"The package `flwr` is now bundled with a `py.typed` file indicating that " -"the package is typed. This enables typing support for projects or " -"packages that use `flwr` by enabling them to improve their code using " -"static type checkers like `mypy`." 
+"**Migrate experimental REST API to Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" msgstr "" -#: ../../source/ref-changelog.md:515 +#: ../../source/ref-changelog.md:252 msgid "" -"**Updated code example** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +"The (experimental) REST API used to be implemented in " +"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" +" use [Starlette](https://www.starlette.io/) directly." msgstr "" -#: ../../source/ref-changelog.md:517 +#: ../../source/ref-changelog.md:254 msgid "" -"The code examples covering scikit-learn and PyTorch Lightning have been " -"updated to work with the latest version of Flower." +"Please note: The REST request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -#: ../../source/ref-changelog.md:519 +#: ../../source/ref-changelog.md:256 msgid "" -"**Updated documentation** " -"([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github.com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" +"**Introduce experimental gRPC request-response API** " 
+"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901))" msgstr "" -#: ../../source/ref-changelog.md:521 +#: ../../source/ref-changelog.md:258 msgid "" -"There have been so many documentation updates that it doesn't even make " -"sense to list them individually." +"In addition to the existing gRPC API (based on bidirectional streaming) " +"and the experimental REST API, there is now a new gRPC API that uses a " +"request-response model to communicate with client nodes." msgstr "" -#: ../../source/ref-changelog.md:523 +#: ../../source/ref-changelog.md:260 msgid "" -"**Restructured documentation** " -"([#1387](https://github.com/adap/flower/pull/1387))" +"Please note: The gRPC request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -#: ../../source/ref-changelog.md:525 +#: ../../source/ref-changelog.md:262 msgid "" -"The documentation has been restructured to make it easier to navigate. " -"This is just the first step in a larger effort to make the Flower " -"documentation the best documentation of any project ever. Stay tuned!" +"**Replace the experimental** `start_client(rest=True)` **with the new** " +"`start_client(transport=\"rest\")` " +"([#1880](https://github.com/adap/flower/pull/1880))" msgstr "" -#: ../../source/ref-changelog.md:527 +#: ../../source/ref-changelog.md:264 msgid "" -"**Open in Colab button** " -"([#1389](https://github.com/adap/flower/pull/1389))" +"The (experimental) `start_client` argument `rest` was deprecated in " +"favour of a new argument `transport`. `start_client(transport=\"rest\")` " +"will yield the same behaviour as `start_client(rest=True)` did before. " +"All code should migrate to the new argument `transport`. The deprecated " +"argument `rest` will be removed in a future release." 
msgstr "" -#: ../../source/ref-changelog.md:529 +#: ../../source/ref-changelog.md:266 msgid "" -"The four parts of the Flower Federated Learning Tutorial now come with a " -"new `Open in Colab` button. No need to install anything on your local " -"machine, you can now use and learn about Flower in your browser, it's " -"only a single click away." +"**Add a new gRPC option** " +"([#2197](https://github.com/adap/flower/pull/2197))" msgstr "" -#: ../../source/ref-changelog.md:531 +#: ../../source/ref-changelog.md:268 msgid "" -"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" +"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" +" option set to 0 by default. This prevents the clients from sending " +"keepalive pings when there is no outstanding stream." msgstr "" -#: ../../source/ref-changelog.md:533 +#: ../../source/ref-changelog.md:270 msgid "" -"The Flower Federated Learning Tutorial has two brand-new parts covering " -"custom strategies (still WIP) and the distinction between `Client` and " -"`NumPyClient`. The existing parts one and two have also been improved " -"(many small changes and fixes)." +"**Improve example notebooks** " +"([#2005](https://github.com/adap/flower/pull/2005))" msgstr "" -#: ../../source/ref-changelog.md:539 -msgid "v1.0.0 (2022-07-28)" +#: ../../source/ref-changelog.md:272 +msgid "There's a new 30min Federated Learning PyTorch tutorial!" 
msgstr "" -#: ../../source/ref-changelog.md:541 -msgid "Highlights" +#: ../../source/ref-changelog.md:274 +msgid "" +"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" msgstr "" -#: ../../source/ref-changelog.md:543 -msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" +#: ../../source/ref-changelog.md:276 +msgid "" +"Many examples have received significant updates, including simplified " +"advanced-tensorflow and advanced-pytorch examples, improved macOS " +"compatibility of TensorFlow examples, and code examples for simulation. A" +" major upgrade is that all code examples now have a `requirements.txt` " +"(in addition to `pyproject.toml`)." 
msgstr "" -#: ../../source/ref-changelog.md:544 -msgid "All `Client`/`NumPyClient` methods are now optional" +#: ../../source/ref-changelog.md:278 +msgid "" +"**General improvements** " +"([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" msgstr "" -#: ../../source/ref-changelog.md:545 -msgid "Configurable `get_parameters`" +#: ../../source/ref-changelog.md:284 ../../source/ref-changelog.md:348 +#: ../../source/ref-changelog.md:406 ../../source/ref-changelog.md:475 +#: ../../source/ref-changelog.md:537 +msgid "None" msgstr "" -#: ../../source/ref-changelog.md:546 -msgid "" -"Tons of small API cleanups resulting in a more coherent developer " -"experience" +#: ../../source/ref-changelog.md:286 +msgid "v1.4.0 (2023-04-21)" msgstr "" -#: ../../source/ref-changelog.md:550 +#: ../../source/ref-changelog.md:292 msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made Flower 1.0 possible (in reverse [GitHub " -"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. 
Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" msgstr "" -#: ../../source/ref-changelog.md:552 +#: ../../source/ref-changelog.md:296 msgid "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), " -"[@mrinaald](https://github.com/mrinaald), " -"[@zliel](https://github.com/zliel), " -"[@MeiruiJiang](https://github.com/MeiruiJiang), " -"[@sancarlim](https://github.com/sancarlim), " -"[@gubertoli](https://github.com/gubertoli), " -"[@Vingt100](https://github.com/Vingt100), " -"[@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), " -"[@jafermarq](https://github.com/jafermarq), " -"[@sisco0](https://github.com/sisco0), " -"[@akhilmathurs](https://github.com/akhilmathurs), " -"[@CanTuerk](https://github.com/CanTuerk), " -"[@mariaboerner1987](https://github.com/mariaboerner1987), " 
-"[@pedropgusmao](https://github.com/pedropgusmao), " -"[@tanertopal](https://github.com/tanertopal), " -"[@danieljanes](https://github.com/danieljanes)." -msgstr "" - -#: ../../source/ref-changelog.md:556 -msgid "" -"**All arguments must be passed as keyword arguments** " -"([#1338](https://github.com/adap/flower/pull/1338))" +"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " +"example)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" msgstr "" -#: ../../source/ref-changelog.md:558 +#: ../../source/ref-changelog.md:298 msgid "" -"Pass all arguments as keyword arguments, positional arguments are not " -"longer supported. Code that uses positional arguments (e.g., " -"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " -"for each positional argument (e.g., " -"`start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())`)." +"XGBoost is a tree-based ensemble machine learning algorithm that uses " +"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" +" " +"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" and a [code example](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart) that demonstrates the usage of this new strategy in " +"an XGBoost project." 
msgstr "" -#: ../../source/ref-changelog.md:560 +#: ../../source/ref-changelog.md:300 msgid "" -"**Introduce configuration object** `ServerConfig` **in** `start_server` " -"**and** `start_simulation` " -"([#1317](https://github.com/adap/flower/pull/1317))" +"**Introduce iOS SDK (preview)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" msgstr "" -#: ../../source/ref-changelog.md:562 +#: ../../source/ref-changelog.md:302 msgid "" -"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " -"600.0}`, `start_server` and `start_simulation` now expect a configuration" -" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" -" arguments that as the previous config dict, but it makes writing type-" -"safe code easier and the default parameters values more transparent." +"This is a major update for anyone wanting to implement Federated Learning" +" on iOS mobile devices. We now have a swift iOS SDK present under " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" that will facilitate greatly the app creating process. To showcase its " +"use, the [iOS " +"example](https://github.com/adap/flower/tree/main/examples/ios) has also " +"been updated!" 
msgstr "" -#: ../../source/ref-changelog.md:564 +#: ../../source/ref-changelog.md:304 msgid "" -"**Rename built-in strategy parameters for clarity** " -"([#1334](https://github.com/adap/flower/pull/1334))" +"**Introduce new \"What is Federated Learning?\" tutorial** " +"([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721))" msgstr "" -#: ../../source/ref-changelog.md:566 +#: ../../source/ref-changelog.md:306 msgid "" -"The following built-in strategy parameters were renamed to improve " -"readability and consistency with other API's:" -msgstr "" - -#: ../../source/ref-changelog.md:568 -msgid "`fraction_eval` --> `fraction_evaluate`" -msgstr "" - -#: ../../source/ref-changelog.md:569 -msgid "`min_eval_clients` --> `min_evaluate_clients`" -msgstr "" - -#: ../../source/ref-changelog.md:570 -msgid "`eval_fn` --> `evaluate_fn`" +"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" +"what-is-federated-learning.html) in our documentation explains the basics" +" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" +" Learning to start their journey with Flower. Forward it to anyone who's " +"interested in Federated Learning!" msgstr "" -#: ../../source/ref-changelog.md:572 +#: ../../source/ref-changelog.md:308 msgid "" -"**Update default arguments of built-in strategies** " -"([#1278](https://github.com/adap/flower/pull/1278))" +"**Introduce new Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" msgstr "" -#: ../../source/ref-changelog.md:574 +#: ../../source/ref-changelog.md:310 msgid "" -"All built-in strategies now use `fraction_fit=1.0` and " -"`fraction_evaluate=1.0`, which means they select *all* currently " -"available clients for training and evaluation. 
Projects that relied on " -"the previous default values can get the previous behaviour by " -"initializing the strategy in the following way:" -msgstr "" - -#: ../../source/ref-changelog.md:576 -msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +"This new baseline replicates the MNIST+CNN task from the paper [Federated" +" Optimization in Heterogeneous Networks (Li et al., " +"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," +" which aims at making convergence more robust in heterogeneous settings." msgstr "" -#: ../../source/ref-changelog.md:578 +#: ../../source/ref-changelog.md:312 msgid "" -"**Add** `server_round` **to** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"**Introduce new Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" msgstr "" -#: ../../source/ref-changelog.md:580 +#: ../../source/ref-changelog.md:314 msgid "" -"The `Strategy` method `evaluate` now receives the current round of " -"federated learning/evaluation as the first parameter." +"This new baseline replicates an experiment evaluating the performance of " +"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " +"Benchmark for Federated Settings (Caldas et al., " +"2018)](https://arxiv.org/abs/1812.01097)." 
msgstr "" -#: ../../source/ref-changelog.md:582 +#: ../../source/ref-changelog.md:316 msgid "" -"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"**Introduce (experimental) REST API** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" msgstr "" -#: ../../source/ref-changelog.md:584 +#: ../../source/ref-changelog.md:318 msgid "" -"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " -"three parameters: (1) The current round of federated learning/evaluation " -"(`server_round`), (2) the model parameters to evaluate (`parameters`), " -"and (3) a config dictionary (`config`)." +"A new REST API has been introduced as an alternative to the gRPC-based " +"communication stack. In this initial version, the REST API only supports " +"anonymous clients." msgstr "" -#: ../../source/ref-changelog.md:586 +#: ../../source/ref-changelog.md:320 msgid "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"Please note: The REST API is still experimental and will likely change " +"significantly over time." msgstr "" -#: ../../source/ref-changelog.md:588 +#: ../../source/ref-changelog.md:322 msgid "" -"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " -"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " -"current round of federated learning/evaluation as their first parameter. " -"To improve reaability and avoid confusion with *random*, this parameter " -"has been renamed from `rnd` to `server_round`." 
+"**Improve the (experimental) Driver API** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" msgstr "" -#: ../../source/ref-changelog.md:590 +#: ../../source/ref-changelog.md:324 msgid "" -"**Move** `flwr.dataset` **to** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" -msgstr "" - -#: ../../source/ref-changelog.md:592 -msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." +"The Driver API is still an experimental feature, but this release " +"introduces some major upgrades. One of the main improvements is the " +"introduction of an SQLite database to store server state on disk (instead" +" of in-memory). Another improvement is that tasks (instructions or " +"results) that have been delivered will now be deleted. This greatly " +"improves the memory efficiency of a long-running Flower server." msgstr "" -#: ../../source/ref-changelog.md:594 +#: ../../source/ref-changelog.md:326 msgid "" -"**Remove experimental strategies** " -"([#1280](https://github.com/adap/flower/pull/1280))" +"**Fix spilling issues related to Ray during simulations** " +"([#1698](https://github.com/adap/flower/pull/1698))" msgstr "" -#: ../../source/ref-changelog.md:596 +#: ../../source/ref-changelog.md:328 msgid "" -"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " -"`FedFSv1`)." +"While running long simulations, `ray` was sometimes spilling huge amounts" +" of data that would make the training unable to continue. This is now " +"fixed! 
🎉" msgstr "" -#: ../../source/ref-changelog.md:598 +#: ../../source/ref-changelog.md:330 msgid "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**Add new example using** `TabNet` **and Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" msgstr "" -#: ../../source/ref-changelog.md:600 +#: ../../source/ref-changelog.md:332 msgid "" -"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " -"capture what this type is all about." +"TabNet is a powerful and flexible framework for training machine learning" +" models on tabular data. We now have a federated example using Flower: " +"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" +"/quickstart-tabnet)." msgstr "" -#: ../../source/ref-changelog.md:602 +#: ../../source/ref-changelog.md:334 msgid "" -"**Remove antiquated** `force_final_distributed_eval` **from** " -"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**Add new how-to guide for monitoring simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" msgstr "" -#: ../../source/ref-changelog.md:604 +#: ../../source/ref-changelog.md:336 msgid "" -"The `start_server` parameter `force_final_distributed_eval` has long been" -" a historic artefact, in this release it is finally gone for good." +"We now have a documentation guide to help users monitor their performance" +" during simulations." 
msgstr "" -#: ../../source/ref-changelog.md:606 +#: ../../source/ref-changelog.md:338 msgid "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" +"**Add training metrics to** `History` **object during simulations** " +"([#1696](https://github.com/adap/flower/pull/1696))" msgstr "" -#: ../../source/ref-changelog.md:608 +#: ../../source/ref-changelog.md:340 msgid "" -"The `get_parameters` method now accepts a configuration dictionary, just " -"like `get_properties`, `fit`, and `evaluate`." +"The `fit_metrics_aggregation_fn` can be used to aggregate training " +"metrics, but previous releases did not save the results in the `History` " +"object. This is now the case!" msgstr "" -#: ../../source/ref-changelog.md:610 +#: ../../source/ref-changelog.md:342 msgid "" -"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " -"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" +"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " 
+"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " +"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " +"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" msgstr "" -#: ../../source/ref-changelog.md:612 -msgid "" -"The `start_simulation` function now accepts a configuration dictionary " -"`config` instead of the `num_rounds` integer. 
This improves the " -"consistency between `start_simulation` and `start_server` and makes " -"transitioning between the two easier." +#: ../../source/ref-changelog.md:350 +msgid "v1.3.0 (2023-02-06)" msgstr "" -#: ../../source/ref-changelog.md:616 +#: ../../source/ref-changelog.md:356 msgid "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" msgstr "" -#: ../../source/ref-changelog.md:618 +#: ../../source/ref-changelog.md:360 msgid "" -"The previous Flower release introduced experimental support for Python " -"3.10, this release declares Python 3.10 support as stable." +"**Add support for** `workload_id` **and** `group_id` **in Driver API** " +"([#1595](https://github.com/adap/flower/pull/1595))" msgstr "" -#: ../../source/ref-changelog.md:620 +#: ../../source/ref-changelog.md:362 msgid "" -"**Make all** `Client` **and** `NumPyClient` **methods optional** " -"([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" +"The (experimental) Driver API now supports a `workload_id` that can be " +"used to identify which workload a task belongs to. It also supports a new" +" `group_id` that can be used, for example, to indicate the current " +"training round. Both the `workload_id` and `group_id` enable client nodes" +" to decide whether they want to handle a task or not." msgstr "" -#: ../../source/ref-changelog.md:622 +#: ../../source/ref-changelog.md:364 msgid "" -"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " -"`fit`, and `evaluate` are all optional. This enables writing clients that" -" implement, for example, only `fit`, but no other method. No need to " -"implement `evaluate` when using centralized evaluation!" 
+"**Make Driver API and Fleet API address configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" msgstr "" -#: ../../source/ref-changelog.md:624 +#: ../../source/ref-changelog.md:366 msgid "" -"**Enable passing a** `Server` **instance to** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" +"The (experimental) long-running Flower server (Driver API and Fleet API) " +"can now configure the server address of both Driver API (via `--driver-" +"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" msgstr "" -#: ../../source/ref-changelog.md:626 +#: ../../source/ref-changelog.md:368 msgid "" -"Similar to `start_server`, `start_simulation` now accepts a full `Server`" -" instance. This enables users to heavily customize the execution of " -"eperiments and opens the door to running, for example, async FL using the" -" Virtual Client Engine." +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" msgstr "" -#: ../../source/ref-changelog.md:628 -msgid "" -"**Update code examples** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +#: ../../source/ref-changelog.md:370 +msgid "Both IPv4 and IPv6 addresses are supported." 
msgstr "" -#: ../../source/ref-changelog.md:630 +#: ../../source/ref-changelog.md:372 msgid "" -"Many code examples received small or even large maintenance updates, " -"among them are" -msgstr "" - -#: ../../source/ref-changelog.md:632 -msgid "`scikit-learn`" -msgstr "" - -#: ../../source/ref-changelog.md:633 -msgid "`simulation_pytorch`" -msgstr "" - -#: ../../source/ref-changelog.md:634 -msgid "`quickstart_pytorch`" -msgstr "" - -#: ../../source/ref-changelog.md:635 -msgid "`quickstart_simulation`" -msgstr "" - -#: ../../source/ref-changelog.md:636 -msgid "`quickstart_tensorflow`" -msgstr "" - -#: ../../source/ref-changelog.md:637 -msgid "`advanced_tensorflow`" +"**Add new example of Federated Learning using fastai and Flower** " +"([#1598](https://github.com/adap/flower/pull/1598))" msgstr "" -#: ../../source/ref-changelog.md:639 +#: ../../source/ref-changelog.md:374 msgid "" -"**Remove the obsolete simulation example** " -"([#1328](https://github.com/adap/flower/pull/1328))" +"A new code example (`quickstart-fastai`) demonstrates federated learning " +"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" +"/quickstart-fastai)." 
msgstr "" -#: ../../source/ref-changelog.md:641 +#: ../../source/ref-changelog.md:376 msgid "" -"Removes the obsolete `simulation` example and renames " -"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " -"naming of `simulation_pytorch`" +"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" +" versions of Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" msgstr "" -#: ../../source/ref-changelog.md:643 +#: ../../source/ref-changelog.md:378 msgid "" -"**Update documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" +"The Android code example has received a substantial update: the project " +"is compatible with Flower 1.0 (and later), the UI received a full " +"refresh, and the project is updated to be compatible with newer Android " +"tooling." msgstr "" -#: ../../source/ref-changelog.md:645 +#: ../../source/ref-changelog.md:380 msgid "" -"One substantial documentation update fixes multiple smaller rendering " -"issues, makes titles more succinct to improve navigation, removes a " -"deprecated library, updates documentation dependencies, includes the " -"`flwr.common` module in the API reference, includes support for markdown-" -"based documentation, migrates the changelog from `.rst` to `.md`, and " -"fixes a number of smaller details!" 
-msgstr "" - -#: ../../source/ref-changelog.md:647 ../../source/ref-changelog.md:702 -#: ../../source/ref-changelog.md:771 ../../source/ref-changelog.md:810 -msgid "**Minor updates**" +"**Add new `FedProx` strategy** " +"([#1619](https://github.com/adap/flower/pull/1619))" msgstr "" -#: ../../source/ref-changelog.md:649 +#: ../../source/ref-changelog.md:382 msgid "" -"Add round number to fit and evaluate log messages " -"([#1266](https://github.com/adap/flower/pull/1266))" +"This " +"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" is almost identical to " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" but helps users replicate what is described in this " +"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " +"parameter called `proximal_mu` to regularize the local models with " +"respect to the global models." msgstr "" -#: ../../source/ref-changelog.md:650 +#: ../../source/ref-changelog.md:384 msgid "" -"Add secure gRPC connection to the `advanced_tensorflow` code example " -"([#847](https://github.com/adap/flower/pull/847))" +"**Add new metrics to telemetry events** " +"([#1640](https://github.com/adap/flower/pull/1640))" msgstr "" -#: ../../source/ref-changelog.md:651 +#: ../../source/ref-changelog.md:386 msgid "" -"Update developer tooling " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" +"An updated event structure allows, for example, the clustering of events " +"within the same workload." 
msgstr "" -#: ../../source/ref-changelog.md:652 +#: ../../source/ref-changelog.md:388 msgid "" -"Rename ProtoBuf messages to improve consistency " -"([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" -msgstr "" - -#: ../../source/ref-changelog.md:654 -msgid "v0.19.0 (2022-05-18)" +"**Add new custom strategy tutorial section** " +"[#1623](https://github.com/adap/flower/pull/1623)" msgstr "" -#: ../../source/ref-changelog.md:658 +#: ../../source/ref-changelog.md:390 msgid "" -"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" +"The Flower tutorial now has a new section that covers implementing a " +"custom strategy from scratch: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:660 +#: ../../source/ref-changelog.md:392 msgid "" -"The first preview release of Flower Baselines has arrived! We're " -"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " -"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " -"With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.ai/docs/contributing-" -"baselines.html)." 
+"**Add new custom serialization tutorial section** " +"([#1622](https://github.com/adap/flower/pull/1622))" msgstr "" -#: ../../source/ref-changelog.md:662 +#: ../../source/ref-changelog.md:394 msgid "" -"**C++ client SDK (preview) and code example** " -"([#1111](https://github.com/adap/flower/pull/1111))" +"The Flower tutorial now has a new section that covers custom " +"serialization: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:664 +#: ../../source/ref-changelog.md:396 msgid "" -"Preview support for Flower clients written in C++. The C++ preview " -"includes a Flower client SDK and a quickstart code example that " -"demonstrates a simple C++ client using the SDK." +"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " 
+"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github.com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" msgstr "" -#: ../../source/ref-changelog.md:666 +#: ../../source/ref-changelog.md:400 msgid "" -"**Add experimental support for Python 3.10 and Python 3.11** " -"([#1135](https://github.com/adap/flower/pull/1135))" +"**Updated documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" msgstr "" -#: ../../source/ref-changelog.md:668 +#: ../../source/ref-changelog.md:402 ../../source/ref-changelog.md:469 msgid "" -"Python 3.10 is the latest stable release of Python and Python 3.11 is due" -" to be released in October. This Flower release adds experimental support" -" for both Python versions." +"As usual, the documentation has improved quite a bit. It is another step " +"in our effort to make the Flower documentation the best documentation of " +"any project. Stay tuned and as always, feel free to provide feedback!" 
msgstr "" -#: ../../source/ref-changelog.md:670 -msgid "" -"**Aggregate custom metrics through user-provided functions** " -"([#1144](https://github.com/adap/flower/pull/1144))" +#: ../../source/ref-changelog.md:408 +msgid "v1.2.0 (2023-01-13)" msgstr "" -#: ../../source/ref-changelog.md:672 +#: ../../source/ref-changelog.md:414 msgid "" -"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" -" customize the strategy. Built-in strategies support two new arguments, " -"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " -"allow passing custom metric aggregation functions." +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." +" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" msgstr "" -#: ../../source/ref-changelog.md:674 +#: ../../source/ref-changelog.md:418 msgid "" -"**User-configurable round timeout** " -"([#1162](https://github.com/adap/flower/pull/1162))" +"**Introduce new Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" msgstr "" -#: ../../source/ref-changelog.md:676 +#: ../../source/ref-changelog.md:420 msgid "" -"A new configuration value allows the round timeout to be set for " -"`start_server` and `start_simulation`. If the `config` dictionary " -"contains a `round_timeout` key (with a `float` value in seconds), the " -"server will wait *at least* `round_timeout` seconds before it closes the " -"connection." +"Over the coming weeks, we will be releasing a number of new reference " +"implementations useful especially to FL newcomers. They will typically " +"revisit well known papers from the literature, and be suitable for " +"integration in your own application or for experimentation, in order to " +"deepen your knowledge of FL in general. Today's release is the first in " +"this series. 
[Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" msgstr "" -#: ../../source/ref-changelog.md:678 +#: ../../source/ref-changelog.md:422 msgid "" -"**Enable both federated evaluation and centralized evaluation to be used " -"at the same time in all built-in strategies** " -"([#1091](https://github.com/adap/flower/pull/1091))" +"**Improve GPU support in simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" msgstr "" -#: ../../source/ref-changelog.md:680 +#: ../../source/ref-changelog.md:424 msgid "" -"Built-in strategies can now perform both federated evaluation (i.e., " -"client-side) and centralized evaluation (i.e., server-side) in the same " -"round. Federated evaluation can be disabled by setting `fraction_eval` to" -" `0.0`." +"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" +" to improve GPU support. The update includes some of the hard-earned " +"lessons from scaling simulations in GPU cluster environments. New " +"defaults make running GPU-based simulations substantially more robust." msgstr "" -#: ../../source/ref-changelog.md:682 +#: ../../source/ref-changelog.md:426 msgid "" -"**Two new Jupyter Notebook tutorials** " -"([#1141](https://github.com/adap/flower/pull/1141))" +"**Improve GPU support in Jupyter Notebook tutorials** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" msgstr "" -#: ../../source/ref-changelog.md:684 +#: ../../source/ref-changelog.md:428 msgid "" -"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " -"basic and intermediate Flower features:" +"Some users reported that Jupyter Notebooks have not always been easy to " +"use on GPU instances. We listened and made improvements to all of our " +"Jupyter notebooks! 
Check out the updated notebooks here:" msgstr "" -#: ../../source/ref-changelog.md:686 +#: ../../source/ref-changelog.md:430 msgid "" -"*An Introduction to Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" +"[An Introduction to Federated Learning](https://flower.ai/docs/framework" +"/tutorial-get-started-with-flower-pytorch.html)" msgstr "" -#: ../../source/ref-changelog.md:688 +#: ../../source/ref-changelog.md:431 msgid "" -"*Using Strategies in Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +"[Strategies in Federated Learning](https://flower.ai/docs/framework" +"/tutorial-use-a-federated-learning-strategy-pytorch.html)" msgstr "" -#: ../../source/ref-changelog.md:690 +#: ../../source/ref-changelog.md:432 msgid "" -"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " -"([#1076](https://github.com/adap/flower/pull/1076))" +"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" +"-strategy-from-scratch-pytorch.html)" msgstr "" -#: ../../source/ref-changelog.md:692 +#: ../../source/ref-changelog.md:433 msgid "" -"The new `FedAvgM` strategy implements Federated Averaging with Server " -"Momentum \\[Hsu et al., 2019\\]." 
+"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" +"customize-the-client-pytorch.html)" msgstr "" -#: ../../source/ref-changelog.md:694 +#: ../../source/ref-changelog.md:435 msgid "" -"**New advanced PyTorch code example** " -"([#1007](https://github.com/adap/flower/pull/1007))" +"**Introduce optional telemetry** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" msgstr "" -#: ../../source/ref-changelog.md:696 +#: ../../source/ref-changelog.md:437 msgid "" -"A new code example (`advanced_pytorch`) demonstrates advanced Flower " -"concepts with PyTorch." +"After a [request for " +"feedback](https://github.com/adap/flower/issues/1534) from the community," +" the Flower open-source project introduces optional collection of " +"*anonymous* usage metrics to make well-informed decisions to improve " +"Flower. Doing this enables the Flower team to understand how Flower is " +"used and what challenges users might face." msgstr "" -#: ../../source/ref-changelog.md:698 +#: ../../source/ref-changelog.md:439 msgid "" -"**New JAX code example** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users who do not want to share anonymous usage metrics. " +"[Read more.](https://flower.ai/docs/telemetry.html)." msgstr "" -#: ../../source/ref-changelog.md:700 +#: ../../source/ref-changelog.md:441 msgid "" -"A new code example (`jax_from_centralized_to_federated`) shows federated " -"learning with JAX and Flower." 
+"**Introduce (experimental) Driver API** " +"([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" msgstr "" -#: ../../source/ref-changelog.md:704 +#: ../../source/ref-changelog.md:443 msgid "" -"New option to keep Ray running if Ray was already initialized in " -"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" +"Flower now has a new (experimental) Driver API which will enable fully " +"programmable, async, and multi-tenant Federated Learning and Federated " +"Analytics applications. Phew, that's a lot! Going forward, the Driver API" +" will be the abstraction that many upcoming features will be built on - " +"and you can start building those things now, too." msgstr "" -#: ../../source/ref-changelog.md:705 +#: ../../source/ref-changelog.md:445 msgid "" -"Add support for custom `ClientManager` as a `start_simulation` parameter " -"([#1171](https://github.com/adap/flower/pull/1171))" +"The Driver API also enables a new execution mode in which the server runs" +" indefinitely. Multiple individual workloads can run concurrently and " +"start and stop their execution independent of the server. This is " +"especially useful for users who want to deploy Flower in production." msgstr "" -#: ../../source/ref-changelog.md:706 +#: ../../source/ref-changelog.md:447 msgid "" -"New documentation for [implementing " -"strategies](https://flower.ai/docs/framework/how-to-implement-" -"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175))" +"To learn more, check out the `mt-pytorch` code example. We look forward " +"to you feedback!" 
msgstr "" -#: ../../source/ref-changelog.md:707 +#: ../../source/ref-changelog.md:449 msgid "" -"New mobile-friendly documentation theme " -"([#1174](https://github.com/adap/flower/pull/1174))" +"Please note: *The Driver API is still experimental and will likely change" +" significantly over time.*" msgstr "" -#: ../../source/ref-changelog.md:708 +#: ../../source/ref-changelog.md:451 msgid "" -"Limit version range for (optional) `ray` dependency to include only " -"compatible releases (`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" +"**Add new Federated Analytics with Pandas example** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" msgstr "" -#: ../../source/ref-changelog.md:712 +#: ../../source/ref-changelog.md:453 msgid "" -"**Remove deprecated support for Python 3.6** " -"([#871](https://github.com/adap/flower/pull/871))" +"A new code example (`quickstart-pandas`) demonstrates federated analytics" +" with Pandas and Flower. You can find it here: [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." msgstr "" -#: ../../source/ref-changelog.md:713 +#: ../../source/ref-changelog.md:455 msgid "" -"**Remove deprecated KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" +"**Add new strategies: Krum and MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" msgstr "" -#: ../../source/ref-changelog.md:714 +#: ../../source/ref-changelog.md:457 msgid "" -"**Remove deprecated no-op extra installs** " -"([#973](https://github.com/adap/flower/pull/973))" +"Edoardo, a computer science student at the Sapienza University of Rome, " +"contributed a new `Krum` strategy that enables users to easily use Krum " +"and MultiKrum in their workloads." 
msgstr "" -#: ../../source/ref-changelog.md:715 +#: ../../source/ref-changelog.md:459 msgid "" -"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " -"([#869](https://github.com/adap/flower/pull/869))" +"**Update C++ example to be compatible with Flower v1.2.0** " +"([#1495](https://github.com/adap/flower/pull/1495))" msgstr "" -#: ../../source/ref-changelog.md:716 +#: ../../source/ref-changelog.md:461 msgid "" -"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " -"([#1107](https://github.com/adap/flower/pull/1107))" +"The C++ code example has received a substantial update to make it " +"compatible with the latest version of Flower." msgstr "" -#: ../../source/ref-changelog.md:717 +#: ../../source/ref-changelog.md:463 msgid "" -"**Remove deprecated DefaultStrategy strategy** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"**General improvements** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" msgstr "" -#: ../../source/ref-changelog.md:718 +#: ../../source/ref-changelog.md:467 msgid "" -"**Remove deprecated support for eval_fn accuracy return value** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"**Updated documentation** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " 
+"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" msgstr "" -#: ../../source/ref-changelog.md:719 +#: ../../source/ref-changelog.md:471 msgid "" -"**Remove deprecated support for passing initial parameters as NumPy " -"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" -msgstr "" - -#: ../../source/ref-changelog.md:721 -msgid "v0.18.0 (2022-02-28)" +"One highlight is the new [first time contributor " +"guide](https://flower.ai/docs/first-time-contributors.html): if you've " +"never contributed on GitHub before, this is the perfect place to start!" msgstr "" -#: ../../source/ref-changelog.md:725 -msgid "" -"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " -"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" +#: ../../source/ref-changelog.md:477 +msgid "v1.1.0 (2022-10-31)" msgstr "" -#: ../../source/ref-changelog.md:727 +#: ../../source/ref-changelog.md:481 msgid "" -"Simulations (using the Virtual Client Engine through `start_simulation`) " -"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " -"installing Flower with the `simulation` extra (`pip install " -"flwr[simulation]`)." 
+"We would like to give our **special thanks** to all the contributors who " +"made the new version of Flower possible (in `git shortlog` order):" msgstr "" -#: ../../source/ref-changelog.md:729 +#: ../../source/ref-changelog.md:483 msgid "" -"**New Jupyter Notebook code example** " -"([#833](https://github.com/adap/flower/pull/833))" +"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" msgstr "" -#: ../../source/ref-changelog.md:731 +#: ../../source/ref-changelog.md:487 msgid "" -"A new code example (`quickstart_simulation`) demonstrates Flower " -"simulations using the Virtual Client Engine through Jupyter Notebook " -"(incl. Google Colab)." +"**Introduce Differential Privacy wrappers (preview)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" msgstr "" -#: ../../source/ref-changelog.md:733 +#: ../../source/ref-changelog.md:489 msgid "" -"**Client properties (feature preview)** " -"([#795](https://github.com/adap/flower/pull/795))" +"The first (experimental) preview of pluggable Differential Privacy " +"wrappers enables easy configuration and usage of differential privacy " +"(DP). The pluggable DP wrappers enable framework-agnostic **and** " +"strategy-agnostic usage of both client-side DP and server-side DP. Head " +"over to the Flower docs, a new explainer goes into more detail." msgstr "" -#: ../../source/ref-changelog.md:735 +#: ../../source/ref-changelog.md:491 msgid "" -"Clients can implement a new method `get_properties` to enable server-side" -" strategies to query client properties." 
+"**New iOS CoreML code example** " +"([#1289](https://github.com/adap/flower/pull/1289))" msgstr "" -#: ../../source/ref-changelog.md:737 +#: ../../source/ref-changelog.md:493 msgid "" -"**Experimental Android support with TFLite** " -"([#865](https://github.com/adap/flower/pull/865))" +"Flower goes iOS! A massive new code example shows how Flower clients can " +"be built for iOS. The code example contains both Flower iOS SDK " +"components that can be used for many tasks, and one task example running " +"on CoreML." msgstr "" -#: ../../source/ref-changelog.md:739 +#: ../../source/ref-changelog.md:495 msgid "" -"Android support has finally arrived in `main`! Flower is both client-" -"agnostic and framework-agnostic by design. One can integrate arbitrary " -"client platforms and with this release, using Flower on Android has " -"become a lot easier." +"**New FedMedian strategy** " +"([#1461](https://github.com/adap/flower/pull/1461))" msgstr "" -#: ../../source/ref-changelog.md:741 +#: ../../source/ref-changelog.md:497 msgid "" -"The example uses TFLite on the client side, along with a new " -"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " -"still experimental, but they are a first step towards a fully-fledged " -"Android SDK and a unified `FedAvg` implementation that integrated the new" -" functionality from `FedAvgAndroid`." +"The new `FedMedian` strategy implements Federated Median (FedMedian) by " +"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." 
msgstr "" -#: ../../source/ref-changelog.md:743 +#: ../../source/ref-changelog.md:499 msgid "" -"**Make gRPC keepalive time user-configurable and decrease default " -"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" +"**Log** `Client` **exceptions in Virtual Client Engine** " +"([#1493](https://github.com/adap/flower/pull/1493))" msgstr "" -#: ../../source/ref-changelog.md:745 +#: ../../source/ref-changelog.md:501 msgid "" -"The default gRPC keepalive time has been reduced to increase the " -"compatibility of Flower with more cloud environments (for example, " -"Microsoft Azure). Users can configure the keepalive time to customize the" -" gRPC stack based on specific requirements." +"All `Client` exceptions happening in the VCE are now logged by default " +"and not just exposed to the configured `Strategy` (via the `failures` " +"argument)." msgstr "" -#: ../../source/ref-changelog.md:747 +#: ../../source/ref-changelog.md:503 msgid "" -"**New differential privacy example using Opacus and PyTorch** " -"([#805](https://github.com/adap/flower/pull/805))" +"**Improve Virtual Client Engine internals** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" msgstr "" -#: ../../source/ref-changelog.md:749 +#: ../../source/ref-changelog.md:505 msgid "" -"A new code example (`opacus`) demonstrates differentially-private " -"federated learning with Opacus, PyTorch, and Flower." +"Some internals of the Virtual Client Engine have been revamped. The VCE " +"now uses Ray 2.0 under the hood, the value type of the `client_resources`" +" dictionary changed to `float` to allow fractions of resources to be " +"allocated." 
msgstr "" -#: ../../source/ref-changelog.md:751 +#: ../../source/ref-changelog.md:507 msgid "" -"**New Hugging Face Transformers code example** " -"([#863](https://github.com/adap/flower/pull/863))" +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" msgstr "" -#: ../../source/ref-changelog.md:753 +#: ../../source/ref-changelog.md:509 msgid "" -"A new code example (`quickstart_huggingface`) demonstrates usage of " -"Hugging Face Transformers with Flower." +"The Virtual Client Engine now has full support for optional `Client` (and" +" `NumPyClient`) methods." msgstr "" -#: ../../source/ref-changelog.md:755 +#: ../../source/ref-changelog.md:511 msgid "" -"**New MLCube code example** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +"**Provide type information to packages using** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" msgstr "" -#: ../../source/ref-changelog.md:757 +#: ../../source/ref-changelog.md:513 msgid "" -"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " -"with Flower." +"The package `flwr` is now bundled with a `py.typed` file indicating that " +"the package is typed. This enables typing support for projects or " +"packages that use `flwr` by enabling them to improve their code using " +"static type checkers like `mypy`." 
msgstr "" -#: ../../source/ref-changelog.md:759 +#: ../../source/ref-changelog.md:515 msgid "" -"**SSL-enabled server and client** " -"([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +"**Updated code example** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" msgstr "" -#: ../../source/ref-changelog.md:761 +#: ../../source/ref-changelog.md:517 msgid "" -"SSL enables secure encrypted connections between clients and servers. " -"This release open-sources the Flower secure gRPC implementation to make " -"encrypted communication channels accessible to all Flower users." +"The code examples covering scikit-learn and PyTorch Lightning have been " +"updated to work with the latest version of Flower." 
msgstr "" -#: ../../source/ref-changelog.md:763 +#: ../../source/ref-changelog.md:519 msgid "" -"**Updated** `FedAdam` **and** `FedYogi` **strategies** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +"**Updated documentation** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" msgstr "" -#: ../../source/ref-changelog.md:765 +#: ../../source/ref-changelog.md:521 msgid "" -"`FedAdam` and `FedAdam` match the latest version of the Adaptive " -"Federated Optimization paper." +"There have been so many documentation updates that it doesn't even make " +"sense to list them individually." 
msgstr "" -#: ../../source/ref-changelog.md:767 +#: ../../source/ref-changelog.md:523 msgid "" -"**Initialize** `start_simulation` **with a list of client IDs** " -"([#860](https://github.com/adap/flower/pull/860))" +"**Restructured documentation** " +"([#1387](https://github.com/adap/flower/pull/1387))" msgstr "" -#: ../../source/ref-changelog.md:769 +#: ../../source/ref-changelog.md:525 msgid "" -"`start_simulation` can now be called with a list of client IDs " -"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " -"`client_fn` whenever a client needs to be initialized, which can make it " -"easier to load data partitions that are not accessible through `int` " -"identifiers." +"The documentation has been restructured to make it easier to navigate. " +"This is just the first step in a larger effort to make the Flower " +"documentation the best documentation of any project ever. Stay tuned!" msgstr "" -#: ../../source/ref-changelog.md:773 +#: ../../source/ref-changelog.md:527 msgid "" -"Update `num_examples` calculation in PyTorch code examples in " -"([#909](https://github.com/adap/flower/pull/909))" +"**Open in Colab button** " +"([#1389](https://github.com/adap/flower/pull/1389))" msgstr "" -#: ../../source/ref-changelog.md:774 +#: ../../source/ref-changelog.md:529 msgid "" -"Expose Flower version through `flwr.__version__` " -"([#952](https://github.com/adap/flower/pull/952))" +"The four parts of the Flower Federated Learning Tutorial now come with a " +"new `Open in Colab` button. No need to install anything on your local " +"machine, you can now use and learn about Flower in your browser, it's " +"only a single click away." 
msgstr "" -#: ../../source/ref-changelog.md:775 +#: ../../source/ref-changelog.md:531 msgid "" -"`start_server` in `app.py` now returns a `History` object containing " -"metrics from training ([#974](https://github.com/adap/flower/pull/974))" +"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" msgstr "" -#: ../../source/ref-changelog.md:776 +#: ../../source/ref-changelog.md:533 msgid "" -"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " -"([#978](https://github.com/adap/flower/pull/978))" +"The Flower Federated Learning Tutorial has two brand-new parts covering " +"custom strategies (still WIP) and the distinction between `Client` and " +"`NumPyClient`. The existing parts one and two have also been improved " +"(many small changes and fixes)." msgstr "" -#: ../../source/ref-changelog.md:777 -msgid "" -"Increase sleep time after server start to three seconds in all code " -"examples ([#1086](https://github.com/adap/flower/pull/1086))" +#: ../../source/ref-changelog.md:539 +msgid "v1.0.0 (2022-07-28)" msgstr "" -#: ../../source/ref-changelog.md:778 -msgid "" -"Added a new FAQ section to the documentation " -"([#948](https://github.com/adap/flower/pull/948))" +#: ../../source/ref-changelog.md:541 +msgid "Highlights" msgstr "" -#: ../../source/ref-changelog.md:779 -msgid "" -"And many more under-the-hood changes, library updates, documentation " -"changes, and tooling improvements!" 
+#: ../../source/ref-changelog.md:543 +msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" msgstr "" -#: ../../source/ref-changelog.md:783 -msgid "" -"**Removed** `flwr_example` **and** `flwr_experimental` **from release " -"build** ([#869](https://github.com/adap/flower/pull/869))" +#: ../../source/ref-changelog.md:544 +msgid "All `Client`/`NumPyClient` methods are now optional" msgstr "" -#: ../../source/ref-changelog.md:785 -msgid "" -"The packages `flwr_example` and `flwr_experimental` have been deprecated " -"since Flower 0.12.0 and they are not longer included in Flower release " -"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " -"an upcoming release." +#: ../../source/ref-changelog.md:545 +msgid "Configurable `get_parameters`" msgstr "" -#: ../../source/ref-changelog.md:787 -msgid "v0.17.0 (2021-09-24)" +#: ../../source/ref-changelog.md:546 +msgid "" +"Tons of small API cleanups resulting in a more coherent developer " +"experience" msgstr "" -#: ../../source/ref-changelog.md:791 +#: ../../source/ref-changelog.md:550 msgid "" -"**Experimental virtual client engine** " -"([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" +"We would like to give our **special thanks** to all the contributors who " +"made Flower 1.0 possible (in reverse [GitHub " +"Contributors](https://github.com/adap/flower/graphs/contributors) order):" msgstr "" -#: ../../source/ref-changelog.md:793 +#: ../../source/ref-changelog.md:552 msgid "" -"One of Flower's goals is to enable research at scale. This release " -"enables a first (experimental) peek at a major new feature, codenamed the" -" virtual client engine. Virtual clients enable simulations that scale to " -"a (very) large number of clients on a single machine or compute cluster. 
" -"The easiest way to test the new functionality is to look at the two new " -"code examples called `quickstart_simulation` and `simulation_pytorch`." +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sancarlim](https://github.com/sancarlim), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " 
+"[@danieljanes](https://github.com/danieljanes)." msgstr "" -#: ../../source/ref-changelog.md:795 +#: ../../source/ref-changelog.md:556 msgid "" -"The feature is still experimental, so there's no stability guarantee for " -"the API. It's also not quite ready for prime time and comes with a few " -"known caveats. However, those who are curious are encouraged to try it " -"out and share their thoughts." +"**All arguments must be passed as keyword arguments** " +"([#1338](https://github.com/adap/flower/pull/1338))" msgstr "" -#: ../../source/ref-changelog.md:797 +#: ../../source/ref-changelog.md:558 msgid "" -"**New built-in strategies** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" +"Pass all arguments as keyword arguments, positional arguments are not " +"longer supported. Code that uses positional arguments (e.g., " +"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " +"for each positional argument (e.g., " +"`start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." msgstr "" -#: ../../source/ref-changelog.md:799 +#: ../../source/ref-changelog.md:560 msgid "" -"FedYogi - Federated learning strategy using Yogi on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +"**Introduce configuration object** `ServerConfig` **in** `start_server` " +"**and** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" msgstr "" -#: ../../source/ref-changelog.md:800 +#: ../../source/ref-changelog.md:562 msgid "" -"FedAdam - Federated learning strategy using Adam on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " +"600.0}`, `start_server` and `start_simulation` now expect a configuration" +" object of type `flwr.server.ServerConfig`. 
`ServerConfig` takes the same" +" arguments that as the previous config dict, but it makes writing type-" +"safe code easier and the default parameters values more transparent." msgstr "" -#: ../../source/ref-changelog.md:802 +#: ../../source/ref-changelog.md:564 msgid "" -"**New PyTorch Lightning code example** " -"([#617](https://github.com/adap/flower/pull/617))" +"**Rename built-in strategy parameters for clarity** " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/ref-changelog.md:804 +#: ../../source/ref-changelog.md:566 msgid "" -"**New Variational Auto-Encoder code example** " -"([#752](https://github.com/adap/flower/pull/752))" +"The following built-in strategy parameters were renamed to improve " +"readability and consistency with other API's:" msgstr "" -#: ../../source/ref-changelog.md:806 -msgid "" -"**New scikit-learn code example** " -"([#748](https://github.com/adap/flower/pull/748))" +#: ../../source/ref-changelog.md:568 +msgid "`fraction_eval` --> `fraction_evaluate`" msgstr "" -#: ../../source/ref-changelog.md:808 -msgid "" -"**New experimental TensorBoard strategy** " -"([#789](https://github.com/adap/flower/pull/789))" +#: ../../source/ref-changelog.md:569 +msgid "`min_eval_clients` --> `min_evaluate_clients`" msgstr "" -#: ../../source/ref-changelog.md:812 -msgid "" -"Improved advanced TensorFlow code example " -"([#769](https://github.com/adap/flower/pull/769))" +#: ../../source/ref-changelog.md:570 +msgid "`eval_fn` --> `evaluate_fn`" msgstr "" -#: ../../source/ref-changelog.md:813 +#: ../../source/ref-changelog.md:572 msgid "" -"Warning when `min_available_clients` is misconfigured " -"([#830](https://github.com/adap/flower/pull/830))" +"**Update default arguments of built-in strategies** " +"([#1278](https://github.com/adap/flower/pull/1278))" msgstr "" -#: ../../source/ref-changelog.md:814 +#: ../../source/ref-changelog.md:574 msgid "" -"Improved gRPC server docs " 
-"([#841](https://github.com/adap/flower/pull/841))" +"All built-in strategies now use `fraction_fit=1.0` and " +"`fraction_evaluate=1.0`, which means they select *all* currently " +"available clients for training and evaluation. Projects that relied on " +"the previous default values can get the previous behaviour by " +"initializing the strategy in the following way:" msgstr "" -#: ../../source/ref-changelog.md:815 -msgid "" -"Improved error message in `NumPyClient` " -"([#851](https://github.com/adap/flower/pull/851))" +#: ../../source/ref-changelog.md:576 +msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" msgstr "" -#: ../../source/ref-changelog.md:816 +#: ../../source/ref-changelog.md:578 msgid "" -"Improved PyTorch quickstart code example " -"([#852](https://github.com/adap/flower/pull/852))" +"**Add** `server_round` **to** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/ref-changelog.md:820 +#: ../../source/ref-changelog.md:580 msgid "" -"**Disabled final distributed evaluation** " -"([#800](https://github.com/adap/flower/pull/800))" +"The `Strategy` method `evaluate` now receives the current round of " +"federated learning/evaluation as the first parameter." msgstr "" -#: ../../source/ref-changelog.md:822 +#: ../../source/ref-changelog.md:582 msgid "" -"Prior behaviour was to perform a final round of distributed evaluation on" -" all connected clients, which is often not required (e.g., when using " -"server-side evaluation). The prior behaviour can be enabled by passing " -"`force_final_distributed_eval=True` to `start_server`." 
+"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/ref-changelog.md:824 +#: ../../source/ref-changelog.md:584 msgid "" -"**Renamed q-FedAvg strategy** " -"([#802](https://github.com/adap/flower/pull/802))" +"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " +"three parameters: (1) The current round of federated learning/evaluation " +"(`server_round`), (2) the model parameters to evaluate (`parameters`), " +"and (3) a config dictionary (`config`)." msgstr "" -#: ../../source/ref-changelog.md:826 +#: ../../source/ref-changelog.md:586 msgid "" -"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " -"the notation given in the original paper (q-FFL is the optimization " -"objective, q-FedAvg is the proposed solver). Note the original (now " -"deprecated) `QffedAvg` class is still available for compatibility reasons" -" (it will be removed in a future release)." +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" msgstr "" -#: ../../source/ref-changelog.md:828 +#: ../../source/ref-changelog.md:588 msgid "" -"**Deprecated and renamed code example** `simulation_pytorch` **to** " -"`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " +"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " +"current round of federated learning/evaluation as their first parameter. " +"To improve reaability and avoid confusion with *random*, this parameter " +"has been renamed from `rnd` to `server_round`." msgstr "" -#: ../../source/ref-changelog.md:830 +#: ../../source/ref-changelog.md:590 msgid "" -"This example has been replaced by a new example. 
The new example is based" -" on the experimental virtual client engine, which will become the new " -"default way of doing most types of large-scale simulations in Flower. The" -" existing example was kept for reference purposes, but it might be " -"removed in the future." +"**Move** `flwr.dataset` **to** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" msgstr "" -#: ../../source/ref-changelog.md:832 -msgid "v0.16.0 (2021-05-11)" +#: ../../source/ref-changelog.md:592 +msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." msgstr "" -#: ../../source/ref-changelog.md:836 +#: ../../source/ref-changelog.md:594 msgid "" -"**New built-in strategies** " -"([#549](https://github.com/adap/flower/pull/549))" -msgstr "" - -#: ../../source/ref-changelog.md:838 -msgid "(abstract) FedOpt" +"**Remove experimental strategies** " +"([#1280](https://github.com/adap/flower/pull/1280))" msgstr "" -#: ../../source/ref-changelog.md:841 +#: ../../source/ref-changelog.md:596 msgid "" -"**Custom metrics for server and strategies** " -"([#717](https://github.com/adap/flower/pull/717))" +"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " +"`FedFSv1`)." msgstr "" -#: ../../source/ref-changelog.md:843 +#: ../../source/ref-changelog.md:598 msgid "" -"The Flower server is now fully task-agnostic, all remaining instances of " -"task-specific metrics (such as `accuracy`) have been replaced by custom " -"metrics dictionaries. Flower 0.15 introduced the capability to pass a " -"dictionary containing custom metrics from client to server. As of this " -"release, custom metrics replace task-specific metrics on the server." 
+"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/ref-changelog.md:845 +#: ../../source/ref-changelog.md:600 msgid "" -"Custom metric dictionaries are now used in two user-facing APIs: they are" -" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " -"they enable evaluation functions passed to built-in strategies (via " -"`eval_fn`) to return more than two evaluation metrics. Strategies can " -"even return *aggregated* metrics dictionaries for the server to keep " -"track of." +"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " +"capture what this type is all about." msgstr "" -#: ../../source/ref-changelog.md:847 +#: ../../source/ref-changelog.md:602 msgid "" -"Strategy implementations should migrate their `aggregate_fit` and " -"`aggregate_evaluate` methods to the new return type (e.g., by simply " -"returning an empty `{}`), server-side evaluation functions should migrate" -" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +"**Remove antiquated** `force_final_distributed_eval` **from** " +"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/ref-changelog.md:849 +#: ../../source/ref-changelog.md:604 msgid "" -"Flower 0.15-style return types are deprecated (but still supported), " -"compatibility will be removed in a future release." +"The `start_server` parameter `force_final_distributed_eval` has long been" +" a historic artefact, in this release it is finally gone for good." 
msgstr "" -#: ../../source/ref-changelog.md:851 +#: ../../source/ref-changelog.md:606 msgid "" -"**Migration warnings for deprecated functionality** " -"([#690](https://github.com/adap/flower/pull/690))" +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" msgstr "" -#: ../../source/ref-changelog.md:853 +#: ../../source/ref-changelog.md:608 msgid "" -"Earlier versions of Flower were often migrated to new APIs, while " -"maintaining compatibility with legacy APIs. This release introduces " -"detailed warning messages if usage of deprecated APIs is detected. The " -"new warning messages often provide details on how to migrate to more " -"recent APIs, thus easing the transition from one release to another." +"The `get_parameters` method now accepts a configuration dictionary, just " +"like `get_properties`, `fit`, and `evaluate`." msgstr "" -#: ../../source/ref-changelog.md:855 +#: ../../source/ref-changelog.md:610 msgid "" -"Improved docs and docstrings " -"([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" +"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " +"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -#: ../../source/ref-changelog.md:857 -msgid "MXNet example and documentation" +#: ../../source/ref-changelog.md:612 +msgid "" +"The `start_simulation` function now accepts a configuration dictionary " +"`config` instead of the `num_rounds` integer. This improves the " +"consistency between `start_simulation` and `start_server` and makes " +"transitioning between the two easier." 
msgstr "" -#: ../../source/ref-changelog.md:859 +#: ../../source/ref-changelog.md:616 msgid "" -"FedBN implementation in example PyTorch: From Centralized To Federated " -"([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" msgstr "" -#: ../../source/ref-changelog.md:863 +#: ../../source/ref-changelog.md:618 msgid "" -"**Serialization-agnostic server** " -"([#721](https://github.com/adap/flower/pull/721))" +"The previous Flower release introduced experimental support for Python " +"3.10, this release declares Python 3.10 support as stable." msgstr "" -#: ../../source/ref-changelog.md:865 +#: ../../source/ref-changelog.md:620 msgid "" -"The Flower server is now fully serialization-agnostic. Prior usage of " -"class `Weights` (which represents parameters as deserialized NumPy " -"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " -"`Parameters` objects are fully serialization-agnostic and represents " -"parameters as byte arrays, the `tensor_type` attributes indicates how " -"these byte arrays should be interpreted (e.g., for " -"serialization/deserialization)." +"**Make all** `Client` **and** `NumPyClient` **methods optional** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" msgstr "" -#: ../../source/ref-changelog.md:867 +#: ../../source/ref-changelog.md:622 msgid "" -"Built-in strategies implement this approach by handling serialization and" -" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " -"implementations should update to the slightly changed Strategy method " -"definitions. Strategy authors can consult PR " -"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" -" easily migrate to the new format." 
+"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " +"`fit`, and `evaluate` are all optional. This enables writing clients that" +" implement, for example, only `fit`, but no other method. No need to " +"implement `evaluate` when using centralized evaluation!" msgstr "" -#: ../../source/ref-changelog.md:869 +#: ../../source/ref-changelog.md:624 msgid "" -"Deprecated `flwr.server.Server.evaluate`, use " -"`flwr.server.Server.evaluate_round` instead " -"([#717](https://github.com/adap/flower/pull/717))" +"**Enable passing a** `Server` **instance to** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -#: ../../source/ref-changelog.md:871 -msgid "v0.15.0 (2021-03-12)" +#: ../../source/ref-changelog.md:626 +msgid "" +"Similar to `start_server`, `start_simulation` now accepts a full `Server`" +" instance. This enables users to heavily customize the execution of " +"eperiments and opens the door to running, for example, async FL using the" +" Virtual Client Engine." msgstr "" -#: ../../source/ref-changelog.md:875 +#: ../../source/ref-changelog.md:628 msgid "" -"**Server-side parameter initialization** " -"([#658](https://github.com/adap/flower/pull/658))" +"**Update code examples** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" msgstr "" -#: ../../source/ref-changelog.md:877 +#: ../../source/ref-changelog.md:630 msgid "" -"Model parameters can now be initialized on the server-side. Server-side " -"parameter initialization works via a new `Strategy` method called " -"`initialize_parameters`." +"Many code examples received small or even large maintenance updates, " +"among them are" msgstr "" -#: ../../source/ref-changelog.md:879 -msgid "" -"Built-in strategies support a new constructor argument called " -"`initial_parameters` to set the initial parameters. 
Built-in strategies " -"will provide these initial parameters to the server on startup and then " -"delete them to free the memory afterwards." +#: ../../source/ref-changelog.md:632 +msgid "`scikit-learn`" msgstr "" -#: ../../source/ref-changelog.md:898 -msgid "" -"If no initial parameters are provided to the strategy, the server will " -"continue to use the current behaviour (namely, it will ask one of the " -"connected clients for its parameters and use these as the initial global " -"parameters)." +#: ../../source/ref-changelog.md:633 +msgid "`simulation_pytorch`" msgstr "" -#: ../../source/ref-changelog.md:900 -msgid "Deprecations" +#: ../../source/ref-changelog.md:634 +msgid "`quickstart_pytorch`" msgstr "" -#: ../../source/ref-changelog.md:902 -msgid "" -"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " -"`flwr.server.strategy.FedAvg`, which is equivalent)" +#: ../../source/ref-changelog.md:635 +msgid "`quickstart_simulation`" msgstr "" -#: ../../source/ref-changelog.md:904 -msgid "v0.14.0 (2021-02-18)" +#: ../../source/ref-changelog.md:636 +msgid "`quickstart_tensorflow`" msgstr "" -#: ../../source/ref-changelog.md:908 -msgid "" -"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " -"([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" +#: ../../source/ref-changelog.md:637 +msgid "`advanced_tensorflow`" msgstr "" -#: ../../source/ref-changelog.md:910 +#: ../../source/ref-changelog.md:639 msgid "" -"Clients can now return an additional dictionary mapping `str` keys to " -"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " -"This means one can return almost arbitrary values from `fit`/`evaluate` " -"and make use of them on the server side!" 
+"**Remove the obsolete simulation example** " +"([#1328](https://github.com/adap/flower/pull/1328))" msgstr "" -#: ../../source/ref-changelog.md:912 +#: ../../source/ref-changelog.md:641 msgid "" -"This improvement also allowed for more consistent return types between " -"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " -"dict)` representing the loss, number of examples, and a dictionary " -"holding arbitrary problem-specific values like accuracy." +"Removes the obsolete `simulation` example and renames " +"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " +"naming of `simulation_pytorch`" msgstr "" -#: ../../source/ref-changelog.md:914 +#: ../../source/ref-changelog.md:643 msgid "" -"In case you wondered: this feature is compatible with existing projects, " -"the additional dictionary return value is optional. New code should " -"however migrate to the new return types to be compatible with upcoming " -"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " -"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " -"details." 
+"**Update documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" msgstr "" -#: ../../source/ref-changelog.md:916 +#: ../../source/ref-changelog.md:645 msgid "" -"*Code example:* note the additional dictionary return values in both " -"`FlwrClient.fit` and `FlwrClient.evaluate`:" +"One substantial documentation update fixes multiple smaller rendering " +"issues, makes titles more succinct to improve navigation, removes a " +"deprecated library, updates documentation dependencies, includes the " +"`flwr.common` module in the API reference, includes support for markdown-" +"based documentation, migrates the changelog from `.rst` to `.md`, and " +"fixes a number of smaller details!" msgstr "" -#: ../../source/ref-changelog.md:931 -msgid "" -"**Generalized** `config` **argument in** `Client.fit` **and** " -"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +#: ../../source/ref-changelog.md:647 ../../source/ref-changelog.md:702 +#: ../../source/ref-changelog.md:771 ../../source/ref-changelog.md:810 +msgid "**Minor updates**" msgstr "" -#: ../../source/ref-changelog.md:933 +#: ../../source/ref-changelog.md:649 msgid "" -"The `config` argument used to be of type `Dict[str, str]`, which means " -"that dictionary values were expected to be strings. The new release " -"generalizes this to enable values of the following types: `bool`, " -"`bytes`, `float`, `int`, `str`." 
+"Add round number to fit and evaluate log messages " +"([#1266](https://github.com/adap/flower/pull/1266))" msgstr "" -#: ../../source/ref-changelog.md:935 +#: ../../source/ref-changelog.md:650 msgid "" -"This means one can now pass almost arbitrary values to `fit`/`evaluate` " -"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" -"side and `int(config[\"epochs\"])` on the client side!" +"Add secure gRPC connection to the `advanced_tensorflow` code example " +"([#847](https://github.com/adap/flower/pull/847))" msgstr "" -#: ../../source/ref-changelog.md:937 +#: ../../source/ref-changelog.md:651 msgid "" -"*Code example:* note that the `config` dictionary now contains non-`str` " -"values in both `Client.fit` and `Client.evaluate`:" -msgstr "" - -#: ../../source/ref-changelog.md:954 -msgid "v0.13.0 (2021-01-08)" +"Update developer tooling " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" msgstr "" -#: ../../source/ref-changelog.md:958 +#: ../../source/ref-changelog.md:652 msgid "" -"New example: PyTorch From Centralized To Federated " -"([#549](https://github.com/adap/flower/pull/549))" -msgstr "" - -#: ../../source/ref-changelog.md:959 -msgid "Improved documentation" +"Rename ProtoBuf messages to improve consistency " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/ref-changelog.md:960 -msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" +#: ../../source/ref-changelog.md:654 +msgid "v0.19.0 (2022-05-18)" msgstr "" -#: ../../source/ref-changelog.md:961 -msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +#: ../../source/ref-changelog.md:658 +msgid "" +"**Flower Baselines 
(preview): FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" msgstr "" -#: ../../source/ref-changelog.md:962 +#: ../../source/ref-changelog.md:660 msgid "" -"Updated examples documentation " -"([#549](https://github.com/adap/flower/pull/549))" +"The first preview release of Flower Baselines has arrived! We're " +"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " +"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " +"With this first preview release we're also inviting the community to " +"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" +"contribute-baselines.html)." msgstr "" -#: ../../source/ref-changelog.md:963 +#: ../../source/ref-changelog.md:662 msgid "" -"Removed obsolete documentation " -"([#548](https://github.com/adap/flower/pull/548))" +"**C++ client SDK (preview) and code example** " +"([#1111](https://github.com/adap/flower/pull/1111))" msgstr "" -#: ../../source/ref-changelog.md:965 -msgid "Bugfix:" +#: ../../source/ref-changelog.md:664 +msgid "" +"Preview support for Flower clients written in C++. The C++ preview " +"includes a Flower client SDK and a quickstart code example that " +"demonstrates a simple C++ client using the SDK." msgstr "" -#: ../../source/ref-changelog.md:967 +#: ../../source/ref-changelog.md:666 msgid "" -"`Server.fit` does not disconnect clients when finished, disconnecting the" -" clients is now handled in `flwr.server.start_server` " -"([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))." 
+"**Add experimental support for Python 3.10 and Python 3.11** " +"([#1135](https://github.com/adap/flower/pull/1135))" msgstr "" -#: ../../source/ref-changelog.md:969 -msgid "v0.12.0 (2020-12-07)" +#: ../../source/ref-changelog.md:668 +msgid "" +"Python 3.10 is the latest stable release of Python and Python 3.11 is due" +" to be released in October. This Flower release adds experimental support" +" for both Python versions." msgstr "" -#: ../../source/ref-changelog.md:971 ../../source/ref-changelog.md:987 -msgid "Important changes:" +#: ../../source/ref-changelog.md:670 +msgid "" +"**Aggregate custom metrics through user-provided functions** " +"([#1144](https://github.com/adap/flower/pull/1144))" msgstr "" -#: ../../source/ref-changelog.md:973 +#: ../../source/ref-changelog.md:672 msgid "" -"Added an example for embedded devices " -"([#507](https://github.com/adap/flower/pull/507))" +"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" +" customize the strategy. Built-in strategies support two new arguments, " +"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " +"allow passing custom metric aggregation functions." msgstr "" -#: ../../source/ref-changelog.md:974 +#: ../../source/ref-changelog.md:674 msgid "" -"Added a new NumPyClient (in addition to the existing KerasClient) " -"([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508))" +"**User-configurable round timeout** " +"([#1162](https://github.com/adap/flower/pull/1162))" msgstr "" -#: ../../source/ref-changelog.md:975 +#: ../../source/ref-changelog.md:676 msgid "" -"Deprecated `flwr_example` package and started to migrate examples into " -"the top-level `examples` directory " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" +"A new configuration value allows the round timeout to be set for " +"`start_server` and `start_simulation`. 
If the `config` dictionary " +"contains a `round_timeout` key (with a `float` value in seconds), the " +"server will wait *at least* `round_timeout` seconds before it closes the " +"connection." msgstr "" -#: ../../source/ref-changelog.md:977 -msgid "v0.11.0 (2020-11-30)" +#: ../../source/ref-changelog.md:678 +msgid "" +"**Enable both federated evaluation and centralized evaluation to be used " +"at the same time in all built-in strategies** " +"([#1091](https://github.com/adap/flower/pull/1091))" msgstr "" -#: ../../source/ref-changelog.md:979 -msgid "Incompatible changes:" +#: ../../source/ref-changelog.md:680 +msgid "" +"Built-in strategies can now perform both federated evaluation (i.e., " +"client-side) and centralized evaluation (i.e., server-side) in the same " +"round. Federated evaluation can be disabled by setting `fraction_eval` to" +" `0.0`." msgstr "" -#: ../../source/ref-changelog.md:981 +#: ../../source/ref-changelog.md:682 msgid "" -"Renamed strategy methods " -"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " -"Flower's public APIs. Other public methods/functions (e.g., every method " -"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " -"which is why we're removing it from the four methods in Strategy. 
To " -"migrate rename the following `Strategy` methods accordingly:" +"**Two new Jupyter Notebook tutorials** " +"([#1141](https://github.com/adap/flower/pull/1141))" msgstr "" -#: ../../source/ref-changelog.md:982 -msgid "`on_configure_evaluate` => `configure_evaluate`" +#: ../../source/ref-changelog.md:684 +msgid "" +"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " +"basic and intermediate Flower features:" msgstr "" -#: ../../source/ref-changelog.md:983 -msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" +#: ../../source/ref-changelog.md:686 +msgid "" +"*An Introduction to Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:984 -msgid "`on_configure_fit` => `configure_fit`" +#: ../../source/ref-changelog.md:688 +msgid "" +"*Using Strategies in Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:985 -msgid "`on_aggregate_fit` => `aggregate_fit`" +#: ../../source/ref-changelog.md:690 +msgid "" +"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " +"([#1076](https://github.com/adap/flower/pull/1076))" msgstr "" -#: ../../source/ref-changelog.md:989 +#: ../../source/ref-changelog.md:692 msgid "" -"Deprecated `DefaultStrategy` " -"([#479](https://github.com/adap/flower/pull/479)). To migrate use " -"`FedAvg` instead." +"The new `FedAvgM` strategy implements Federated Averaging with Server " +"Momentum \\[Hsu et al., 2019\\]." msgstr "" -#: ../../source/ref-changelog.md:990 +#: ../../source/ref-changelog.md:694 msgid "" -"Simplified examples and baselines " -"([#484](https://github.com/adap/flower/pull/484))." 
+"**New advanced PyTorch code example** " +"([#1007](https://github.com/adap/flower/pull/1007))" msgstr "" -#: ../../source/ref-changelog.md:991 +#: ../../source/ref-changelog.md:696 msgid "" -"Removed presently unused `on_conclude_round` from strategy interface " -"([#483](https://github.com/adap/flower/pull/483))." +"A new code example (`advanced_pytorch`) demonstrates advanced Flower " +"concepts with PyTorch." msgstr "" -#: ../../source/ref-changelog.md:992 +#: ../../source/ref-changelog.md:698 msgid "" -"Set minimal Python version to 3.6.1 instead of 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." +"**New JAX code example** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" msgstr "" -#: ../../source/ref-changelog.md:993 +#: ../../source/ref-changelog.md:700 msgid "" -"Improved `Strategy` docstrings " -"([#470](https://github.com/adap/flower/pull/470))." +"A new code example (`jax_from_centralized_to_federated`) shows federated " +"learning with JAX and Flower." msgstr "" -#: ../../source/ref-example-projects.rst:2 -msgid "Example projects" +#: ../../source/ref-changelog.md:704 +msgid "" +"New option to keep Ray running if Ray was already initialized in " +"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" msgstr "" -#: ../../source/ref-example-projects.rst:4 +#: ../../source/ref-changelog.md:705 msgid "" -"Flower comes with a number of usage examples. The examples demonstrate " -"how Flower can be used to federate different kinds of existing machine " -"learning pipelines, usually leveraging popular machine learning " -"frameworks such as `PyTorch `_ or `TensorFlow " -"`_." 
+"Add support for custom `ClientManager` as a `start_simulation` parameter " +"([#1171](https://github.com/adap/flower/pull/1171))" msgstr "" -#: ../../source/ref-example-projects.rst:11 +#: ../../source/ref-changelog.md:706 msgid "" -"Flower usage examples used to be bundled with Flower in a package called " -"``flwr_example``. We are migrating those examples to standalone projects " -"to make them easier to use. All new examples are based in the directory " -"`examples `_." +"New documentation for [implementing " +"strategies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" msgstr "" -#: ../../source/ref-example-projects.rst:16 -msgid "The following examples are available as standalone projects." +#: ../../source/ref-changelog.md:707 +msgid "" +"New mobile-friendly documentation theme " +"([#1174](https://github.com/adap/flower/pull/1174))" msgstr "" -#: ../../source/ref-example-projects.rst:20 -msgid "Quickstart TensorFlow/Keras" +#: ../../source/ref-changelog.md:708 +msgid "" +"Limit version range for (optional) `ray` dependency to include only " +"compatible releases (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" msgstr "" -#: ../../source/ref-example-projects.rst:22 +#: ../../source/ref-changelog.md:712 msgid "" -"The TensorFlow/Keras quickstart example shows CIFAR-10 image " -"classification with MobileNetV2:" +"**Remove deprecated support for Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" msgstr "" -#: ../../source/ref-example-projects.rst:25 +#: ../../source/ref-changelog.md:713 msgid "" -"`Quickstart TensorFlow (Code) " -"`_" +"**Remove deprecated KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" msgstr "" -#: ../../source/ref-example-projects.rst:26 +#: ../../source/ref-changelog.md:714 msgid "" -"`Quickstart TensorFlow (Tutorial) `_" +"**Remove deprecated 
no-op extra installs** " +"([#973](https://github.com/adap/flower/pull/973))" msgstr "" -#: ../../source/ref-example-projects.rst:27 +#: ../../source/ref-changelog.md:715 msgid "" -"`Quickstart TensorFlow (Blog Post) `_" +"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " +"([#869](https://github.com/adap/flower/pull/869))" msgstr "" -#: ../../source/ref-example-projects.rst:31 -#: ../../source/tutorial-quickstart-pytorch.rst:5 -msgid "Quickstart PyTorch" +#: ../../source/ref-changelog.md:716 +msgid "" +"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" msgstr "" -#: ../../source/ref-example-projects.rst:33 +#: ../../source/ref-changelog.md:717 msgid "" -"The PyTorch quickstart example shows CIFAR-10 image classification with a" -" simple Convolutional Neural Network:" +"**Remove deprecated DefaultStrategy strategy** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/ref-example-projects.rst:36 +#: ../../source/ref-changelog.md:718 msgid "" -"`Quickstart PyTorch (Code) " -"`_" +"**Remove deprecated support for eval_fn accuracy return value** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/ref-example-projects.rst:37 +#: ../../source/ref-changelog.md:719 msgid "" -"`Quickstart PyTorch (Tutorial) `_" +"**Remove deprecated support for passing initial parameters as NumPy " +"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/ref-example-projects.rst:41 -msgid "PyTorch: From Centralized To Federated" +#: ../../source/ref-changelog.md:721 +msgid "v0.18.0 (2022-02-28)" msgstr "" -#: ../../source/ref-example-projects.rst:43 +#: ../../source/ref-changelog.md:725 msgid "" -"This example shows how a regular PyTorch project can be federated using " -"Flower:" +"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " +"Google Colab** 
([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" msgstr "" -#: ../../source/ref-example-projects.rst:45 +#: ../../source/ref-changelog.md:727 msgid "" -"`PyTorch: From Centralized To Federated (Code) " -"`_" +"Simulations (using the Virtual Client Engine through `start_simulation`) " +"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " +"installing Flower with the `simulation` extra (`pip install " +"flwr[simulation]`)." msgstr "" -#: ../../source/ref-example-projects.rst:46 +#: ../../source/ref-changelog.md:729 msgid "" -"`PyTorch: From Centralized To Federated (Tutorial) " -"`_" -msgstr "" - -#: ../../source/ref-example-projects.rst:50 -msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" +"**New Jupyter Notebook code example** " +"([#833](https://github.com/adap/flower/pull/833))" msgstr "" -#: ../../source/ref-example-projects.rst:52 +#: ../../source/ref-changelog.md:731 msgid "" -"This example shows how Flower can be used to build a federated learning " -"system that run across Raspberry Pi and Nvidia Jetson:" +"A new code example (`quickstart_simulation`) demonstrates Flower " +"simulations using the Virtual Client Engine through Jupyter Notebook " +"(incl. Google Colab)." msgstr "" -#: ../../source/ref-example-projects.rst:54 +#: ../../source/ref-changelog.md:733 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " -"`_" +"**Client properties (feature preview)** " +"([#795](https://github.com/adap/flower/pull/795))" msgstr "" -#: ../../source/ref-example-projects.rst:55 +#: ../../source/ref-changelog.md:735 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " -"`_" +"Clients can implement a new method `get_properties` to enable server-side" +" strategies to query client properties." 
msgstr "" -#: ../../source/ref-example-projects.rst:60 -msgid "Legacy Examples (`flwr_example`)" +#: ../../source/ref-changelog.md:737 +msgid "" +"**Experimental Android support with TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" msgstr "" -#: ../../source/ref-example-projects.rst:63 +#: ../../source/ref-changelog.md:739 msgid "" -"The useage examples in `flwr_example` are deprecated and will be removed " -"in the future. New examples are provided as standalone projects in " -"`examples `_." +"Android support has finally arrived in `main`! Flower is both client-" +"agnostic and framework-agnostic by design. One can integrate arbitrary " +"client platforms and with this release, using Flower on Android has " +"become a lot easier." msgstr "" -#: ../../source/ref-example-projects.rst:69 -msgid "Extra Dependencies" +#: ../../source/ref-changelog.md:741 +msgid "" +"The example uses TFLite on the client side, along with a new " +"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " +"still experimental, but they are a first step towards a fully-fledged " +"Android SDK and a unified `FedAvg` implementation that integrated the new" +" functionality from `FedAvgAndroid`." msgstr "" -#: ../../source/ref-example-projects.rst:71 +#: ../../source/ref-changelog.md:743 msgid "" -"The core Flower framework keeps a minimal set of dependencies. The " -"examples demonstrate Flower in the context of different machine learning " -"frameworks, so additional dependencies need to be installed before an " -"example can be run." 
+"**Make gRPC keepalive time user-configurable and decrease default " +"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" msgstr "" -#: ../../source/ref-example-projects.rst:75 -msgid "For PyTorch examples::" +#: ../../source/ref-changelog.md:745 +msgid "" +"The default gRPC keepalive time has been reduced to increase the " +"compatibility of Flower with more cloud environments (for example, " +"Microsoft Azure). Users can configure the keepalive time to customize the" +" gRPC stack based on specific requirements." msgstr "" -#: ../../source/ref-example-projects.rst:79 -msgid "For TensorFlow examples::" +#: ../../source/ref-changelog.md:747 +msgid "" +"**New differential privacy example using Opacus and PyTorch** " +"([#805](https://github.com/adap/flower/pull/805))" msgstr "" -#: ../../source/ref-example-projects.rst:83 -msgid "For both PyTorch and TensorFlow examples::" +#: ../../source/ref-changelog.md:749 +msgid "" +"A new code example (`opacus`) demonstrates differentially-private " +"federated learning with Opacus, PyTorch, and Flower." msgstr "" -#: ../../source/ref-example-projects.rst:87 +#: ../../source/ref-changelog.md:751 msgid "" -"Please consult :code:`pyproject.toml` for a full list of possible extras " -"(section :code:`[tool.poetry.extras]`)." +"**New Hugging Face Transformers code example** " +"([#863](https://github.com/adap/flower/pull/863))" msgstr "" -#: ../../source/ref-example-projects.rst:92 -msgid "PyTorch Examples" +#: ../../source/ref-changelog.md:753 +msgid "" +"A new code example (`quickstart_huggingface`) demonstrates usage of " +"Hugging Face Transformers with Flower." msgstr "" -#: ../../source/ref-example-projects.rst:94 +#: ../../source/ref-changelog.md:755 msgid "" -"Our PyTorch examples are based on PyTorch 1.7. They should work with " -"other releases as well. So far, we provide the following examples." 
+"**New MLCube code example** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" msgstr "" -#: ../../source/ref-example-projects.rst:98 -msgid "CIFAR-10 Image Classification" +#: ../../source/ref-changelog.md:757 +msgid "" +"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " +"with Flower." msgstr "" -#: ../../source/ref-example-projects.rst:100 +#: ../../source/ref-changelog.md:759 msgid "" -"`CIFAR-10 and CIFAR-100 `_ " -"are popular RGB image datasets. The Flower CIFAR-10 example uses PyTorch " -"to train a simple CNN classifier in a federated learning setup with two " -"clients." +"**SSL-enabled server and client** " +"([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" msgstr "" -#: ../../source/ref-example-projects.rst:104 -#: ../../source/ref-example-projects.rst:121 -#: ../../source/ref-example-projects.rst:146 -msgid "First, start a Flower server:" +#: ../../source/ref-changelog.md:761 +msgid "" +"SSL enables secure encrypted connections between clients and servers. " +"This release open-sources the Flower secure gRPC implementation to make " +"encrypted communication channels accessible to all Flower users." 
msgstr "" -#: ../../source/ref-example-projects.rst:106 -msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#: ../../source/ref-changelog.md:763 +msgid "" +"**Updated** `FedAdam` **and** `FedYogi` **strategies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" msgstr "" -#: ../../source/ref-example-projects.rst:108 -#: ../../source/ref-example-projects.rst:125 -#: ../../source/ref-example-projects.rst:150 -msgid "Then, start the two clients in a new terminal window:" +#: ../../source/ref-changelog.md:765 +msgid "" +"`FedAdam` and `FedAdam` match the latest version of the Adaptive " +"Federated Optimization paper." msgstr "" -#: ../../source/ref-example-projects.rst:110 -msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#: ../../source/ref-changelog.md:767 +msgid "" +"**Initialize** `start_simulation` **with a list of client IDs** " +"([#860](https://github.com/adap/flower/pull/860))" msgstr "" -#: ../../source/ref-example-projects.rst:112 -msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." +#: ../../source/ref-changelog.md:769 +msgid "" +"`start_simulation` can now be called with a list of client IDs " +"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " +"`client_fn` whenever a client needs to be initialized, which can make it " +"easier to load data partitions that are not accessible through `int` " +"identifiers." msgstr "" -#: ../../source/ref-example-projects.rst:115 -msgid "ImageNet-2012 Image Classification" +#: ../../source/ref-changelog.md:773 +msgid "" +"Update `num_examples` calculation in PyTorch code examples in " +"([#909](https://github.com/adap/flower/pull/909))" msgstr "" -#: ../../source/ref-example-projects.rst:117 +#: ../../source/ref-changelog.md:774 msgid "" -"`ImageNet-2012 `_ is one of the major computer" -" vision datasets. 
The Flower ImageNet example uses PyTorch to train a " -"ResNet-18 classifier in a federated learning setup with ten clients." +"Expose Flower version through `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" msgstr "" -#: ../../source/ref-example-projects.rst:123 -msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#: ../../source/ref-changelog.md:775 +msgid "" +"`start_server` in `app.py` now returns a `History` object containing " +"metrics from training ([#974](https://github.com/adap/flower/pull/974))" msgstr "" -#: ../../source/ref-example-projects.rst:127 -msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#: ../../source/ref-changelog.md:776 +msgid "" +"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" msgstr "" -#: ../../source/ref-example-projects.rst:129 -msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." +#: ../../source/ref-changelog.md:777 +msgid "" +"Increase sleep time after server start to three seconds in all code " +"examples ([#1086](https://github.com/adap/flower/pull/1086))" msgstr "" -#: ../../source/ref-example-projects.rst:133 -msgid "TensorFlow Examples" +#: ../../source/ref-changelog.md:778 +msgid "" +"Added a new FAQ section to the documentation " +"([#948](https://github.com/adap/flower/pull/948))" msgstr "" -#: ../../source/ref-example-projects.rst:135 +#: ../../source/ref-changelog.md:779 msgid "" -"Our TensorFlow examples are based on TensorFlow 2.0 or newer. So far, we " -"provide the following examples." +"And many more under-the-hood changes, library updates, documentation " +"changes, and tooling improvements!" 
msgstr "" -#: ../../source/ref-example-projects.rst:139 -msgid "Fashion-MNIST Image Classification" +#: ../../source/ref-changelog.md:783 +msgid "" +"**Removed** `flwr_example` **and** `flwr_experimental` **from release " +"build** ([#869](https://github.com/adap/flower/pull/869))" msgstr "" -#: ../../source/ref-example-projects.rst:141 +#: ../../source/ref-changelog.md:785 msgid "" -"`Fashion-MNIST `_ is " -"often used as the \"Hello, world!\" of machine learning. We follow this " -"tradition and provide an example which samples random local datasets from" -" Fashion-MNIST and trains a simple image classification model over those " -"partitions." +"The packages `flwr_example` and `flwr_experimental` have been deprecated " +"since Flower 0.12.0 and they are not longer included in Flower release " +"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " +"an upcoming release." msgstr "" -#: ../../source/ref-example-projects.rst:148 -msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#: ../../source/ref-changelog.md:787 +msgid "v0.17.0 (2021-09-24)" msgstr "" -#: ../../source/ref-example-projects.rst:152 -msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#: ../../source/ref-changelog.md:791 +msgid "" +"**Experimental virtual client engine** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" msgstr "" -#: ../../source/ref-example-projects.rst:154 +#: ../../source/ref-changelog.md:793 msgid "" -"For more details, see " -":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +"One of Flower's goals is to enable research at scale. This release " +"enables a first (experimental) peek at a major new feature, codenamed the" +" virtual client engine. 
Virtual clients enable simulations that scale to " +"a (very) large number of clients on a single machine or compute cluster. " +"The easiest way to test the new functionality is to look at the two new " +"code examples called `quickstart_simulation` and `simulation_pytorch`." msgstr "" -#: ../../source/ref-faq.rst:4 +#: ../../source/ref-changelog.md:795 msgid "" -"This page collects answers to commonly asked questions about Federated " -"Learning with Flower." +"The feature is still experimental, so there's no stability guarantee for " +"the API. It's also not quite ready for prime time and comes with a few " +"known caveats. However, those who are curious are encouraged to try it " +"out and share their thoughts." msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can Flower run on Juptyter Notebooks / Google Colab?" +#: ../../source/ref-changelog.md:797 +msgid "" +"**New built-in strategies** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" msgstr "" -#: ../../source/ref-faq.rst:8 +#: ../../source/ref-changelog.md:799 msgid "" -"Yes, it can! Flower even comes with a few under-the-hood optimizations to" -" make it work even better on Colab. Here's a quickstart example:" +"FedYogi - Federated learning strategy using Yogi on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -#: ../../source/ref-faq.rst:10 +#: ../../source/ref-changelog.md:800 msgid "" -"`Flower simulation PyTorch " -"`_" +"FedAdam - Federated learning strategy using Adam on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -#: ../../source/ref-faq.rst:11 +#: ../../source/ref-changelog.md:802 msgid "" -"`Flower simulation TensorFlow/Keras " -"`_" +"**New PyTorch Lightning code example** " +"([#617](https://github.com/adap/flower/pull/617))" msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" 
+#: ../../source/ref-changelog.md:804 +msgid "" +"**New Variational Auto-Encoder code example** " +"([#752](https://github.com/adap/flower/pull/752))" msgstr "" -#: ../../source/ref-faq.rst:15 +#: ../../source/ref-changelog.md:806 msgid "" -"Find the `blog post about federated learning on embedded device here " -"`_" -" and the corresponding `GitHub code example " -"`_." +"**New scikit-learn code example** " +"([#748](https://github.com/adap/flower/pull/748))" msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +#: ../../source/ref-changelog.md:808 +msgid "" +"**New experimental TensorBoard strategy** " +"([#789](https://github.com/adap/flower/pull/789))" msgstr "" -#: ../../source/ref-faq.rst:19 +#: ../../source/ref-changelog.md:812 msgid "" -"Yes, it does. Please take a look at our `blog post " -"`_ or check out the code examples:" +"Improved advanced TensorFlow code example " +"([#769](https://github.com/adap/flower/pull/769))" msgstr "" -#: ../../source/ref-faq.rst:21 +#: ../../source/ref-changelog.md:813 msgid "" -"`Android Kotlin example `_" +"Warning when `min_available_clients` is misconfigured " +"([#830](https://github.com/adap/flower/pull/830))" msgstr "" -#: ../../source/ref-faq.rst:22 -msgid "`Android Java example `_" +#: ../../source/ref-changelog.md:814 +msgid "" +"Improved gRPC server docs " +"([#841](https://github.com/adap/flower/pull/841))" msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +#: ../../source/ref-changelog.md:815 +msgid "" +"Improved error message in `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" msgstr "" -#: ../../source/ref-faq.rst:26 +#: ../../source/ref-changelog.md:816 msgid "" -"Yes, of course. 
A list of available examples using Flower within a " -"blockchain environment is available here:" +"Improved PyTorch quickstart code example " +"([#852](https://github.com/adap/flower/pull/852))" msgstr "" -#: ../../source/ref-faq.rst:28 +#: ../../source/ref-changelog.md:820 msgid "" -"`Flower meets Nevermined GitHub Repository `_." +"**Disabled final distributed evaluation** " +"([#800](https://github.com/adap/flower/pull/800))" msgstr "" -#: ../../source/ref-faq.rst:29 +#: ../../source/ref-changelog.md:822 msgid "" -"`Flower meets Nevermined YouTube video " -"`_." +"Prior behaviour was to perform a final round of distributed evaluation on" +" all connected clients, which is often not required (e.g., when using " +"server-side evaluation). The prior behaviour can be enabled by passing " +"`force_final_distributed_eval=True` to `start_server`." msgstr "" -#: ../../source/ref-faq.rst:30 +#: ../../source/ref-changelog.md:824 msgid "" -"`Flower meets KOSMoS `_." +"**Renamed q-FedAvg strategy** " +"([#802](https://github.com/adap/flower/pull/802))" msgstr "" -#: ../../source/ref-faq.rst:31 +#: ../../source/ref-changelog.md:826 msgid "" -"`Flower meets Talan blog post `_ ." +"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " +"the notation given in the original paper (q-FFL is the optimization " +"objective, q-FedAvg is the proposed solver). Note the original (now " +"deprecated) `QffedAvg` class is still available for compatibility reasons" +" (it will be removed in a future release)." msgstr "" -#: ../../source/ref-faq.rst:32 +#: ../../source/ref-changelog.md:828 msgid "" -"`Flower meets Talan GitHub Repository " -"`_ ." 
-msgstr "" - -#: ../../source/ref-telemetry.md:1 -msgid "Telemetry" +"**Deprecated and renamed code example** `simulation_pytorch` **to** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" msgstr "" -#: ../../source/ref-telemetry.md:3 +#: ../../source/ref-changelog.md:830 msgid "" -"The Flower open-source project collects **anonymous** usage metrics to " -"make well-informed decisions to improve Flower. Doing this enables the " -"Flower team to understand how Flower is used and what challenges users " -"might face." +"This example has been replaced by a new example. The new example is based" +" on the experimental virtual client engine, which will become the new " +"default way of doing most types of large-scale simulations in Flower. The" +" existing example was kept for reference purposes, but it might be " +"removed in the future." msgstr "" -#: ../../source/ref-telemetry.md:5 -msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users that do not want to share anonymous usage metrics." +#: ../../source/ref-changelog.md:832 +msgid "v0.16.0 (2021-05-11)" msgstr "" -#: ../../source/ref-telemetry.md:7 -msgid "Principles" +#: ../../source/ref-changelog.md:836 +msgid "" +"**New built-in strategies** " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/ref-telemetry.md:9 -msgid "We follow strong principles guarding anonymous usage metrics collection:" +#: ../../source/ref-changelog.md:838 +msgid "(abstract) FedOpt" msgstr "" -#: ../../source/ref-telemetry.md:11 +#: ../../source/ref-changelog.md:841 msgid "" -"**Optional:** You will always be able to disable telemetry; read on to " -"learn “[How to opt-out](#how-to-opt-out)”." 
+"**Custom metrics for server and strategies** " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -#: ../../source/ref-telemetry.md:12 +#: ../../source/ref-changelog.md:843 msgid "" -"**Anonymous:** The reported usage metrics are anonymous and do not " -"contain any personally identifiable information (PII). See “[Collected " -"metrics](#collected-metrics)” to understand what metrics are being " -"reported." +"The Flower server is now fully task-agnostic, all remaining instances of " +"task-specific metrics (such as `accuracy`) have been replaced by custom " +"metrics dictionaries. Flower 0.15 introduced the capability to pass a " +"dictionary containing custom metrics from client to server. As of this " +"release, custom metrics replace task-specific metrics on the server." msgstr "" -#: ../../source/ref-telemetry.md:13 +#: ../../source/ref-changelog.md:845 msgid "" -"**Transparent:** You can easily inspect what anonymous metrics are being " -"reported; see the section “[How to inspect what is being reported](#how-" -"to-inspect-what-is-being-reported)”" +"Custom metric dictionaries are now used in two user-facing APIs: they are" +" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " +"they enable evaluation functions passed to built-in strategies (via " +"`eval_fn`) to return more than two evaluation metrics. Strategies can " +"even return *aggregated* metrics dictionaries for the server to keep " +"track of." msgstr "" -#: ../../source/ref-telemetry.md:14 +#: ../../source/ref-changelog.md:847 msgid "" -"**Open for feedback:** You can always reach out to us if you have " -"feedback; see the section “[How to contact us](#how-to-contact-us)” for " -"details." 
+"Strategy implementations should migrate their `aggregate_fit` and " +"`aggregate_evaluate` methods to the new return type (e.g., by simply " +"returning an empty `{}`), server-side evaluation functions should migrate" +" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." msgstr "" -#: ../../source/ref-telemetry.md:16 -msgid "How to opt-out" +#: ../../source/ref-changelog.md:849 +msgid "" +"Flower 0.15-style return types are deprecated (but still supported), " +"compatibility will be removed in a future release." msgstr "" -#: ../../source/ref-telemetry.md:18 +#: ../../source/ref-changelog.md:851 msgid "" -"When Flower starts, it will check for an environment variable called " -"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " -"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " -"client, simply do so by prepending your command as in:" +"**Migration warnings for deprecated functionality** " +"([#690](https://github.com/adap/flower/pull/690))" msgstr "" -#: ../../source/ref-telemetry.md:24 +#: ../../source/ref-changelog.md:853 msgid "" -"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," -" `.bashrc` (or whatever configuration file applies to your environment) " -"to disable Flower telemetry permanently." +"Earlier versions of Flower were often migrated to new APIs, while " +"maintaining compatibility with legacy APIs. This release introduces " +"detailed warning messages if usage of deprecated APIs is detected. The " +"new warning messages often provide details on how to migrate to more " +"recent APIs, thus easing the transition from one release to another." 
msgstr "" -#: ../../source/ref-telemetry.md:26 -msgid "Collected metrics" +#: ../../source/ref-changelog.md:855 +msgid "" +"Improved docs and docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" msgstr "" -#: ../../source/ref-telemetry.md:28 -msgid "Flower telemetry collects the following metrics:" +#: ../../source/ref-changelog.md:857 +msgid "MXNet example and documentation" msgstr "" -#: ../../source/ref-telemetry.md:30 +#: ../../source/ref-changelog.md:859 msgid "" -"**Flower version.** Understand which versions of Flower are currently " -"being used. This helps us to decide whether we should invest effort into " -"releasing a patch version for an older version of Flower or instead use " -"the bandwidth to build new features." +"FedBN implementation in example PyTorch: From Centralized To Federated " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" msgstr "" -#: ../../source/ref-telemetry.md:32 +#: ../../source/ref-changelog.md:863 msgid "" -"**Operating system.** Enables us to answer questions such as: *Should we " -"create more guides for Linux, macOS, or Windows?*" +"**Serialization-agnostic server** " +"([#721](https://github.com/adap/flower/pull/721))" msgstr "" -#: ../../source/ref-telemetry.md:34 +#: ../../source/ref-changelog.md:865 msgid "" -"**Python version.** Knowing the Python version helps us, for example, to " -"decide whether we should invest effort into supporting old versions of " -"Python or stop supporting them and start taking advantage of new Python " -"features." +"The Flower server is now fully serialization-agnostic. Prior usage of " +"class `Weights` (which represents parameters as deserialized NumPy " +"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). 
" +"`Parameters` objects are fully serialization-agnostic and represents " +"parameters as byte arrays, the `tensor_type` attributes indicates how " +"these byte arrays should be interpreted (e.g., for " +"serialization/deserialization)." msgstr "" -#: ../../source/ref-telemetry.md:36 +#: ../../source/ref-changelog.md:867 msgid "" -"**Hardware properties.** Understanding the hardware environment that " -"Flower is being used in helps to decide whether we should, for example, " -"put more effort into supporting low-resource environments." +"Built-in strategies implement this approach by handling serialization and" +" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " +"implementations should update to the slightly changed Strategy method " +"definitions. Strategy authors can consult PR " +"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" +" easily migrate to the new format." msgstr "" -#: ../../source/ref-telemetry.md:38 +#: ../../source/ref-changelog.md:869 msgid "" -"**Execution mode.** Knowing what execution mode Flower starts in enables " -"us to understand how heavily certain features are being used and better " -"prioritize based on that." +"Deprecated `flwr.server.Server.evaluate`, use " +"`flwr.server.Server.evaluate_round` instead " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -#: ../../source/ref-telemetry.md:40 -msgid "" -"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " -"time a Flower workload starts. This allows us to understand which device " -"types not only start Flower workloads but also successfully complete " -"them." +#: ../../source/ref-changelog.md:871 +msgid "v0.15.0 (2021-03-12)" msgstr "" -#: ../../source/ref-telemetry.md:42 +#: ../../source/ref-changelog.md:875 msgid "" -"**Source.** Flower telemetry tries to store a random source ID in " -"`~/.flwr/source` the first time a telemetry event is generated. 
The " -"source ID is important to identify whether an issue is recurring or " -"whether an issue is triggered by multiple clusters running concurrently " -"(which often happens in simulation). For example, if a device runs " -"multiple workloads at the same time, and this results in an issue, then, " -"in order to reproduce the issue, multiple workloads must be started at " -"the same time." +"**Server-side parameter initialization** " +"([#658](https://github.com/adap/flower/pull/658))" msgstr "" -#: ../../source/ref-telemetry.md:44 +#: ../../source/ref-changelog.md:877 msgid "" -"You may delete the source ID at any time. If you wish for all events " -"logged under a specific source ID to be deleted, you can send a deletion " -"request mentioning the source ID to `telemetry@flower.ai`. All events " -"related to that source ID will then be permanently deleted." +"Model parameters can now be initialized on the server-side. Server-side " +"parameter initialization works via a new `Strategy` method called " +"`initialize_parameters`." msgstr "" -#: ../../source/ref-telemetry.md:46 +#: ../../source/ref-changelog.md:879 msgid "" -"We will not collect any personally identifiable information. If you think" -" any of the metrics collected could be misused in any way, please [get in" -" touch with us](#how-to-contact-us). We will update this page to reflect " -"any changes to the metrics collected and publish changes in the " -"changelog." +"Built-in strategies support a new constructor argument called " +"`initial_parameters` to set the initial parameters. Built-in strategies " +"will provide these initial parameters to the server on startup and then " +"delete them to free the memory afterwards." msgstr "" -#: ../../source/ref-telemetry.md:48 +#: ../../source/ref-changelog.md:898 msgid "" -"If you think other metrics would be helpful for us to better guide our " -"decisions, please let us know! 
We will carefully review them; if we are " -"confident that they do not compromise user privacy, we may add them." +"If no initial parameters are provided to the strategy, the server will " +"continue to use the current behaviour (namely, it will ask one of the " +"connected clients for its parameters and use these as the initial global " +"parameters)." msgstr "" -#: ../../source/ref-telemetry.md:50 -msgid "How to inspect what is being reported" +#: ../../source/ref-changelog.md:900 +msgid "Deprecations" msgstr "" -#: ../../source/ref-telemetry.md:52 +#: ../../source/ref-changelog.md:902 msgid "" -"We wanted to make it very easy for you to inspect what anonymous usage " -"metrics are reported. You can view all the reported telemetry information" -" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " -"is disabled by default. You may use logging independently from " -"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " -"without sending any metrics." +"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " +"`flwr.server.strategy.FedAvg`, which is equivalent)" msgstr "" -#: ../../source/ref-telemetry.md:58 +#: ../../source/ref-changelog.md:904 +msgid "v0.14.0 (2021-02-18)" +msgstr "" + +#: ../../source/ref-changelog.md:908 msgid "" -"The inspect Flower telemetry without sending any anonymous usage metrics," -" use both environment variables:" +"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" msgstr "" -#: ../../source/ref-telemetry.md:64 -msgid "How to contact us" +#: ../../source/ref-changelog.md:910 +msgid "" +"Clients can now return an additional dictionary mapping `str` keys to " +"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. 
" +"This means one can return almost arbitrary values from `fit`/`evaluate` " +"and make use of them on the server side!" msgstr "" -#: ../../source/ref-telemetry.md:66 +#: ../../source/ref-changelog.md:912 msgid "" -"We want to hear from you. If you have any feedback or ideas on how to " -"improve the way we handle anonymous usage metrics, reach out to us via " -"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " -"(`telemetry@flower.ai`)." +"This improvement also allowed for more consistent return types between " +"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " +"dict)` representing the loss, number of examples, and a dictionary " +"holding arbitrary problem-specific values like accuracy." msgstr "" -#: ../../source/tutorial-quickstart-android.rst:-1 +#: ../../source/ref-changelog.md:914 msgid "" -"Read this Federated Learning quickstart tutorial for creating an Android " -"app using Flower." +"In case you wondered: this feature is compatible with existing projects, " +"the additional dictionary return value is optional. New code should " +"however migrate to the new return types to be compatible with upcoming " +"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " +"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " +"details." msgstr "" -#: ../../source/tutorial-quickstart-android.rst:5 -msgid "Quickstart Android" +#: ../../source/ref-changelog.md:916 +msgid "" +"*Code example:* note the additional dictionary return values in both " +"`FlwrClient.fit` and `FlwrClient.evaluate`:" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:10 +#: ../../source/ref-changelog.md:931 msgid "" -"Let's build a federated learning system using TFLite and Flower on " -"Android!" 
+"**Generalized** `config` **argument in** `Client.fit` **and** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:12 +#: ../../source/ref-changelog.md:933 msgid "" -"Please refer to the `full code example " -"`_ to learn " -"more." +"The `config` argument used to be of type `Dict[str, str]`, which means " +"that dictionary values were expected to be strings. The new release " +"generalizes this to enable values of the following types: `bool`, " +"`bytes`, `float`, `int`, `str`." msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:-1 +#: ../../source/ref-changelog.md:935 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with FastAI to train a vision model on CIFAR-10." +"This means one can now pass almost arbitrary values to `fit`/`evaluate` " +"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" +"side and `int(config[\"epochs\"])` on the client side!" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:5 -msgid "Quickstart fastai" +#: ../../source/ref-changelog.md:937 +msgid "" +"*Code example:* note that the `config` dictionary now contains non-`str` " +"values in both `Client.fit` and `Client.evaluate`:" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:10 -msgid "Let's build a federated learning system using fastai and Flower!" +#: ../../source/ref-changelog.md:954 +msgid "v0.13.0 (2021-01-08)" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:12 +#: ../../source/ref-changelog.md:958 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"New example: PyTorch From Centralized To Federated " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:-1 -msgid "" -"Check out this Federating Learning quickstart tutorial for using Flower " -"with HuggingFace Transformers in order to fine-tune an LLM." 
+#: ../../source/ref-changelog.md:959 +msgid "Improved documentation" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:5 -msgid "Quickstart 🤗 Transformers" +#: ../../source/ref-changelog.md:960 +msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:10 +#: ../../source/ref-changelog.md:961 +msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +msgstr "" + +#: ../../source/ref-changelog.md:962 msgid "" -"Let's build a federated learning system using Hugging Face Transformers " -"and Flower!" +"Updated examples documentation " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:12 +#: ../../source/ref-changelog.md:963 msgid "" -"We will leverage Hugging Face to federate the training of language models" -" over multiple clients using Flower. More specifically, we will fine-tune" -" a pre-trained Transformer model (distilBERT) for sequence classification" -" over a dataset of IMDB ratings. The end goal is to detect if a movie " -"rating is positive or negative." +"Removed obsolete documentation " +"([#548](https://github.com/adap/flower/pull/548))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:18 -msgid "Dependencies" +#: ../../source/ref-changelog.md:965 +msgid "Bugfix:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:20 +#: ../../source/ref-changelog.md:967 msgid "" -"To follow along this tutorial you will need to install the following " -"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, " -":code:`torch`, and :code:`transformers`. This can be done using " -":code:`pip`:" +"`Server.fit` does not disconnect clients when finished, disconnecting the" +" clients is now handled in `flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:30 -msgid "Standard Hugging Face workflow" +#: ../../source/ref-changelog.md:969 +msgid "v0.12.0 (2020-12-07)" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:33 -msgid "Handling the data" +#: ../../source/ref-changelog.md:971 ../../source/ref-changelog.md:987 +msgid "Important changes:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:35 +#: ../../source/ref-changelog.md:973 msgid "" -"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` " -"library. We then need to tokenize the data and create :code:`PyTorch` " -"dataloaders, this is all done in the :code:`load_data` function:" +"Added an example for embedded devices " +"([#507](https://github.com/adap/flower/pull/507))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:81 -msgid "Training and testing the model" +#: ../../source/ref-changelog.md:974 +msgid "" +"Added a new NumPyClient (in addition to the existing KerasClient) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:83 +#: ../../source/ref-changelog.md:975 msgid "" -"Once we have a way of creating our trainloader and testloader, we can " -"take care of the training and testing. 
This is very similar to any " -":code:`PyTorch` training or testing loop:" +"Deprecated `flwr_example` package and started to migrate examples into " +"the top-level `examples` directory " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:121 -msgid "Creating the model itself" +#: ../../source/ref-changelog.md:977 +msgid "v0.11.0 (2020-11-30)" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:123 +#: ../../source/ref-changelog.md:979 +msgid "Incompatible changes:" +msgstr "" + +#: ../../source/ref-changelog.md:981 msgid "" -"To create the model itself, we will just load the pre-trained distillBERT" -" model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" +"Renamed strategy methods " +"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " +"Flower's public APIs. Other public methods/functions (e.g., every method " +"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " +"which is why we're removing it from the four methods in Strategy. To " +"migrate rename the following `Strategy` methods accordingly:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:136 -msgid "Federating the example" +#: ../../source/ref-changelog.md:982 +msgid "`on_configure_evaluate` => `configure_evaluate`" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:139 -msgid "Creating the IMDBClient" +#: ../../source/ref-changelog.md:983 +msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:141 -msgid "" -"To federate our example to multiple clients, we first need to write our " -"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). 
" -"This is very easy, as our model is a standard :code:`PyTorch` model:" +#: ../../source/ref-changelog.md:984 +msgid "`on_configure_fit` => `configure_fit`" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:169 +#: ../../source/ref-changelog.md:985 +msgid "`on_aggregate_fit` => `aggregate_fit`" +msgstr "" + +#: ../../source/ref-changelog.md:989 msgid "" -"The :code:`get_parameters` function lets the server get the client's " -"parameters. Inversely, the :code:`set_parameters` function allows the " -"server to send its parameters to the client. Finally, the :code:`fit` " -"function trains the model locally for the client, and the " -":code:`evaluate` function tests the model locally and returns the " -"relevant metrics." +"Deprecated `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). To migrate use " +"`FedAvg` instead." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:175 -msgid "Starting the server" +#: ../../source/ref-changelog.md:990 +msgid "" +"Simplified examples and baselines " +"([#484](https://github.com/adap/flower/pull/484))." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:177 +#: ../../source/ref-changelog.md:991 msgid "" -"Now that we have a way to instantiate clients, we need to create our " -"server in order to aggregate the results. Using Flower, this can be done " -"very easily by first choosing a strategy (here, we are using " -":code:`FedAvg`, which will define the global weights as the average of " -"all the clients' weights at each round) and then using the " -":code:`flwr.server.start_server` function:" +"Removed presently unused `on_conclude_round` from strategy interface " +"([#483](https://github.com/adap/flower/pull/483))." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:205 +#: ../../source/ref-changelog.md:992 msgid "" -"The :code:`weighted_average` function is there to provide a way to " -"aggregate the metrics distributed amongst the clients (basically this " -"allows us to display a nice average accuracy and loss for every round)." +"Set minimal Python version to 3.6.1 instead of 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:209 -msgid "Putting everything together" +#: ../../source/ref-changelog.md:993 +msgid "" +"Improved `Strategy` docstrings " +"([#470](https://github.com/adap/flower/pull/470))." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:211 -msgid "We can now start client instances using:" +#: ../../source/ref-example-projects.rst:2 +msgid "Example projects" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:221 +#: ../../source/ref-example-projects.rst:4 msgid "" -"And they will be able to connect to the server and start the federated " -"training." +"Flower comes with a number of usage examples. The examples demonstrate " +"how Flower can be used to federate different kinds of existing machine " +"learning pipelines, usually leveraging popular machine learning " +"frameworks such as `PyTorch `_ or `TensorFlow " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:223 +#: ../../source/ref-example-projects.rst:10 msgid "" -"If you want to check out everything put together, you should check out " -"the full code example: [https://github.com/adap/flower/tree/main/examples" -"/quickstart-" -"huggingface](https://github.com/adap/flower/tree/main/examples" -"/quickstart-huggingface)." +"The following examples are available as standalone projects. 
Quickstart " +"TensorFlow/Keras ---------------------------" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:227 +#: ../../source/ref-example-projects.rst:14 msgid "" -"Of course, this is a very basic example, and a lot can be added or " -"modified, it was just to showcase how simply we could federate a Hugging " -"Face workflow using Flower." +"The TensorFlow/Keras quickstart example shows CIFAR-10 image " +"classification with MobileNetV2:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:230 +#: ../../source/ref-example-projects.rst:17 msgid "" -"Note that in this example we used :code:`PyTorch`, but we could have very" -" well used :code:`TensorFlow`." +"`Quickstart TensorFlow (Code) " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:-1 +#: ../../source/ref-example-projects.rst:18 +msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +msgstr "" + +#: ../../source/ref-example-projects.rst:19 msgid "" -"Read this Federated Learning quickstart tutorial for creating an iOS app " -"using Flower to train a neural network on MNIST." +"`Quickstart TensorFlow (Blog Post) `_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:5 -msgid "Quickstart iOS" +#: ../../source/ref-example-projects.rst:23 +#: ../../source/tutorial-quickstart-pytorch.rst:5 +msgid "Quickstart PyTorch" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:10 +#: ../../source/ref-example-projects.rst:25 msgid "" -"In this tutorial we will learn how to train a Neural Network on MNIST " -"using Flower and CoreML on iOS devices." +"The PyTorch quickstart example shows CIFAR-10 image classification with a" +" simple Convolutional Neural Network:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:12 +#: ../../source/ref-example-projects.rst:28 msgid "" -"First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a `virtualenv " -"`_. 
For the Flower " -"client implementation in iOS, it is recommended to use Xcode as our IDE." +"`Quickstart PyTorch (Code) " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:15 -msgid "" -"Our example consists of one Python *server* and two iPhone *clients* that" -" all have the same model." +#: ../../source/ref-example-projects.rst:29 +msgid ":doc:`Quickstart PyTorch (Tutorial) `" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:17 -msgid "" -"*Clients* are responsible for generating individual weight updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." +#: ../../source/ref-example-projects.rst:33 +msgid "PyTorch: From Centralized To Federated" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:21 +#: ../../source/ref-example-projects.rst:35 msgid "" -"Now that we have a rough idea of what is going on, let's get started to " -"setup our Flower server environment. We first need to install Flower. You" -" can do this by using pip:" +"This example shows how a regular PyTorch project can be federated using " +"Flower:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:27 -msgid "Or Poetry:" +#: ../../source/ref-example-projects.rst:37 +msgid "" +"`PyTorch: From Centralized To Federated (Code) " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:36 +#: ../../source/ref-example-projects.rst:38 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training using CoreML as our local training pipeline and " -"MNIST as our dataset. For simplicity reasons we will use the complete " -"Flower client with CoreML, that has been implemented and stored inside " -"the Swift SDK. 
The client implementation can be seen below:" +":doc:`PyTorch: From Centralized To Federated (Tutorial) `" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:72 -msgid "" -"Let's create a new application project in Xcode and add :code:`flwr` as a" -" dependency in your project. For our application, we will store the logic" -" of our app in :code:`FLiOSModel.swift` and the UI elements in " -":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" -" in this quickstart. Please refer to the `full code example " -"`_ to learn more " -"about the app." +#: ../../source/ref-example-projects.rst:42 +msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:75 -msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +#: ../../source/ref-example-projects.rst:44 +msgid "" +"This example shows how Flower can be used to build a federated learning " +"system that run across Raspberry Pi and Nvidia Jetson:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:83 +#: ../../source/ref-example-projects.rst:46 msgid "" -"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " -"will be bundled inside the application during deployment to your iOS " -"device. We need to pass the url to access mlmodel and run CoreML machine " -"learning processes, it can be retrieved by calling the function " -":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " -"into :code:`MLBatchProvider` object. The preprocessing is done inside " -":code:`DataLoader.swift`." 
+"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:99 +#: ../../source/ref-example-projects.rst:47 msgid "" -"Since CoreML does not allow the model parameters to be seen before " -"training, and accessing the model parameters during or after the training" -" can only be done by specifying the layer name, we need to know this " -"informations beforehand, through looking at the model specification, " -"which are written as proto files. The implementation can be seen in " -":code:`MLModelInspect`." +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:102 +#: ../../source/ref-faq.rst:4 msgid "" -"After we have all of the necessary informations, let's create our Flower " -"client." +"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:117 +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +msgstr "" + +#: ../../source/ref-faq.rst:8 msgid "" -"Then start the Flower gRPC client and start communicating to the server " -"by passing our Flower client to the function :code:`startFlwrGRPC`." +"Yes, it can! Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. Here's a quickstart example:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:124 +#: ../../source/ref-faq.rst:10 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " -"The attribute :code:`hostname` and :code:`port` tells the client which " -"server to connect to. This can be done by entering the hostname and port " -"in the application before clicking the start button to start the " -"federated learning process." 
+"`Flower simulation PyTorch " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:131 -#: ../../source/tutorial-quickstart-mxnet.rst:228 -#: ../../source/tutorial-quickstart-pytorch.rst:205 -#: ../../source/tutorial-quickstart-tensorflow.rst:100 +#: ../../source/ref-faq.rst:11 msgid "" -"For simple workloads we can start a Flower server and leave all the " -"configuration possibilities at their default values. In a file named " -":code:`server.py`, import Flower and start the server:" +"`Flower simulation TensorFlow/Keras " +"`_" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:142 -#: ../../source/tutorial-quickstart-mxnet.rst:239 -#: ../../source/tutorial-quickstart-pytorch.rst:216 -#: ../../source/tutorial-quickstart-scikitlearn.rst:215 -#: ../../source/tutorial-quickstart-tensorflow.rst:112 -msgid "Train the model, federated!" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:144 -#: ../../source/tutorial-quickstart-pytorch.rst:218 -#: ../../source/tutorial-quickstart-tensorflow.rst:114 -#: ../../source/tutorial-quickstart-xgboost.rst:525 +#: ../../source/ref-faq.rst:15 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. FL systems usually have a server and " -"multiple clients. We therefore have to start the server first:" +"Find the `blog post about federated learning on embedded device here " +"`_" +" and the corresponding `GitHub code example " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:152 -msgid "" -"Once the server is running we can start the clients in different " -"terminals. Build and run the client through your Xcode, one through Xcode" -" Simulator and the other by deploying it to your iPhone. To see more " -"about how to deploy your app to iPhone or Simulator visit `here " -"`_." 
+#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:156 +#: ../../source/ref-faq.rst:19 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system in your ios device. The full `source code " -"`_ for this " -"example can be found in :code:`examples/ios`." +"Yes, it does. Please take a look at our `blog post " +"`_ or check out the code examples:" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:-1 +#: ../../source/ref-faq.rst:21 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Jax to train a linear regression model on a scikit-learn dataset." +"`Android Kotlin example `_" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:5 -msgid "Quickstart JAX" +#: ../../source/ref-faq.rst:22 +msgid "`Android Java example `_" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with MXNet to train a Sequential model on MNIST." +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:5 -msgid "Quickstart MXNet" +#: ../../source/ref-faq.rst:26 +msgid "" +"Yes, of course. A list of available examples using Flower within a " +"blockchain environment is available here:" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:7 +#: ../../source/ref-faq.rst:28 msgid "" -"MXNet is no longer maintained and has been moved into `Attic " -"`_. As a result, we would " -"encourage you to use other ML frameworks alongise Flower, for example, " -"PyTorch. This tutorial might be removed in future versions of Flower." +"`Flower meets Nevermined GitHub Repository `_." 
msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:12 +#: ../../source/ref-faq.rst:29 msgid "" -"In this tutorial, we will learn how to train a :code:`Sequential` model " -"on MNIST using Flower and MXNet." +"`Flower meets Nevermined YouTube video " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:14 -#: ../../source/tutorial-quickstart-scikitlearn.rst:12 +#: ../../source/ref-faq.rst:30 msgid "" -"It is recommended to create a virtual environment and run everything " -"within this `virtualenv `_." +"`Flower meets KOSMoS `_." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:18 -#: ../../source/tutorial-quickstart-scikitlearn.rst:16 +#: ../../source/ref-faq.rst:31 msgid "" -"*Clients* are responsible for generating individual model parameter " -"updates for the model based on their local datasets. These updates are " -"then sent to the *server* which will aggregate them to produce an updated" -" global model. Finally, the *server* sends this improved version of the " -"model back to each *client*. A complete cycle of parameters updates is " -"called a *round*." +"`Flower meets Talan blog post `_ ." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:22 -#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +#: ../../source/ref-faq.rst:32 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running:" +"`Flower meets Talan GitHub Repository " +"`_ ." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:28 -msgid "Since we want to use MXNet, let's go ahead and install it:" +#: ../../source/ref-telemetry.md:1 +msgid "Telemetry" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:38 +#: ../../source/ref-telemetry.md:3 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. 
Our training " -"procedure and network architecture are based on MXNet´s `Hand-written " -"Digit Recognition tutorial " -"`_." +"The Flower open-source project collects **anonymous** usage metrics to " +"make well-informed decisions to improve Flower. Doing this enables the " +"Flower team to understand how Flower is used and what challenges users " +"might face." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:40 +#: ../../source/ref-telemetry.md:5 msgid "" -"In a file called :code:`client.py`, import Flower and MXNet related " -"packages:" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users that do not want to share anonymous usage metrics." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:55 -msgid "In addition, define the device allocation in MXNet with:" +#: ../../source/ref-telemetry.md:7 +msgid "Principles" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:61 -msgid "" -"We use MXNet to load MNIST, a popular image classification dataset of " -"handwritten digits for machine learning. The MXNet utility " -":code:`mx.test_utils.get_mnist()` downloads the training and test data." +#: ../../source/ref-telemetry.md:9 +msgid "We follow strong principles guarding anonymous usage metrics collection:" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:75 +#: ../../source/ref-telemetry.md:11 msgid "" -"Define the training and loss with MXNet. We train the model by looping " -"over the dataset, measure the corresponding loss, and optimize it." +"**Optional:** You will always be able to disable telemetry; read on to " +"learn “[How to opt-out](#how-to-opt-out)”." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:113 +#: ../../source/ref-telemetry.md:12 msgid "" -"Next, we define the validation of our machine learning model. We loop " -"over the test set and measure both loss and accuracy on the test set." 
+"**Anonymous:** The reported usage metrics are anonymous and do not " +"contain any personally identifiable information (PII). See “[Collected " +"metrics](#collected-metrics)” to understand what metrics are being " +"reported." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:137 +#: ../../source/ref-telemetry.md:13 msgid "" -"After defining the training and testing of a MXNet machine learning " -"model, we use these functions to implement a Flower client." +"**Transparent:** You can easily inspect what anonymous metrics are being " +"reported; see the section “[How to inspect what is being reported](#how-" +"to-inspect-what-is-being-reported)”" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:139 -msgid "Our Flower clients will use a simple :code:`Sequential` model:" +#: ../../source/ref-telemetry.md:14 +msgid "" +"**Open for feedback:** You can always reach out to us if you have " +"feedback; see the section “[How to contact us](#how-to-contact-us)” for " +"details." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:158 -msgid "" -"After loading the dataset with :code:`load_data()` we perform one forward" -" propagation to initialize the model and model parameters with " -":code:`model(init)`. Next, we implement a Flower client." +#: ../../source/ref-telemetry.md:16 +msgid "How to opt-out" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:160 -#: ../../source/tutorial-quickstart-pytorch.rst:144 -#: ../../source/tutorial-quickstart-tensorflow.rst:54 +#: ../../source/ref-telemetry.md:18 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to train the neural network we defined earlier)." 
+"When Flower starts, it will check for an environment variable called " +"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " +"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " +"client, simply do so by prepending your command as in:" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:166 +#: ../../source/ref-telemetry.md:24 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses MXNet. Implementing :code:`NumPyClient` usually means " -"defining the following methods (:code:`set_parameters` is optional " -"though):" +"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," +" `.bashrc` (or whatever configuration file applies to your environment) " +"to disable Flower telemetry permanently." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:172 -#: ../../source/tutorial-quickstart-pytorch.rst:156 -#: ../../source/tutorial-quickstart-scikitlearn.rst:109 -msgid "return the model weight as a list of NumPy ndarrays" +#: ../../source/ref-telemetry.md:26 +msgid "Collected metrics" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:173 -#: ../../source/tutorial-quickstart-pytorch.rst:157 -#: ../../source/tutorial-quickstart-scikitlearn.rst:111 -msgid ":code:`set_parameters` (optional)" +#: ../../source/ref-telemetry.md:28 +msgid "Flower telemetry collects the following metrics:" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:174 -#: ../../source/tutorial-quickstart-pytorch.rst:158 -#: ../../source/tutorial-quickstart-scikitlearn.rst:111 +#: ../../source/ref-telemetry.md:30 msgid "" -"update the local model weights with the parameters received from the " -"server" +"**Flower version.** Understand which versions of Flower are currently " +"being used. 
This helps us to decide whether we should invest effort into " +"releasing a patch version for an older version of Flower or instead use " +"the bandwidth to build new features." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:176 -#: ../../source/tutorial-quickstart-pytorch.rst:160 -#: ../../source/tutorial-quickstart-scikitlearn.rst:114 -msgid "set the local model weights" +#: ../../source/ref-telemetry.md:32 +msgid "" +"**Operating system.** Enables us to answer questions such as: *Should we " +"create more guides for Linux, macOS, or Windows?*" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:177 -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:115 -msgid "train the local model" +#: ../../source/ref-telemetry.md:34 +msgid "" +"**Python version.** Knowing the Python version helps us, for example, to " +"decide whether we should invest effort into supporting old versions of " +"Python or stop supporting them and start taking advantage of new Python " +"features." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:178 -#: ../../source/tutorial-quickstart-pytorch.rst:162 -#: ../../source/tutorial-quickstart-scikitlearn.rst:116 -msgid "receive the updated local model weights" +#: ../../source/ref-telemetry.md:36 +msgid "" +"**Hardware properties.** Understanding the hardware environment that " +"Flower is being used in helps to decide whether we should, for example, " +"put more effort into supporting low-resource environments." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:180 -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:118 -msgid "test the local model" +#: ../../source/ref-telemetry.md:38 +msgid "" +"**Execution mode.** Knowing what execution mode Flower starts in enables " +"us to understand how heavily certain features are being used and better " +"prioritize based on that." 
msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:182 -msgid "They can be implemented in the following way:" +#: ../../source/ref-telemetry.md:40 +msgid "" +"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " +"time a Flower workload starts. This allows us to understand which device " +"types not only start Flower workloads but also successfully complete " +"them." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:212 +#: ../../source/ref-telemetry.md:42 msgid "" -"We can now create an instance of our class :code:`MNISTClient` and add " -"one line to actually run this client:" +"**Source.** Flower telemetry tries to store a random source ID in " +"`~/.flwr/source` the first time a telemetry event is generated. The " +"source ID is important to identify whether an issue is recurring or " +"whether an issue is triggered by multiple clusters running concurrently " +"(which often happens in simulation). For example, if a device runs " +"multiple workloads at the same time, and this results in an issue, then, " +"in order to reproduce the issue, multiple workloads must be started at " +"the same time." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:219 +#: ../../source/ref-telemetry.md:44 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()` or " -":code:`fl.client.start_numpy_client()`. The string " -":code:`\"0.0.0.0:8080\"` tells the client which server to connect to. In " -"our case we can run the server and the client on the same machine, " -"therefore we use :code:`\"0.0.0.0:8080\"`. If we run a truly federated " -"workload with the server and clients running on different machines, all " -"that needs to change is the :code:`server_address` we pass to the client." +"You may delete the source ID at any time. 
If you wish for all events " +"logged under a specific source ID to be deleted, you can send a deletion " +"request mentioning the source ID to `telemetry@flower.ai`. All events " +"related to that source ID will then be permanently deleted." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:241 +#: ../../source/ref-telemetry.md:46 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. Federated learning systems usually have a " -"server and multiple clients. We therefore have to start the server first:" +"We will not collect any personally identifiable information. If you think" +" any of the metrics collected could be misused in any way, please [get in" +" touch with us](#how-to-contact-us). We will update this page to reflect " +"any changes to the metrics collected and publish changes in the " +"changelog." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:249 -#: ../../source/tutorial-quickstart-pytorch.rst:226 -#: ../../source/tutorial-quickstart-scikitlearn.rst:224 -#: ../../source/tutorial-quickstart-tensorflow.rst:122 -#: ../../source/tutorial-quickstart-xgboost.rst:533 +#: ../../source/ref-telemetry.md:48 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Open a new terminal and start the first client:" +"If you think other metrics would be helpful for us to better guide our " +"decisions, please let us know! We will carefully review them; if we are " +"confident that they do not compromise user privacy, we may add them." 
msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:256 -#: ../../source/tutorial-quickstart-pytorch.rst:233 -#: ../../source/tutorial-quickstart-scikitlearn.rst:231 -#: ../../source/tutorial-quickstart-tensorflow.rst:129 -#: ../../source/tutorial-quickstart-xgboost.rst:540 -msgid "Open another terminal and start the second client:" +#: ../../source/ref-telemetry.md:50 +msgid "How to inspect what is being reported" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:262 -#: ../../source/tutorial-quickstart-pytorch.rst:239 -#: ../../source/tutorial-quickstart-scikitlearn.rst:237 -#: ../../source/tutorial-quickstart-xgboost.rst:546 +#: ../../source/ref-telemetry.md:52 msgid "" -"Each client will have its own dataset. You should now see how the " -"training does in the very first terminal (the one that started the " -"server):" +"We wanted to make it very easy for you to inspect what anonymous usage " +"metrics are reported. You can view all the reported telemetry information" +" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " +"is disabled by default. You may use logging independently from " +"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " +"without sending any metrics." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:294 +#: ../../source/ref-telemetry.md:58 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples" -"/quickstart-mxnet`." +"The inspect Flower telemetry without sending any anonymous usage metrics," +" use both environment variables:" msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:-1 +#: ../../source/ref-telemetry.md:64 +msgid "How to contact us" +msgstr "" + +#: ../../source/ref-telemetry.md:66 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Pandas to perform Federated Analytics." 
+"We want to hear from you. If you have any feedback or ideas on how to " +"improve the way we handle anonymous usage metrics, reach out to us via " +"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " +"(`telemetry@flower.ai`)." msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:5 -msgid "Quickstart Pandas" +#: ../../source/tutorial-quickstart-android.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an Android " +"app using Flower." msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:10 -msgid "Let's build a federated analytics system using Pandas and Flower!" +#: ../../source/tutorial-quickstart-android.rst:5 +msgid "Quickstart Android" msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:12 +#: ../../source/tutorial-quickstart-android.rst:10 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"Let's build a federated learning system using TFLite and Flower on " +"Android!" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:-1 +#: ../../source/tutorial-quickstart-android.rst:12 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch to train a CNN model on MNIST." +"Please refer to the `full code example " +"`_ to learn " +"more." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:13 +#: ../../source/tutorial-quickstart-fastai.rst:-1 msgid "" -"In this tutorial we will learn how to train a Convolutional Neural " -"Network on CIFAR10 using Flower and PyTorch." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with FastAI to train a vision model on CIFAR-10." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:15 -#: ../../source/tutorial-quickstart-xgboost.rst:39 -msgid "" -"First of all, it is recommended to create a virtual environment and run " -"everything within a `virtualenv `_." 
+#: ../../source/tutorial-quickstart-fastai.rst:5 +msgid "Quickstart fastai" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:29 -msgid "" -"Since we want to use PyTorch to solve a computer vision task, let's go " -"ahead and install PyTorch and the **torchvision** library:" +#: ../../source/tutorial-quickstart-fastai.rst:10 +msgid "Let's build a federated learning system using fastai and Flower!" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:39 +#: ../../source/tutorial-quickstart-fastai.rst:12 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. Our training " -"procedure and network architecture are based on PyTorch's `Deep Learning " -"with PyTorch " -"`_." +"Please refer to the `full code example " +"`_ " +"to learn more." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:41 +#: ../../source/tutorial-quickstart-huggingface.rst:-1 msgid "" -"In a file called :code:`client.py`, import Flower and PyTorch related " -"packages:" +"Check out this Federating Learning quickstart tutorial for using Flower " +"with HuggingFace Transformers in order to fine-tune an LLM." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:56 -msgid "In addition, we define the device allocation in PyTorch with:" +#: ../../source/tutorial-quickstart-huggingface.rst:5 +msgid "Quickstart 🤗 Transformers" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:62 +#: ../../source/tutorial-quickstart-huggingface.rst:10 msgid "" -"We use PyTorch to load CIFAR10, a popular colored image classification " -"dataset for machine learning. The PyTorch :code:`DataLoader()` downloads " -"the training and test data that are then normalized." +"Let's build a federated learning system using Hugging Face Transformers " +"and Flower!" 
msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:78 +#: ../../source/tutorial-quickstart-huggingface.rst:12 msgid "" -"Define the loss and optimizer with PyTorch. The training of the dataset " -"is done by looping over the dataset, measure the corresponding loss and " -"optimize it." +"We will leverage Hugging Face to federate the training of language models" +" over multiple clients using Flower. More specifically, we will fine-tune" +" a pre-trained Transformer model (distilBERT) for sequence classification" +" over a dataset of IMDB ratings. The end goal is to detect if a movie " +"rating is positive or negative." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:94 -msgid "" -"Define then the validation of the machine learning network. We loop over" -" the test set and measure the loss and accuracy of the test set." +#: ../../source/tutorial-quickstart-huggingface.rst:18 +msgid "Dependencies" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:113 +#: ../../source/tutorial-quickstart-huggingface.rst:20 msgid "" -"After defining the training and testing of a PyTorch machine learning " -"model, we use the functions for the Flower clients." +"To follow along this tutorial you will need to install the following " +"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, " +":code:`torch`, and :code:`transformers`. This can be done using " +":code:`pip`:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:115 -msgid "" -"The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 " -"Minute Blitz':" +#: ../../source/tutorial-quickstart-huggingface.rst:30 +msgid "Standard Hugging Face workflow" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:142 -msgid "" -"After loading the data set with :code:`load_data()` we define the Flower " -"interface." 
+#: ../../source/tutorial-quickstart-huggingface.rst:33 +msgid "Handling the data" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/tutorial-quickstart-huggingface.rst:35 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses PyTorch. Implementing :code:`NumPyClient` usually means " -"defining the following methods (:code:`set_parameters` is optional " -"though):" +"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` " +"library. We then need to tokenize the data and create :code:`PyTorch` " +"dataloaders, this is all done in the :code:`load_data` function:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:166 -msgid "which can be implemented in the following way:" +#: ../../source/tutorial-quickstart-huggingface.rst:81 +msgid "Training and testing the model" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:189 -#: ../../source/tutorial-quickstart-tensorflow.rst:82 +#: ../../source/tutorial-quickstart-huggingface.rst:83 msgid "" -"We can now create an instance of our class :code:`CifarClient` and add " -"one line to actually run this client:" +"Once we have a way of creating our trainloader and testloader, we can " +"take care of the training and testing. This is very similar to any " +":code:`PyTorch` training or testing loop:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:196 -#: ../../source/tutorial-quickstart-tensorflow.rst:90 -msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"[::]:8080\"` tells " -"the client which server to connect to. 
In our case we can run the server " -"and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +#: ../../source/tutorial-quickstart-huggingface.rst:121 +msgid "Creating the model itself" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:271 +#: ../../source/tutorial-quickstart-huggingface.rst:123 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples" -"/quickstart-pytorch`." +"To create the model itself, we will just load the pre-trained distillBERT" +" model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch Lightning to train an Auto Encoder model on MNIST." +#: ../../source/tutorial-quickstart-huggingface.rst:136 +msgid "Federating the example" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 -msgid "Quickstart PyTorch Lightning" +#: ../../source/tutorial-quickstart-huggingface.rst:139 +msgid "Creating the IMDBClient" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 +#: ../../source/tutorial-quickstart-huggingface.rst:141 msgid "" -"Let's build a horizontal federated learning system using PyTorch " -"Lightning and Flower!" +"To federate our example to multiple clients, we first need to write our " +"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). 
" +"This is very easy, as our model is a standard :code:`PyTorch` model:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +#: ../../source/tutorial-quickstart-huggingface.rst:169 msgid "" -"Please refer to the `full code example " -"`_ to learn more." +"The :code:`get_parameters` function lets the server get the client's " +"parameters. Inversely, the :code:`set_parameters` function allows the " +"server to send its parameters to the client. Finally, the :code:`fit` " +"function trains the model locally for the client, and the " +":code:`evaluate` function tests the model locally and returns the " +"relevant metrics." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with scikit-learn to train a linear regression model." +#: ../../source/tutorial-quickstart-huggingface.rst:175 +msgid "Starting the server" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:5 -msgid "Quickstart scikit-learn" +#: ../../source/tutorial-quickstart-huggingface.rst:177 +msgid "" +"Now that we have a way to instantiate clients, we need to create our " +"server in order to aggregate the results. Using Flower, this can be done " +"very easily by first choosing a strategy (here, we are using " +":code:`FedAvg`, which will define the global weights as the average of " +"all the clients' weights at each round) and then using the " +":code:`flwr.server.start_server` function:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:10 +#: ../../source/tutorial-quickstart-huggingface.rst:205 msgid "" -"In this tutorial, we will learn how to train a :code:`Logistic " -"Regression` model on MNIST using Flower and scikit-learn." +"The :code:`weighted_average` function is there to provide a way to " +"aggregate the metrics distributed amongst the clients (basically this " +"allows us to display a nice average accuracy and loss for every round)." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:26 -msgid "Since we want to use scikt-learn, let's go ahead and install it:" +#: ../../source/tutorial-quickstart-huggingface.rst:209 +msgid "Putting everything together" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:32 -msgid "Or simply install all dependencies using Poetry:" +#: ../../source/tutorial-quickstart-huggingface.rst:211 +msgid "We can now start client instances using:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:42 +#: ../../source/tutorial-quickstart-huggingface.rst:221 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. However, before " -"setting up the client and server, we will define all functionalities that" -" we need for our federated learning setup within :code:`utils.py`. The " -":code:`utils.py` contains different functions defining all the machine " -"learning basics:" +"And they will be able to connect to the server and start the federated " +"training." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:45 -msgid ":code:`get_model_parameters()`" +#: ../../source/tutorial-quickstart-huggingface.rst:223 +msgid "" +"If you want to check out everything put together, you should check out " +"the `full code example `_ ." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:46 -msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +#: ../../source/tutorial-quickstart-huggingface.rst:226 +msgid "" +"Of course, this is a very basic example, and a lot can be added or " +"modified, it was just to showcase how simply we could federate a Hugging " +"Face workflow using Flower." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:47 -msgid ":code:`set_model_params()`" +#: ../../source/tutorial-quickstart-huggingface.rst:229 +msgid "" +"Note that in this example we used :code:`PyTorch`, but we could have very" +" well used :code:`TensorFlow`." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:48 -msgid "Sets the parameters of a :code:`sklean` LogisticRegression model" +#: ../../source/tutorial-quickstart-ios.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an iOS app " +"using Flower to train a neural network on MNIST." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:49 -msgid ":code:`set_initial_params()`" +#: ../../source/tutorial-quickstart-ios.rst:5 +msgid "Quickstart iOS" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid "Initializes the model parameters that the Flower server will ask for" +#: ../../source/tutorial-quickstart-ios.rst:10 +msgid "" +"In this tutorial we will learn how to train a Neural Network on MNIST " +"using Flower and CoreML on iOS devices." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:51 -msgid ":code:`load_mnist()`" +#: ../../source/tutorial-quickstart-ios.rst:12 +msgid "" +"First of all, for running the Flower Python server, it is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `. For the Flower client " +"implementation in iOS, it is recommended to use Xcode as our IDE." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:52 -msgid "Loads the MNIST dataset using OpenML" +#: ../../source/tutorial-quickstart-ios.rst:15 +msgid "" +"Our example consists of one Python *server* and two iPhone *clients* that" +" all have the same model." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:53 -msgid ":code:`shuffle()`" +#: ../../source/tutorial-quickstart-ios.rst:17 +msgid "" +"*Clients* are responsible for generating individual weight updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:54 -msgid "Shuffles data and its label" +#: ../../source/tutorial-quickstart-ios.rst:21 +msgid "" +"Now that we have a rough idea of what is going on, let's get started to " +"setup our Flower server environment. We first need to install Flower. You" +" can do this by using pip:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:56 -msgid ":code:`partition()`" +#: ../../source/tutorial-quickstart-ios.rst:27 +msgid "Or Poetry:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:56 -msgid "Splits datasets into a number of partitions" +#: ../../source/tutorial-quickstart-ios.rst:34 +#: ../../source/tutorial-quickstart-mxnet.rst:36 +#: ../../source/tutorial-quickstart-pytorch.rst:37 +#: ../../source/tutorial-quickstart-scikitlearn.rst:40 +#: ../../source/tutorial-quickstart-tensorflow.rst:29 +#: ../../source/tutorial-quickstart-xgboost.rst:55 +msgid "Flower Client" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:58 +#: ../../source/tutorial-quickstart-ios.rst:36 msgid "" -"Please check out :code:`utils.py` `here " -"`_ for more details. The pre-defined functions are used in" -" the :code:`client.py` and imported. 
The :code:`client.py` also requires " -"to import several packages such as Flower and scikit-learn:" -msgstr "" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:73 -msgid "" -"We load the MNIST dataset from `OpenML `_, " -"a popular image classification dataset of handwritten digits for machine " -"learning. The utility :code:`utils.load_mnist()` downloads the training " -"and test data. The training set is split afterwards into 10 partitions " -"with :code:`utils.partition()`." +"Now that we have all our dependencies installed, let's run a simple " +"distributed training using CoreML as our local training pipeline and " +"MNIST as our dataset. For simplicity reasons we will use the complete " +"Flower client with CoreML, that has been implemented and stored inside " +"the Swift SDK. The client implementation can be seen below:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:85 +#: ../../source/tutorial-quickstart-ios.rst:72 msgid "" -"Next, the logistic regression model is defined and initialized with " -":code:`utils.set_initial_params()`." +"Let's create a new application project in Xcode and add :code:`flwr` as a" +" dependency in your project. For our application, we will store the logic" +" of our app in :code:`FLiOSModel.swift` and the UI elements in " +":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" +" in this quickstart. Please refer to the `full code example " +"`_ to learn more " +"about the app." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:97 -msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to fit the logistic regression we defined earlier)." 
+#: ../../source/tutorial-quickstart-ios.rst:75 +msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:103 +#: ../../source/tutorial-quickstart-ios.rst:83 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses scikit-learn. Implementing :code:`NumPyClient` usually " -"means defining the following methods (:code:`set_parameters` is optional " -"though):" +"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " +"will be bundled inside the application during deployment to your iOS " +"device. We need to pass the url to access mlmodel and run CoreML machine " +"learning processes, it can be retrieved by calling the function " +":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " +"into :code:`MLBatchProvider` object. The preprocessing is done inside " +":code:`DataLoader.swift`." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:112 -msgid "is directly imported with :code:`utils.set_model_params()`" +#: ../../source/tutorial-quickstart-ios.rst:99 +msgid "" +"Since CoreML does not allow the model parameters to be seen before " +"training, and accessing the model parameters during or after the training" +" can only be done by specifying the layer name, we need to know this " +"information beforehand, through looking at the model specification, which" +" are written as proto files. The implementation can be seen in " +":code:`MLModelInspect`." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:120 -msgid "The methods can be implemented in the following way:" +#: ../../source/tutorial-quickstart-ios.rst:102 +msgid "" +"After we have all of the necessary information, let's create our Flower " +"client." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:143 +#: ../../source/tutorial-quickstart-ios.rst:117 msgid "" -"We can now create an instance of our class :code:`MnistClient` and add " -"one line to actually run this client:" +"Then start the Flower gRPC client and start communicating to the server " +"by passing our Flower client to the function :code:`startFlwrGRPC`." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:150 +#: ../../source/tutorial-quickstart-ios.rst:124 msgid "" "That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" -" the client which server to connect to. In our case we can run the server" -" and the client on the same machine, therefore we use " -":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we pass to the client." +"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " +"The attribute :code:`hostname` and :code:`port` tells the client which " +"server to connect to. This can be done by entering the hostname and port " +"in the application before clicking the start button to start the " +"federated learning process." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:159 -msgid "" -"The following Flower server is a little bit more advanced and returns an " -"evaluation function for the server-side evaluation. First, we import " -"again all required libraries such as Flower and scikit-learn." 
+#: ../../source/tutorial-quickstart-ios.rst:129 +#: ../../source/tutorial-quickstart-mxnet.rst:226 +#: ../../source/tutorial-quickstart-pytorch.rst:203 +#: ../../source/tutorial-quickstart-scikitlearn.rst:157 +#: ../../source/tutorial-quickstart-tensorflow.rst:98 +#: ../../source/tutorial-quickstart-xgboost.rst:309 +msgid "Flower Server" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:162 -msgid ":code:`server.py`, import Flower and start the server:" +#: ../../source/tutorial-quickstart-ios.rst:131 +#: ../../source/tutorial-quickstart-mxnet.rst:228 +#: ../../source/tutorial-quickstart-pytorch.rst:205 +#: ../../source/tutorial-quickstart-tensorflow.rst:100 +msgid "" +"For simple workloads we can start a Flower server and leave all the " +"configuration possibilities at their default values. In a file named " +":code:`server.py`, import Flower and start the server:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:173 -msgid "" -"The number of federated learning rounds is set in :code:`fit_round()` and" -" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " -"function is called after each federated learning round and gives you " -"information about loss and accuracy." +#: ../../source/tutorial-quickstart-ios.rst:142 +#: ../../source/tutorial-quickstart-mxnet.rst:239 +#: ../../source/tutorial-quickstart-pytorch.rst:216 +#: ../../source/tutorial-quickstart-scikitlearn.rst:215 +#: ../../source/tutorial-quickstart-tensorflow.rst:112 +msgid "Train the model, federated!" 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:198 +#: ../../source/tutorial-quickstart-ios.rst:144 +#: ../../source/tutorial-quickstart-pytorch.rst:218 +#: ../../source/tutorial-quickstart-tensorflow.rst:114 +#: ../../source/tutorial-quickstart-xgboost.rst:525 msgid "" -"The :code:`main` contains the server-side parameter initialization " -":code:`utils.set_initial_params()` as well as the aggregation strategy " -":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " -"federated averaging (or FedAvg), with two clients and evaluation after " -"each federated learning round. The server can be started with the command" -" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." +"With both client and server ready, we can now run everything and see " +"federated learning in action. FL systems usually have a server and " +"multiple clients. We therefore have to start the server first:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:217 +#: ../../source/tutorial-quickstart-ios.rst:152 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. Federated learning systems usually have a " -"server and multiple clients. We, therefore, have to start the server " -"first:" +"Once the server is running we can start the clients in different " +"terminals. Build and run the client through your Xcode, one through Xcode" +" Simulator and the other by deploying it to your iPhone. To see more " +"about how to deploy your app to iPhone or Simulator visit `here " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:271 +#: ../../source/tutorial-quickstart-ios.rst:156 msgid "" "Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples/sklearn-logreg-" -"mnist`." 
+"learning system in your ios device. The full `source code " +"`_ for this " +"example can be found in :code:`examples/ios`." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +#: ../../source/tutorial-quickstart-jax.rst:-1 msgid "" "Check out this Federated Learning quickstart tutorial for using Flower " -"with TensorFlow to train a MobilNetV2 model on CIFAR-10." +"with Jax to train a linear regression model on a scikit-learn dataset." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:5 -msgid "Quickstart TensorFlow" +#: ../../source/tutorial-quickstart-jax.rst:5 +msgid "Quickstart JAX" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:13 -msgid "Let's build a federated learning system in less than 20 lines of code!" +#: ../../source/tutorial-quickstart-mxnet.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with MXNet to train a Sequential model on MNIST." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:15 -msgid "Before Flower can be imported we have to install it:" +#: ../../source/tutorial-quickstart-mxnet.rst:5 +msgid "Quickstart MXNet" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:21 +#: ../../source/tutorial-quickstart-mxnet.rst:7 msgid "" -"Since we want to use the Keras API of TensorFlow (TF), we have to install" -" TF as well:" +"MXNet is no longer maintained and has been moved into `Attic " +"`_. As a result, we would " +"encourage you to use other ML frameworks alongside Flower, for example, " +"PyTorch. This tutorial might be removed in future versions of Flower." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:31 -msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#: ../../source/tutorial-quickstart-mxnet.rst:12 +msgid "" +"In this tutorial, we will learn how to train a :code:`Sequential` model " +"on MNIST using Flower and MXNet." 
msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:38 +#: ../../source/tutorial-quickstart-mxnet.rst:14 +#: ../../source/tutorial-quickstart-scikitlearn.rst:12 msgid "" -"We use the Keras utilities of TF to load CIFAR10, a popular colored image" -" classification dataset for machine learning. The call to " -":code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches " -"it locally, and then returns the entire training and test set as NumPy " -"ndarrays." +"It is recommended to create a virtual environment and run everything " +"within this :doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:47 +#: ../../source/tutorial-quickstart-mxnet.rst:16 +#: ../../source/tutorial-quickstart-pytorch.rst:17 +#: ../../source/tutorial-quickstart-scikitlearn.rst:14 msgid "" -"Next, we need a model. For the purpose of this tutorial, we use " -"MobilNetV2 with 10 output classes:" +"Our example consists of one *server* and two *clients* all having the " +"same model." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:60 +#: ../../source/tutorial-quickstart-mxnet.rst:18 +#: ../../source/tutorial-quickstart-scikitlearn.rst:16 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses Keras. The :code:`NumPyClient` interface defines three " -"methods which can be implemented in the following way:" +"*Clients* are responsible for generating individual model parameter " +"updates for the model based on their local datasets. These updates are " +"then sent to the *server* which will aggregate them to produce an updated" +" global model. Finally, the *server* sends this improved version of the " +"model back to each *client*. A complete cycle of parameters updates is " +"called a *round*." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:135 -msgid "Each client will have its own dataset." 
+#: ../../source/tutorial-quickstart-mxnet.rst:22 +#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +msgid "" +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. You can do this by running:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:137 -msgid "" -"You should now see how the training does in the very first terminal (the " -"one that started the server):" +#: ../../source/tutorial-quickstart-mxnet.rst:28 +msgid "Since we want to use MXNet, let's go ahead and install it:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:169 +#: ../../source/tutorial-quickstart-mxnet.rst:38 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this can be found in :code:`examples" -"/quickstart-tensorflow/client.py`." +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. Our training " +"procedure and network architecture are based on MXNet´s `Hand-written " +"Digit Recognition tutorial " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:-1 +#: ../../source/tutorial-quickstart-mxnet.rst:40 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with XGBoost to train classification models on trees." +"In a file called :code:`client.py`, import Flower and MXNet related " +"packages:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:5 -msgid "Quickstart XGBoost" +#: ../../source/tutorial-quickstart-mxnet.rst:55 +msgid "In addition, define the device allocation in MXNet with:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:14 -msgid "Federated XGBoost" +#: ../../source/tutorial-quickstart-mxnet.rst:61 +msgid "" +"We use MXNet to load MNIST, a popular image classification dataset of " +"handwritten digits for machine learning. 
The MXNet utility " +":code:`mx.test_utils.get_mnist()` downloads the training and test data." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:16 +#: ../../source/tutorial-quickstart-mxnet.rst:75 msgid "" -"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " -"implementation of gradient-boosted decision tree (**GBDT**), that " -"maximises the computational boundaries for boosted tree methods. It's " -"primarily designed to enhance both the performance and computational " -"speed of machine learning models. In XGBoost, trees are constructed " -"concurrently, unlike the sequential approach taken by GBDT." +"Define the training and loss with MXNet. We train the model by looping " +"over the dataset, measure the corresponding loss, and optimize it." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:20 +#: ../../source/tutorial-quickstart-mxnet.rst:113 msgid "" -"Often, for tabular data on medium-sized datasets with fewer than 10k " -"training examples, XGBoost surpasses the results of deep learning " -"techniques." +"Next, we define the validation of our machine learning model. We loop " +"over the test set and measure both loss and accuracy on the test set." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:23 -msgid "Why federated XGBoost?" +#: ../../source/tutorial-quickstart-mxnet.rst:137 +msgid "" +"After defining the training and testing of a MXNet machine learning " +"model, we use these functions to implement a Flower client." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:25 +#: ../../source/tutorial-quickstart-mxnet.rst:139 +msgid "Our Flower clients will use a simple :code:`Sequential` model:" +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:158 msgid "" -"Indeed, as the demand for data privacy and decentralized learning grows, " -"there's an increasing requirement to implement federated XGBoost systems " -"for specialised applications, like survival analysis and financial fraud " -"detection." +"After loading the dataset with :code:`load_data()` we perform one forward" +" propagation to initialize the model and model parameters with " +":code:`model(init)`. Next, we implement a Flower client." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:27 +#: ../../source/tutorial-quickstart-mxnet.rst:160 +#: ../../source/tutorial-quickstart-pytorch.rst:144 +#: ../../source/tutorial-quickstart-tensorflow.rst:54 msgid "" -"Federated learning ensures that raw data remains on the local device, " -"making it an attractive approach for sensitive domains where data " -"security and privacy are paramount. Given the robustness and efficiency " -"of XGBoost, combining it with federated learning offers a promising " -"solution for these specific challenges." +"The Flower server interacts with clients through an interface called " +":code:`Client`. When the server selects a particular client for training," +" it sends training instructions over the network. The client receives " +"those instructions and calls one of the :code:`Client` methods to run " +"your code (i.e., to train the neural network we defined earlier)." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:30 +#: ../../source/tutorial-quickstart-mxnet.rst:166 msgid "" -"In this tutorial we will learn how to train a federated XGBoost model on " -"HIGGS dataset using Flower and :code:`xgboost` package. 
We use a simple " -"example (`full code xgboost-quickstart " -"`_)" -" with two *clients* and one *server* to demonstrate how federated XGBoost" -" works, and then we dive into a more complex example (`full code xgboost-" -"comprehensive `_) to run various experiments." +"Flower provides a convenience class called :code:`NumPyClient` which " +"makes it easier to implement the :code:`Client` interface when your " +"workload uses MXNet. Implementing :code:`NumPyClient` usually means " +"defining the following methods (:code:`set_parameters` is optional " +"though):" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:37 -msgid "Environment Setup" +#: ../../source/tutorial-quickstart-mxnet.rst:172 +#: ../../source/tutorial-quickstart-pytorch.rst:156 +#: ../../source/tutorial-quickstart-scikitlearn.rst:109 +msgid "return the model weight as a list of NumPy ndarrays" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:41 -msgid "" -"We first need to install Flower and Flower Datasets. You can do this by " -"running :" +#: ../../source/tutorial-quickstart-mxnet.rst:173 +#: ../../source/tutorial-quickstart-pytorch.rst:157 +#: ../../source/tutorial-quickstart-scikitlearn.rst:111 +msgid ":code:`set_parameters` (optional)" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:47 +#: ../../source/tutorial-quickstart-mxnet.rst:174 +#: ../../source/tutorial-quickstart-pytorch.rst:158 +#: ../../source/tutorial-quickstart-scikitlearn.rst:111 msgid "" -"Since we want to use :code:`xgboost` package to build up XGBoost trees, " -"let's go ahead and install :code:`xgboost`:" +"update the local model weights with the parameters received from the " +"server" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:57 -msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. 
Now that we have all our " -"dependencies installed, let's run a simple distributed training with two " -"clients and one server." +#: ../../source/tutorial-quickstart-mxnet.rst:176 +#: ../../source/tutorial-quickstart-pytorch.rst:160 +#: ../../source/tutorial-quickstart-scikitlearn.rst:114 +msgid "set the local model weights" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:60 -msgid "" -"In a file called :code:`client.py`, import xgboost, Flower, Flower " -"Datasets and other related functions:" +#: ../../source/tutorial-quickstart-mxnet.rst:177 +#: ../../source/tutorial-quickstart-pytorch.rst:161 +#: ../../source/tutorial-quickstart-scikitlearn.rst:115 +msgid "train the local model" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:87 -msgid "Dataset partition and hyper-parameter selection" +#: ../../source/tutorial-quickstart-mxnet.rst:178 +#: ../../source/tutorial-quickstart-pytorch.rst:162 +#: ../../source/tutorial-quickstart-scikitlearn.rst:116 +msgid "receive the updated local model weights" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:89 -msgid "" -"Prior to local training, we require loading the HIGGS dataset from Flower" -" Datasets and conduct data partitioning for FL:" +#: ../../source/tutorial-quickstart-mxnet.rst:180 +#: ../../source/tutorial-quickstart-pytorch.rst:164 +#: ../../source/tutorial-quickstart-scikitlearn.rst:118 +msgid "test the local model" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:102 -msgid "" -"In this example, we split the dataset into two partitions with uniform " -"distribution (:code:`IidPartitioner(num_partitions=2)`). 
Then, we load " -"the partition for the given client based on :code:`node_id`:" +#: ../../source/tutorial-quickstart-mxnet.rst:182 +msgid "They can be implemented in the following way:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:121 +#: ../../source/tutorial-quickstart-mxnet.rst:212 msgid "" -"After that, we do train/test splitting on the given partition (client's " -"local data), and transform data format for :code:`xgboost` package." +"We can now create an instance of our class :code:`MNISTClient` and add " +"one line to actually run this client:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:134 +#: ../../source/tutorial-quickstart-mxnet.rst:219 msgid "" -"The functions of :code:`train_test_split` and " -":code:`transform_dataset_to_dmatrix` are defined as below:" +"That's it for the client. We only have to implement :code:`Client` or " +":code:`NumPyClient` and call :code:`fl.client.start_client()` or " +":code:`fl.client.start_numpy_client()`. The string " +":code:`\"0.0.0.0:8080\"` tells the client which server to connect to. In " +"our case we can run the server and the client on the same machine, " +"therefore we use :code:`\"0.0.0.0:8080\"`. If we run a truly federated " +"workload with the server and clients running on different machines, all " +"that needs to change is the :code:`server_address` we pass to the client." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:158 -msgid "Finally, we define the hyper-parameters used for XGBoost training." +#: ../../source/tutorial-quickstart-mxnet.rst:241 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. Federated learning systems usually have a " +"server and multiple clients. 
We therefore have to start the server first:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:174 +#: ../../source/tutorial-quickstart-mxnet.rst:249 +#: ../../source/tutorial-quickstart-pytorch.rst:226 +#: ../../source/tutorial-quickstart-scikitlearn.rst:224 +#: ../../source/tutorial-quickstart-tensorflow.rst:122 +#: ../../source/tutorial-quickstart-xgboost.rst:533 msgid "" -"The :code:`num_local_round` represents the number of iterations for local" -" tree boost. We use CPU for the training in default. One can shift it to " -"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " -"evaluation metric." +"Once the server is running we can start the clients in different " +"terminals. Open a new terminal and start the first client:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:181 -msgid "Flower client definition for XGBoost" +#: ../../source/tutorial-quickstart-mxnet.rst:256 +#: ../../source/tutorial-quickstart-pytorch.rst:233 +#: ../../source/tutorial-quickstart-scikitlearn.rst:231 +#: ../../source/tutorial-quickstart-tensorflow.rst:129 +#: ../../source/tutorial-quickstart-xgboost.rst:540 +msgid "Open another terminal and start the second client:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:183 +#: ../../source/tutorial-quickstart-mxnet.rst:262 +#: ../../source/tutorial-quickstart-pytorch.rst:239 +#: ../../source/tutorial-quickstart-scikitlearn.rst:237 +#: ../../source/tutorial-quickstart-xgboost.rst:546 msgid "" -"After loading the dataset we define the Flower client. We follow the " -"general rule to define :code:`XgbClient` class inherited from " -":code:`fl.client.Client`." +"Each client will have its own dataset. 
You should now see how the " +"training does in the very first terminal (the one that started the " +"server):" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:193 +#: ../../source/tutorial-quickstart-mxnet.rst:294 msgid "" -"The :code:`self.bst` is used to keep the Booster objects that remain " -"consistent across rounds, allowing them to store predictions from trees " -"integrated in earlier rounds and maintain other essential data structures" -" for training." +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in :code:`examples" +"/quickstart-mxnet`." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:196 +#: ../../source/tutorial-quickstart-pandas.rst:-1 msgid "" -"Then, we override :code:`get_parameters`, :code:`fit` and " -":code:`evaluate` methods insides :code:`XgbClient` class as follows." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Pandas to perform Federated Analytics." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:210 -msgid "" -"Unlike neural network training, XGBoost trees are not started from a " -"specified random weights. In this case, we do not use " -":code:`get_parameters` and :code:`set_parameters` to initialise model " -"parameters for XGBoost. As a result, let's return an empty tensor in " -":code:`get_parameters` when it is called by the server at the first " -"round." +#: ../../source/tutorial-quickstart-pandas.rst:5 +msgid "Quickstart Pandas" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:251 -msgid "" -"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " -"up the first set of trees. the returned Booster object and config are " -"stored in :code:`self.bst` and :code:`self.config`, respectively. 
From " -"the second round, we load the global model sent from server to " -":code:`self.bst`, and then update model weights on local training data " -"with function :code:`local_boost` as follows:" +#: ../../source/tutorial-quickstart-pandas.rst:10 +msgid "Let's build a federated analytics system using Pandas and Flower!" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:269 +#: ../../source/tutorial-quickstart-pandas.rst:12 msgid "" -"Given :code:`num_local_round`, we update trees by calling " -":code:`self.bst.update` method. After training, the last " -":code:`N=num_local_round` trees will be extracted to send to the server." +"Please refer to the `full code example " +"`_ " +"to learn more." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:291 +#: ../../source/tutorial-quickstart-pytorch.rst:-1 msgid "" -"In :code:`evaluate`, we call :code:`self.bst.eval_set` function to " -"conduct evaluation on valid set. The AUC value will be returned." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch to train a CNN model on MNIST." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:294 +#: ../../source/tutorial-quickstart-pytorch.rst:13 msgid "" -"Now, we can create an instance of our class :code:`XgbClient` and add one" -" line to actually run this client:" +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR10 using Flower and PyTorch." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:300 +#: ../../source/tutorial-quickstart-pytorch.rst:15 +#: ../../source/tutorial-quickstart-xgboost.rst:39 msgid "" -"That's it for the client. We only have to implement :code:`Client`and " -"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " -"tells the client which server to connect to. In our case we can run the " -"server and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. 
If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"First of all, it is recommended to create a virtual environment and run " +"everything within a :doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:311 +#: ../../source/tutorial-quickstart-pytorch.rst:19 msgid "" -"These updates are then sent to the *server* which will aggregate them to " -"produce a better model. Finally, the *server* sends this improved version" -" of the model back to each *client* to finish a complete FL round." +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:314 +#: ../../source/tutorial-quickstart-pytorch.rst:23 msgid "" -"In a file named :code:`server.py`, import Flower and FedXgbBagging from " -":code:`flwr.server.strategy`." +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. You can do this by running :" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:316 -msgid "We first define a strategy for XGBoost bagging aggregation." +#: ../../source/tutorial-quickstart-pytorch.rst:29 +msgid "" +"Since we want to use PyTorch to solve a computer vision task, let's go " +"ahead and install PyTorch and the **torchvision** library:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:339 +#: ../../source/tutorial-quickstart-pytorch.rst:39 msgid "" -"We use two clients for this example. 
An " -":code:`evaluate_metrics_aggregation` function is defined to collect and " -"wighted average the AUC values from clients." +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. Our training " +"procedure and network architecture are based on PyTorch's `Deep Learning " +"with PyTorch " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:342 -msgid "Then, we start the server:" +#: ../../source/tutorial-quickstart-pytorch.rst:41 +msgid "" +"In a file called :code:`client.py`, import Flower and PyTorch related " +"packages:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:354 -msgid "Tree-based bagging aggregation" +#: ../../source/tutorial-quickstart-pytorch.rst:56 +msgid "In addition, we define the device allocation in PyTorch with:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:356 +#: ../../source/tutorial-quickstart-pytorch.rst:62 msgid "" -"You must be curious about how bagging aggregation works. Let's look into " -"the details." +"We use PyTorch to load CIFAR10, a popular colored image classification " +"dataset for machine learning. The PyTorch :code:`DataLoader()` downloads " +"the training and test data that are then normalized." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:358 +#: ../../source/tutorial-quickstart-pytorch.rst:78 msgid "" -"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " -":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." -" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " -"and :code:`evaluate` methods as follows:" +"Define the loss and optimizer with PyTorch. The training of the dataset " +"is done by looping over the dataset, measure the corresponding loss and " +"optimize it." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:454 +#: ../../source/tutorial-quickstart-pytorch.rst:94 msgid "" -"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " -"trees by calling :code:`aggregate()` function:" +"Define then the validation of the machine learning network. We loop over" +" the test set and measure the loss and accuracy of the test set." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:513 +#: ../../source/tutorial-quickstart-pytorch.rst:113 msgid "" -"In this function, we first fetch the number of trees and the number of " -"parallel trees for the current and previous model by calling " -":code:`_get_tree_nums`. Then, the fetched information will be aggregated." -" After that, the trees (containing model weights) are aggregated to " -"generate a new tree model." +"After defining the training and testing of a PyTorch machine learning " +"model, we use the functions for the Flower clients." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:518 +#: ../../source/tutorial-quickstart-pytorch.rst:115 msgid "" -"After traversal of all clients' models, a new global model is generated, " -"followed by the serialisation, and sending back to each client." -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:523 -msgid "Launch Federated XGBoost!" +"The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 " +"Minute Blitz':" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:585 +#: ../../source/tutorial-quickstart-pytorch.rst:142 msgid "" -"Congratulations! You've successfully built and run your first federated " -"XGBoost system. The AUC values can be checked in " -":code:`metrics_distributed`. One can see that the average AUC increases " -"over FL rounds." +"After loading the data set with :code:`load_data()` we define the Flower " +"interface." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:590 +#: ../../source/tutorial-quickstart-pytorch.rst:150 msgid "" -"The full `source code `_ for this example can be found in :code:`examples" -"/xgboost-quickstart`." +"Flower provides a convenience class called :code:`NumPyClient` which " +"makes it easier to implement the :code:`Client` interface when your " +"workload uses PyTorch. Implementing :code:`NumPyClient` usually means " +"defining the following methods (:code:`set_parameters` is optional " +"though):" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:594 -msgid "Comprehensive Federated XGBoost" +#: ../../source/tutorial-quickstart-pytorch.rst:166 +msgid "which can be implemented in the following way:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:596 +#: ../../source/tutorial-quickstart-pytorch.rst:189 +#: ../../source/tutorial-quickstart-tensorflow.rst:82 msgid "" -"Now that you have known how federated XGBoost work with Flower, it's time" -" to run some more comprehensive experiments by customising the " -"experimental settings. In the xgboost-comprehensive example (`full code " -"`_), we provide more options to define various experimental" -" setups, including aggregation strategies, data partitioning and " -"centralised/distributed evaluation. We also support `Flower simulation " -"`_ making " -"it easy to simulate large client cohorts in a resource-aware manner. " -"Let's take a look!" +"We can now create an instance of our class :code:`CifarClient` and add " +"one line to actually run this client:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:603 -msgid "Cyclic training" +#: ../../source/tutorial-quickstart-pytorch.rst:196 +#: ../../source/tutorial-quickstart-tensorflow.rst:90 +msgid "" +"That's it for the client. We only have to implement :code:`Client` or " +":code:`NumPyClient` and call :code:`fl.client.start_client()`. 
If you " +"implement a client of type :code:`NumPyClient` you'll need to first call " +"its :code:`to_client()` method. The string :code:`\"[::]:8080\"` tells " +"the client which server to connect to. In our case we can run the server " +"and the client on the same machine, therefore we use " +":code:`\"[::]:8080\"`. If we run a truly federated workload with the " +"server and clients running on different machines, all that needs to " +"change is the :code:`server_address` we point the client at." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:605 +#: ../../source/tutorial-quickstart-pytorch.rst:271 msgid "" -"In addition to bagging aggregation, we offer a cyclic training scheme, " -"which performs FL in a client-by-client fashion. Instead of aggregating " -"multiple clients, there is only one single client participating in the " -"training per round in the cyclic training scenario. The trained local " -"XGBoost trees will be passed to the next client as an initialised model " -"for next round's boosting." +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in :code:`examples" +"/quickstart-pytorch`." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:609 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 msgid "" -"To do this, we first customise a :code:`ClientManager` in " -":code:`server_utils.py`:" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch Lightning to train an Auto Encoder model on MNIST." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:649 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 +msgid "Quickstart PyTorch Lightning" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 msgid "" -"The customised :code:`ClientManager` samples all available clients in " -"each FL round based on the order of connection to the server. 
Then, we " -"define a new strategy :code:`FedXgbCyclic` in " -":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " -"select only one client in given round and pass the received model to next" -" client." +"Let's build a horizontal federated learning system using PyTorch " +"Lightning and Flower!" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:690 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 msgid "" -"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " -"Instead, we just make a copy of the received client model as global model" -" by overriding :code:`aggregate_fit`." +"Please refer to the `full code example " +"`_ to learn more." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:693 +#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 msgid "" -"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" -" methods ensure the clients to be sequentially selected given FL round:" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with scikit-learn to train a linear regression model." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:757 -msgid "Customised data partitioning" +#: ../../source/tutorial-quickstart-scikitlearn.rst:5 +msgid "Quickstart scikit-learn" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:759 +#: ../../source/tutorial-quickstart-scikitlearn.rst:10 msgid "" -"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" -" to instantiate the data partitioner based on the given " -":code:`num_partitions` and :code:`partitioner_type`. Currently, we " -"provide four supported partitioner type to simulate the uniformity/non-" -"uniformity in data quantity (uniform, linear, square, exponential)." +"In this tutorial, we will learn how to train a :code:`Logistic " +"Regression` model on MNIST using Flower and scikit-learn." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:790 -msgid "Customised centralised/distributed evaluation" +#: ../../source/tutorial-quickstart-scikitlearn.rst:26 +msgid "Since we want to use scikit-learn, let's go ahead and install it:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:792 -msgid "" -"To facilitate centralised evaluation, we define a function in " -":code:`server_utils.py`:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:32 +msgid "Or simply install all dependencies using Poetry:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:824 +#: ../../source/tutorial-quickstart-scikitlearn.rst:42 msgid "" -"This function returns a evaluation function which instantiates a " -":code:`Booster` object and loads the global model weights to it. The " -"evaluation is conducted by calling :code:`eval_set()` method, and the " -"tested AUC value is reported." +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. However, before " +"setting up the client and server, we will define all functionalities that" +" we need for our federated learning setup within :code:`utils.py`. The " +":code:`utils.py` contains different functions defining all the machine " +"learning basics:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:827 -msgid "" -"As for distributed evaluation on the clients, it's same as the quick-" -"start example by overriding the :code:`evaluate()` method insides the " -":code:`XgbClient` class in :code:`client_utils.py`." 
+#: ../../source/tutorial-quickstart-scikitlearn.rst:45 +msgid ":code:`get_model_parameters()`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:831 -msgid "Flower simulation" +#: ../../source/tutorial-quickstart-scikitlearn.rst:46 +msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:832 -msgid "" -"We also provide an example code (:code:`sim.py`) to use the simulation " -"capabilities of Flower to simulate federated XGBoost training on either a" -" single machine or a cluster of machines." +#: ../../source/tutorial-quickstart-scikitlearn.rst:47 +msgid ":code:`set_model_params()`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:866 -msgid "" -"After importing all required packages, we define a :code:`main()` " -"function to perform the simulation process:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:48 +msgid "Sets the parameters of a :code:`sklean` LogisticRegression model" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:921 -msgid "" -"We first load the dataset and perform data partitioning, and the pre-" -"processed data is stored in a :code:`list`. After the simulation begins, " -"the clients won't need to pre-process their partitions again." 
+#: ../../source/tutorial-quickstart-scikitlearn.rst:49 +msgid ":code:`set_initial_params()`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:924 -msgid "Then, we define the strategies and other hyper-parameters:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:50 +msgid "Initializes the model parameters that the Flower server will ask for" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:975 -msgid "" -"After that, we start the simulation by calling " -":code:`fl.simulation.start_simulation`:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:51 +msgid ":code:`load_mnist()`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:995 -msgid "" -"One of key parameters for :code:`start_simulation` is :code:`client_fn` " -"which returns a function to construct a client. We define it as follows:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +msgid "Loads the MNIST dataset using OpenML" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1038 -msgid "Arguments parser" +#: ../../source/tutorial-quickstart-scikitlearn.rst:53 +msgid ":code:`shuffle()`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1040 -msgid "" -"In :code:`utils.py`, we define the arguments parsers for clients, server " -"and simulation, allowing users to specify different experimental " -"settings. Let's first see the sever side:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:54 +msgid "Shuffles data and its label" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1086 -msgid "" -"This allows user to specify training strategies / the number of total " -"clients / FL rounds / participating clients / clients for evaluation, and" -" evaluation fashion. Note that with :code:`--centralised-eval`, the sever" -" will do centralised evaluation and all functionalities for client " -"evaluation will be disabled." 
+#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +msgid ":code:`partition()`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1090 -msgid "Then, the argument parser on client side:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +msgid "Splits datasets into a number of partitions" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1144 +#: ../../source/tutorial-quickstart-scikitlearn.rst:58 msgid "" -"This defines various options for client data partitioning. Besides, " -"clients also have an option to conduct evaluation on centralised test set" -" by setting :code:`--centralised-eval`, as well as an option to perform " -"scaled learning rate based on the number of clients by setting :code" -":`--scaled-lr`." +"Please check out :code:`utils.py` `here " +"`_ for more details. The pre-defined functions are used in" +" the :code:`client.py` and imported. The :code:`client.py` also requires " +"to import several packages such as Flower and scikit-learn:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1148 -msgid "We also have an argument parser for simulation:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:73 +msgid "" +"We load the MNIST dataset from `OpenML " +"`_, a popular " +"image classification dataset of handwritten digits for machine learning. " +"The utility :code:`utils.load_mnist()` downloads the training and test " +"data. The training set is split afterwards into 10 partitions with " +":code:`utils.partition()`." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1226 -msgid "This integrates all arguments for both client and server sides." +#: ../../source/tutorial-quickstart-scikitlearn.rst:85 +msgid "" +"Next, the logistic regression model is defined and initialized with " +":code:`utils.set_initial_params()`." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1229 -msgid "Example commands" +#: ../../source/tutorial-quickstart-scikitlearn.rst:97 +msgid "" +"The Flower server interacts with clients through an interface called " +":code:`Client`. When the server selects a particular client for training," +" it sends training instructions over the network. The client receives " +"those instructions and calls one of the :code:`Client` methods to run " +"your code (i.e., to fit the logistic regression we defined earlier)." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1231 +#: ../../source/tutorial-quickstart-scikitlearn.rst:103 msgid "" -"To run a centralised evaluated experiment with bagging strategy on 5 " -"clients with exponential distribution for 50 rounds, we first start the " -"server as below:" +"Flower provides a convenience class called :code:`NumPyClient` which " +"makes it easier to implement the :code:`Client` interface when your " +"workload uses scikit-learn. Implementing :code:`NumPyClient` usually " +"means defining the following methods (:code:`set_parameters` is optional " +"though):" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1238 -msgid "Then, on each client terminal, we start the clients:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:112 +msgid "is directly imported with :code:`utils.set_model_params()`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1244 -msgid "To run the same experiment with Flower simulation:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:120 +msgid "The methods can be implemented in the following way:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1250 +#: ../../source/tutorial-quickstart-scikitlearn.rst:143 msgid "" -"The full `code `_ for this comprehensive example can be found in" -" :code:`examples/xgboost-comprehensive`." 
+"We can now create an instance of our class :code:`MnistClient` and add " +"one line to actually run this client:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 -msgid "Build a strategy from scratch" +#: ../../source/tutorial-quickstart-scikitlearn.rst:150 +msgid "" +"That's it for the client. We only have to implement :code:`Client` or " +":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " +"implement a client of type :code:`NumPyClient` you'll need to first call " +"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" +" the client which server to connect to. In our case we can run the server" +" and the client on the same machine, therefore we use " +":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " +"server and clients running on different machines, all that needs to " +"change is the :code:`server_address` we pass to the client." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +#: ../../source/tutorial-quickstart-scikitlearn.rst:159 msgid "" -"Welcome to the third part of the Flower federated learning tutorial. In " -"previous parts of this tutorial, we introduced federated learning with " -"PyTorch and Flower (`part 1 `__) and we learned how strategies " -"can be used to customize the execution on both the server and the clients" -" (`part 2 `__)." +"The following Flower server is a little bit more advanced and returns an " +"evaluation function for the server-side evaluation. First, we import " +"again all required libraries such as Flower and scikit-learn." 
msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +#: ../../source/tutorial-quickstart-scikitlearn.rst:162 +msgid ":code:`server.py`, import Flower and start the server:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:173 msgid "" -"In this notebook, we'll continue to customize the federated learning " -"system we built previously by creating a custom version of FedAvg (again," -" using `Flower `__ and `PyTorch " -"`__)." +"The number of federated learning rounds is set in :code:`fit_round()` and" +" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " +"function is called after each federated learning round and gives you " +"information about loss and accuracy." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +#: ../../source/tutorial-quickstart-scikitlearn.rst:198 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the Flower community on Slack to connect, ask questions, and get help: " -"`Join Slack `__ 🌼 We'd love to hear from " -"you in the ``#introductions`` channel! And if anything is unclear, head " -"over to the ``#questions`` channel." +"The :code:`main` contains the server-side parameter initialization " +":code:`utils.set_initial_params()` as well as the aggregation strategy " +":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " +"federated averaging (or FedAvg), with two clients and evaluation after " +"each federated learning round. The server can be started with the command" +" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." 
msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:17 -msgid "Let's build a new ``Strategy`` from scratch!" +#: ../../source/tutorial-quickstart-scikitlearn.rst:217 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. Federated learning systems usually have a " +"server and multiple clients. We, therefore, have to start the server " +"first:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:29 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:29 -msgid "Preparation" +#: ../../source/tutorial-quickstart-scikitlearn.rst:271 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in :code:`examples/sklearn-logreg-" +"mnist`." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:31 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:32 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:31 +#: ../../source/tutorial-quickstart-tensorflow.rst:-1 msgid "" -"Before we begin with the actual code, let's make sure that we have " -"everything we need." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with TensorFlow to train a MobilNetV2 model on CIFAR-10." 
msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:43 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:44 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:43 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:43 -msgid "Installing dependencies" +#: ../../source/tutorial-quickstart-tensorflow.rst:5 +msgid "Quickstart TensorFlow" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:45 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:46 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:45 -msgid "First, we install the necessary packages:" +#: ../../source/tutorial-quickstart-tensorflow.rst:13 +msgid "Let's build a federated learning system in less than 20 lines of code!" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:65 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:66 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:65 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:65 -msgid "" -"Now that we have all dependencies installed, we can import everything we " -"need for this tutorial:" +#: ../../source/tutorial-quickstart-tensorflow.rst:15 +msgid "Before Flower can be imported we have to install it:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:101 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:102 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:101 +#: ../../source/tutorial-quickstart-tensorflow.rst:21 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " -"GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. 
If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." +"Since we want to use the Keras API of TensorFlow (TF), we have to install" +" TF as well:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:114 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:115 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:114 -msgid "Data loading" +#: ../../source/tutorial-quickstart-tensorflow.rst:31 +msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:116 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:116 +#: ../../source/tutorial-quickstart-tensorflow.rst:38 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``. We introduce a new parameter" -" ``num_clients`` which allows us to call ``load_datasets`` with different" -" numbers of clients." +"We use the Keras utilities of TF to load CIFAR10, a popular colored image" +" classification dataset for machine learning. The call to " +":code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches " +"it locally, and then returns the entire training and test set as NumPy " +"ndarrays." 
msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:167 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:168 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:167 -msgid "Model training/evaluation" +#: ../../source/tutorial-quickstart-tensorflow.rst:47 +msgid "" +"Next, we need a model. For the purpose of this tutorial, we use " +"MobilNetV2 with 10 output classes:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:169 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:170 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +#: ../../source/tutorial-quickstart-tensorflow.rst:60 msgid "" -"Let's continue with the usual model definition (including " -"``set_parameters`` and ``get_parameters``), training and test functions:" +"Flower provides a convenience class called :code:`NumPyClient` which " +"makes it easier to implement the :code:`Client` interface when your " +"workload uses Keras. The :code:`NumPyClient` interface defines three " +"methods which can be implemented in the following way:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:258 -msgid "Flower client" +#: ../../source/tutorial-quickstart-tensorflow.rst:135 +msgid "Each client will have its own dataset." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:260 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:260 +#: ../../source/tutorial-quickstart-tensorflow.rst:137 msgid "" -"To implement the Flower client, we (again) create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``. 
Here, we also pass the " -"``cid`` to the client and use it log additional details:" +"You should now see how the training does in the very first terminal (the " +"one that started the server):" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:308 -msgid "Let's test what we have so far before we continue:" +#: ../../source/tutorial-quickstart-tensorflow.rst:169 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this can be found in :code:`examples" +"/quickstart-tensorflow/client.py`." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:339 -msgid "Build a Strategy from scratch" +#: ../../source/tutorial-quickstart-xgboost.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with XGBoost to train classification models on trees." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:341 -msgid "" -"Let’s overwrite the ``configure_fit`` method such that it passes a higher" -" learning rate (potentially also other hyperparameters) to the optimizer " -"of a fraction of the clients. We will keep the sampling of the clients as" -" it is in ``FedAvg`` and then change the configuration dictionary (one of" -" the ``FitIns`` attributes)." 
+#: ../../source/tutorial-quickstart-xgboost.rst:5 +msgid "Quickstart XGBoost" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:507 -msgid "" -"The only thing left is to use the newly created custom Strategy " -"``FedCustom`` when starting the experiment:" +#: ../../source/tutorial-quickstart-xgboost.rst:14 +msgid "Federated XGBoost" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:534 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:932 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:697 -msgid "Recap" +#: ../../source/tutorial-quickstart-xgboost.rst:16 +msgid "" +"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " +"implementation of gradient-boosted decision tree (**GBDT**), that " +"maximises the computational boundaries for boosted tree methods. It's " +"primarily designed to enhance both the performance and computational " +"speed of machine learning models. In XGBoost, trees are constructed " +"concurrently, unlike the sequential approach taken by GBDT." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:536 +#: ../../source/tutorial-quickstart-xgboost.rst:20 msgid "" -"In this notebook, we’ve seen how to implement a custom strategy. A custom" -" strategy enables granular control over client node configuration, result" -" aggregation, and more. To define a custom strategy, you only have to " -"overwrite the abstract methods of the (abstract) base class ``Strategy``." -" To make custom strategies even more powerful, you can pass custom " -"functions to the constructor of your new class (``__init__``) and then " -"call these functions whenever needed." +"Often, for tabular data on medium-sized datasets with fewer than 10k " +"training examples, XGBoost surpasses the results of deep learning " +"techniques." 
msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:550 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:948 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:729 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:715 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 -msgid "" -"Before you continue, make sure to join the Flower community on Slack: " -"`Join Slack `__" +#: ../../source/tutorial-quickstart-xgboost.rst:23 +msgid "Why federated XGBoost?" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:552 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:950 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:731 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:717 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 +#: ../../source/tutorial-quickstart-xgboost.rst:25 msgid "" -"There's a dedicated ``#questions`` channel if you need help, but we'd " -"also love to hear who you are in ``#introductions``!" +"Indeed, as the demand for data privacy and decentralized learning grows, " +"there's an increasing requirement to implement federated XGBoost systems " +"for specialised applications, like survival analysis and financial fraud " +"detection." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:554 +#: ../../source/tutorial-quickstart-xgboost.rst:27 msgid "" -"The `Flower Federated Learning Tutorial - Part 4 " -"`__ introduces ``Client``, the flexible API underlying " -"``NumPyClient``." +"Federated learning ensures that raw data remains on the local device, " +"making it an attractive approach for sensitive domains where data " +"security and privacy are paramount. 
Given the robustness and efficiency " +"of XGBoost, combining it with federated learning offers a promising " +"solution for these specific challenges." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 -msgid "Customize the client" +#: ../../source/tutorial-quickstart-xgboost.rst:30 +msgid "" +"In this tutorial we will learn how to train a federated XGBoost model on " +"HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " +"example (`full code xgboost-quickstart " +"`_)" +" with two *clients* and one *server* to demonstrate how federated XGBoost" +" works, and then we dive into a more complex example (`full code xgboost-" +"comprehensive `_) to run various experiments." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 -msgid "" -"Welcome to the fourth part of the Flower federated learning tutorial. In " -"the previous parts of this tutorial, we introduced federated learning " -"with PyTorch and Flower (`part 1 `__), we learned how " -"strategies can be used to customize the execution on both the server and " -"the clients (`part 2 `__), and we built our own " -"custom strategy from scratch (`part 3 `__)." +#: ../../source/tutorial-quickstart-xgboost.rst:37 +msgid "Environment Setup" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 +#: ../../source/tutorial-quickstart-xgboost.rst:41 msgid "" -"In this notebook, we revisit ``NumPyClient`` and introduce a new " -"baseclass for building clients, simply named ``Client``. In previous " -"parts of this tutorial, we've based our client on ``NumPyClient``, a " -"convenience class which makes it easy to work with machine learning " -"libraries that have good NumPy interoperability. With ``Client``, we gain" -" a lot of flexibility that we didn't have before, but we'll also have to " -"do a few things the we didn't have to do before." +"We first need to install Flower and Flower Datasets. 
You can do this by " +"running :" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:18 +#: ../../source/tutorial-quickstart-xgboost.rst:47 msgid "" -"Let's go deeper and see what it takes to move from ``NumPyClient`` to " -"``Client``!" +"Since we want to use :code:`xgboost` package to build up XGBoost trees, " +"let's go ahead and install :code:`xgboost`:" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:30 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:29 -msgid "Step 0: Preparation" +#: ../../source/tutorial-quickstart-xgboost.rst:57 +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. Now that we have all our " +"dependencies installed, let's run a simple distributed training with two " +"clients and one server." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:117 +#: ../../source/tutorial-quickstart-xgboost.rst:60 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``." +"In a file called :code:`client.py`, import xgboost, Flower, Flower " +"Datasets and other related functions:" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:259 -msgid "Step 1: Revisiting NumPyClient" +#: ../../source/tutorial-quickstart-xgboost.rst:87 +msgid "Dataset partition and hyper-parameter selection" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:261 +#: ../../source/tutorial-quickstart-xgboost.rst:89 msgid "" -"So far, we've implemented our client by subclassing " -"``flwr.client.NumPyClient``. The three methods we implemented are " -"``get_parameters``, ``fit``, and ``evaluate``. 
Finally, we wrap the " -"creation of instances of this class in a function called ``client_fn``:" +"Prior to local training, we require loading the HIGGS dataset from Flower" +" Datasets and conduct data partitioning for FL:" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:309 +#: ../../source/tutorial-quickstart-xgboost.rst:102 msgid "" -"We've seen this before, there's nothing new so far. The only *tiny* " -"difference compared to the previous notebook is naming, we've changed " -"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " -"``numpyclient_fn``. Let's run it to see the output we get:" +"In this example, we split the dataset into two partitions with uniform " +"distribution (:code:`IidPartitioner(num_partitions=2)`). Then, we load " +"the partition for the given client based on :code:`node_id`:" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:339 +#: ../../source/tutorial-quickstart-xgboost.rst:121 msgid "" -"This works as expected, two clients are training for three rounds of " -"federated learning." +"After that, we do train/test splitting on the given partition (client's " +"local data), and transform data format for :code:`xgboost` package." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:341 +#: ../../source/tutorial-quickstart-xgboost.rst:134 msgid "" -"Let's dive a little bit deeper and discuss how Flower executes this " -"simulation. Whenever a client is selected to do some work, " -"``start_simulation`` calls the function ``numpyclient_fn`` to create an " -"instance of our ``FlowerNumPyClient`` (along with loading the model and " -"the data)." 
+"The functions of :code:`train_test_split` and " +":code:`transform_dataset_to_dmatrix` are defined as below:" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:343 -msgid "" -"But here's the perhaps surprising part: Flower doesn't actually use the " -"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " -"makes it look like a subclass of ``flwr.client.Client``, not " -"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " -"know how to handle ``NumPyClient``'s, it only knows how to handle " -"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " -"top of ``Client``." +#: ../../source/tutorial-quickstart-xgboost.rst:158 +msgid "Finally, we define the hyper-parameters used for XGBoost training." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:345 +#: ../../source/tutorial-quickstart-xgboost.rst:174 msgid "" -"Instead of building on top of ``NumPyClient``, we can directly build on " -"top of ``Client``." +"The :code:`num_local_round` represents the number of iterations for local" +" tree boost. We use CPU for the training in default. One can shift it to " +"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " +"evaluation metric." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:357 -msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" +#: ../../source/tutorial-quickstart-xgboost.rst:181 +msgid "Flower client definition for XGBoost" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:359 +#: ../../source/tutorial-quickstart-xgboost.rst:183 msgid "" -"Let's try to do the same thing using ``Client`` instead of " -"``NumPyClient``." +"After loading the dataset we define the Flower client. We follow the " +"general rule to define :code:`XgbClient` class inherited from " +":code:`fl.client.Client`." 
msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:465 +#: ../../source/tutorial-quickstart-xgboost.rst:193 msgid "" -"Before we discuss the code in more detail, let's try to run it! Gotta " -"make sure our new ``Client``-based client works, right?" +"The :code:`self.bst` is used to keep the Booster objects that remain " +"consistent across rounds, allowing them to store predictions from trees " +"integrated in earlier rounds and maintain other essential data structures" +" for training." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:490 +#: ../../source/tutorial-quickstart-xgboost.rst:196 msgid "" -"That's it, we're now using ``Client``. It probably looks similar to what " -"we've done with ``NumPyClient``. So what's the difference?" +"Then, we override :code:`get_parameters`, :code:`fit` and " +":code:`evaluate` methods insides :code:`XgbClient` class as follows." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:492 +#: ../../source/tutorial-quickstart-xgboost.rst:210 msgid "" -"First of all, it's more code. But why? The difference comes from the fact" -" that ``Client`` expects us to take care of parameter serialization and " -"deserialization. For Flower to be able to send parameters over the " -"network, it eventually needs to turn these parameters into ``bytes``. " -"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " -"serialization. Turning raw bytes into something more useful (like NumPy " -"``ndarray``'s) is called deserialization. Flower needs to do both: it " -"needs to serialize parameters on the server-side and send them to the " -"client, the client needs to deserialize them to use them for local " -"training, and then serialize the updated parameters again to send them " -"back to the server, which (finally!) deserializes them again in order to " -"aggregate them with the updates received from other clients." 
+"Unlike neural network training, XGBoost trees are not started from a " +"specified random weights. In this case, we do not use " +":code:`get_parameters` and :code:`set_parameters` to initialise model " +"parameters for XGBoost. As a result, let's return an empty tensor in " +":code:`get_parameters` when it is called by the server at the first " +"round." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:495 +#: ../../source/tutorial-quickstart-xgboost.rst:251 msgid "" -"The only *real* difference between Client and NumPyClient is that " -"NumPyClient takes care of serialization and deserialization for you. It " -"can do so because it expects you to return parameters as NumPy ndarray's," -" and it knows how to handle these. This makes working with machine " -"learning libraries that have good NumPy support (most of them) a breeze." +"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " +"up the first set of trees. the returned Booster object and config are " +"stored in :code:`self.bst` and :code:`self.config`, respectively. From " +"the second round, we load the global model sent from server to " +":code:`self.bst`, and then update model weights on local training data " +"with function :code:`local_boost` as follows:" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:497 +#: ../../source/tutorial-quickstart-xgboost.rst:269 msgid "" -"In terms of API, there's one major difference: all methods in Client take" -" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " -"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " -"``NumPyClient`` on the other hand have multiple arguments (e.g., " -"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" -" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " -"``NumPyClient.fit``) if there are multiple things to handle. 
These " -"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " -"values you're used to from ``NumPyClient``." +"Given :code:`num_local_round`, we update trees by calling " +":code:`self.bst.update` method. After training, the last " +":code:`N=num_local_round` trees will be extracted to send to the server." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:510 -msgid "Step 3: Custom serialization" +#: ../../source/tutorial-quickstart-xgboost.rst:291 +msgid "" +"In :code:`evaluate`, we call :code:`self.bst.eval_set` function to " +"conduct evaluation on valid set. The AUC value will be returned." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:512 +#: ../../source/tutorial-quickstart-xgboost.rst:294 msgid "" -"Here we will explore how to implement custom serialization with a simple " -"example." +"Now, we can create an instance of our class :code:`XgbClient` and add one" +" line to actually run this client:" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:514 +#: ../../source/tutorial-quickstart-xgboost.rst:300 msgid "" -"But first what is serialization? Serialization is just the process of " -"converting an object into raw bytes, and equally as important, " -"deserialization is the process of converting raw bytes back into an " -"object. This is very useful for network communication. Indeed, without " -"serialization, you could not just a Python object through the internet." +"That's it for the client. We only have to implement :code:`Client`and " +"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " +"tells the client which server to connect to. In our case we can run the " +"server and the client on the same machine, therefore we use " +":code:`\"[::]:8080\"`. 
If we run a truly federated workload with the " +"server and clients running on different machines, all that needs to " +"change is the :code:`server_address` we point the client at." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:516 +#: ../../source/tutorial-quickstart-xgboost.rst:311 msgid "" -"Federated Learning relies heavily on internet communication for training " -"by sending Python objects back and forth between the clients and the " -"server. This means that serialization is an essential part of Federated " -"Learning." +"These updates are then sent to the *server* which will aggregate them to " +"produce a better model. Finally, the *server* sends this improved version" +" of the model back to each *client* to finish a complete FL round." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:518 +#: ../../source/tutorial-quickstart-xgboost.rst:314 msgid "" -"In the following section, we will write a basic example where instead of " -"sending a serialized version of our ``ndarray``\\ s containing our " -"parameters, we will first convert the ``ndarray`` into sparse matrices, " -"before sending them. This technique can be used to save bandwidth, as in " -"certain cases where the weights of a model are sparse (containing many 0 " -"entries), converting them to a sparse matrix can greatly improve their " -"bytesize." +"In a file named :code:`server.py`, import Flower and FedXgbBagging from " +":code:`flwr.server.strategy`." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:521 -msgid "Our custom serialization/deserialization functions" +#: ../../source/tutorial-quickstart-xgboost.rst:316 +msgid "We first define a strategy for XGBoost bagging aggregation." 
msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:523 +#: ../../source/tutorial-quickstart-xgboost.rst:339 msgid "" -"This is where the real serialization/deserialization will happen, " -"especially in ``ndarray_to_sparse_bytes`` for serialization and " -"``sparse_bytes_to_ndarray`` for deserialization." +"We use two clients for this example. An " +":code:`evaluate_metrics_aggregation` function is defined to collect and " +"wighted average the AUC values from clients." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:525 -msgid "" -"Note that we imported the ``scipy.sparse`` library in order to convert " -"our arrays." +#: ../../source/tutorial-quickstart-xgboost.rst:342 +msgid "Then, we start the server:" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:613 -msgid "Client-side" +#: ../../source/tutorial-quickstart-xgboost.rst:354 +msgid "Tree-based bagging aggregation" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 +#: ../../source/tutorial-quickstart-xgboost.rst:356 msgid "" -"To be able to serialize our ``ndarray``\\ s into sparse " -"parameters, we will just have to call our custom functions in our " -"``flwr.client.Client``." +"You must be curious about how bagging aggregation works. Let's look into " +"the details." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:617 +#: ../../source/tutorial-quickstart-xgboost.rst:358 msgid "" -"Indeed, in ``get_parameters`` we need to serialize the parameters we got " -"from our network using our custom ``ndarrays_to_sparse_parameters`` " -"defined above." +"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " +":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." 
+" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " +"and :code:`evaluate` methods as follows:" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:619 +#: ../../source/tutorial-quickstart-xgboost.rst:454 msgid "" -"In ``fit``, we first need to deserialize the parameters coming from the " -"server using our custom ``sparse_parameters_to_ndarrays`` and then we " -"need to serialize our local results with " -"``ndarrays_to_sparse_parameters``." +"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " +"trees by calling :code:`aggregate()` function:" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:621 +#: ../../source/tutorial-quickstart-xgboost.rst:513 msgid "" -"In ``evaluate``, we will only need to deserialize the global parameters " -"with our custom function." -msgstr "" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:725 -msgid "Server-side" +"In this function, we first fetch the number of trees and the number of " +"parallel trees for the current and previous model by calling " +":code:`_get_tree_nums`. Then, the fetched information will be aggregated." +" After that, the trees (containing model weights) are aggregated to " +"generate a new tree model." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:727 +#: ../../source/tutorial-quickstart-xgboost.rst:518 msgid "" -"For this example, we will just use ``FedAvg`` as a strategy. To change " -"the serialization and deserialization here, we only need to reimplement " -"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" -" functions of the strategy will be inherited from the super class " -"``FedAvg``." +"After traversal of all clients' models, a new global model is generated, " +"followed by the serialisation, and sending back to each client." 
msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:729 -msgid "As you can see only one line as change in ``evaluate``:" +#: ../../source/tutorial-quickstart-xgboost.rst:523 +msgid "Launch Federated XGBoost!" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:735 +#: ../../source/tutorial-quickstart-xgboost.rst:585 msgid "" -"And for ``aggregate_fit``, we will first deserialize every result we " -"received:" +"Congratulations! You've successfully built and run your first federated " +"XGBoost system. The AUC values can be checked in " +":code:`metrics_distributed`. One can see that the average AUC increases " +"over FL rounds." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:744 -msgid "And then serialize the aggregated result:" +#: ../../source/tutorial-quickstart-xgboost.rst:590 +msgid "" +"The full `source code `_ for this example can be found in :code:`examples" +"/xgboost-quickstart`." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:903 -msgid "We can now run our custom serialization example!" +#: ../../source/tutorial-quickstart-xgboost.rst:594 +msgid "Comprehensive Federated XGBoost" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:934 +#: ../../source/tutorial-quickstart-xgboost.rst:596 msgid "" -"In this part of the tutorial, we've seen how we can build clients by " -"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " -"convenience abstraction that makes it easier to work with machine " -"learning libraries that have good NumPy interoperability. ``Client`` is a" -" more flexible abstraction that allows us to do things that are not " -"possible in ``NumPyClient``. In order to do so, it requires us to handle " -"parameter serialization and deserialization ourselves." 
+"Now that you have known how federated XGBoost work with Flower, it's time" +" to run some more comprehensive experiments by customising the " +"experimental settings. In the xgboost-comprehensive example (`full code " +"`_), we provide more options to define various experimental" +" setups, including aggregation strategies, data partitioning and " +"centralised/distributed evaluation. We also support :doc:`Flower " +"simulation ` making it easy to simulate large " +"client cohorts in a resource-aware manner. Let's take a look!" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:952 -msgid "" -"This is the final part of the Flower tutorial (for now!), " -"congratulations! You're now well equipped to understand the rest of the " -"documentation. There are many topics we didn't cover in the tutorial, we " -"recommend the following resources:" +#: ../../source/tutorial-quickstart-xgboost.rst:603 +msgid "Cyclic training" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:954 -msgid "`Read Flower Docs `__" +#: ../../source/tutorial-quickstart-xgboost.rst:605 +msgid "" +"In addition to bagging aggregation, we offer a cyclic training scheme, " +"which performs FL in a client-by-client fashion. Instead of aggregating " +"multiple clients, there is only one single client participating in the " +"training per round in the cyclic training scenario. The trained local " +"XGBoost trees will be passed to the next client as an initialised model " +"for next round's boosting." 
msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:955 +#: ../../source/tutorial-quickstart-xgboost.rst:609 msgid "" -"`Check out Flower Code Examples " -"`__" +"To do this, we first customise a :code:`ClientManager` in " +":code:`server_utils.py`:" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:956 +#: ../../source/tutorial-quickstart-xgboost.rst:649 msgid "" -"`Use Flower Baselines for your research " -"`__" +"The customised :code:`ClientManager` samples all available clients in " +"each FL round based on the order of connection to the server. Then, we " +"define a new strategy :code:`FedXgbCyclic` in " +":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " +"select only one client in given round and pass the received model to next" +" client." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:957 +#: ../../source/tutorial-quickstart-xgboost.rst:690 msgid "" -"`Watch Flower Summit 2023 videos `__" +"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " +"Instead, we just make a copy of the received client model as global model" +" by overriding :code:`aggregate_fit`." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 -msgid "Get started with Flower" +#: ../../source/tutorial-quickstart-xgboost.rst:693 +msgid "" +"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" +" methods ensure the clients to be sequentially selected given FL round:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 -msgid "Welcome to the Flower federated learning tutorial!" 
+#: ../../source/tutorial-quickstart-xgboost.rst:757 +msgid "Customised data partitioning" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 +#: ../../source/tutorial-quickstart-xgboost.rst:759 msgid "" -"In this notebook, we'll build a federated learning system using Flower, " -"`Flower Datasets `__ and PyTorch. In " -"part 1, we use PyTorch for the model training pipeline and data loading. " -"In part 2, we continue to federate the PyTorch-based pipeline using " -"Flower." +"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" +" to instantiate the data partitioner based on the given " +":code:`num_partitions` and :code:`partitioner_type`. Currently, we " +"provide four supported partitioner type to simulate the uniformity/non-" +"uniformity in data quantity (uniform, linear, square, exponential)." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:17 -msgid "Let's get stated!" +#: ../../source/tutorial-quickstart-xgboost.rst:790 +msgid "Customised centralised/distributed evaluation" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:31 +#: ../../source/tutorial-quickstart-xgboost.rst:792 msgid "" -"Before we begin with any actual code, let's make sure that we have " -"everything we need." +"To facilitate centralised evaluation, we define a function in " +":code:`server_utils.py`:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:45 +#: ../../source/tutorial-quickstart-xgboost.rst:824 msgid "" -"Next, we install the necessary packages for PyTorch (``torch`` and " -"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " -"(``flwr``):" +"This function returns a evaluation function which instantiates a " +":code:`Booster` object and loads the global model weights to it. The " +"evaluation is conducted by calling :code:`eval_set()` method, and the " +"tested AUC value is reported." 
msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:105 +#: ../../source/tutorial-quickstart-xgboost.rst:827 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" -" GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." +"As for distributed evaluation on the clients, it's same as the quick-" +"start example by overriding the :code:`evaluate()` method insides the " +":code:`XgbClient` class in :code:`client_utils.py`." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:118 -msgid "Loading the data" +#: ../../source/tutorial-quickstart-xgboost.rst:831 +msgid "Flower simulation" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:120 +#: ../../source/tutorial-quickstart-xgboost.rst:832 msgid "" -"Federated learning can be applied to many different types of tasks across" -" different domains. In this tutorial, we introduce federated learning by " -"training a simple convolutional neural network (CNN) on the popular " -"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " -"distinguish between images from ten different classes: 'airplane', " -"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " -"'truck'." +"We also provide an example code (:code:`sim.py`) to use the simulation " +"capabilities of Flower to simulate federated XGBoost training on either a" +" single machine or a cluster of machines." 
msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:131 +#: ../../source/tutorial-quickstart-xgboost.rst:866 msgid "" -"We simulate having multiple datasets from multiple organizations (also " -"called the \"cross-silo\" setting in federated learning) by splitting the" -" original CIFAR-10 dataset into multiple partitions. Each partition will " -"represent the data from a single organization. We're doing this purely " -"for experimentation purposes, in the real world there's no need for data " -"splitting because each organization already has their own data (so the " -"data is naturally partitioned)." +"After importing all required packages, we define a :code:`main()` " +"function to perform the simulation process:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:133 +#: ../../source/tutorial-quickstart-xgboost.rst:921 msgid "" -"Each organization will act as a client in the federated learning system. " -"So having ten organizations participate in a federation means having ten " -"clients connected to the federated learning server." +"We first load the dataset and perform data partitioning, and the pre-" +"processed data is stored in a :code:`list`. After the simulation begins, " +"the clients won't need to pre-process their partitions again." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:144 -msgid "" -"Let's now create the Federated Dataset abstraction that from ``flwr-" -"datasets`` that partitions the CIFAR-10. 
We will create small training " -"and test set for each edge device and wrap each of them into a PyTorch " -"``DataLoader``:" +#: ../../source/tutorial-quickstart-xgboost.rst:924 +msgid "Then, we define the strategies and other hyper-parameters:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:198 +#: ../../source/tutorial-quickstart-xgboost.rst:975 msgid "" -"We now have a list of ten training sets and ten validation sets " -"(``trainloaders`` and ``valloaders``) representing the data of ten " -"different organizations. Each ``trainloader``/``valloader`` pair contains" -" 4500 training examples and 500 validation examples. There's also a " -"single ``testloader`` (we did not split the test set). Again, this is " -"only necessary for building research or educational systems, actual " -"federated learning systems have their data naturally distributed across " -"multiple partitions." +"After that, we start the simulation by calling " +":code:`fl.simulation.start_simulation`:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:201 +#: ../../source/tutorial-quickstart-xgboost.rst:995 msgid "" -"Let's take a look at the first batch of images and labels in the first " -"training set (i.e., ``trainloaders[0]``) before we move on:" +"One of key parameters for :code:`start_simulation` is :code:`client_fn` " +"which returns a function to construct a client. We define it as follows:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:240 -msgid "" -"The output above shows a random batch of images from the first " -"``trainloader`` in our list of ten ``trainloaders``. It also prints the " -"labels associated with each image (i.e., one of the ten possible labels " -"we've seen above). If you run the cell again, you should see another " -"batch of images." 
+#: ../../source/tutorial-quickstart-xgboost.rst:1038 +msgid "Arguments parser" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:252 -msgid "Step 1: Centralized Training with PyTorch" +#: ../../source/tutorial-quickstart-xgboost.rst:1040 +msgid "" +"In :code:`utils.py`, we define the arguments parsers for clients, server " +"and simulation, allowing users to specify different experimental " +"settings. Let's first see the sever side:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:263 +#: ../../source/tutorial-quickstart-xgboost.rst:1086 msgid "" -"Next, we're going to use PyTorch to define a simple convolutional neural " -"network. This introduction assumes basic familiarity with PyTorch, so it " -"doesn't cover the PyTorch-related aspects in full detail. If you want to " -"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " -"MINUTE BLITZ " -"`__." +"This allows user to specify training strategies / the number of total " +"clients / FL rounds / participating clients / clients for evaluation, and" +" evaluation fashion. Note that with :code:`--centralised-eval`, the sever" +" will do centralised evaluation and all functionalities for client " +"evaluation will be disabled." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:275 -msgid "Defining the model" +#: ../../source/tutorial-quickstart-xgboost.rst:1090 +msgid "Then, the argument parser on client side:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:277 +#: ../../source/tutorial-quickstart-xgboost.rst:1144 msgid "" -"We use the simple CNN described in the `PyTorch tutorial " -"`__:" +"This defines various options for client data partitioning. 
Besides, " +"clients also have an option to conduct evaluation on centralised test set" +" by setting :code:`--centralised-eval`, as well as an option to perform " +"scaled learning rate based on the number of clients by setting :code" +":`--scaled-lr`." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:314 -msgid "Let's continue with the usual training and test functions:" +#: ../../source/tutorial-quickstart-xgboost.rst:1148 +msgid "We also have an argument parser for simulation:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:374 -msgid "Training the model" +#: ../../source/tutorial-quickstart-xgboost.rst:1226 +msgid "This integrates all arguments for both client and server sides." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:376 -msgid "" -"We now have all the basic building blocks we need: a dataset, a model, a " -"training function, and a test function. Let's put them together to train " -"the model on the dataset of one of our organizations " -"(``trainloaders[0]``). This simulates the reality of most machine " -"learning projects today: each organization has their own data and trains " -"models only on this internal data:" +#: ../../source/tutorial-quickstart-xgboost.rst:1229 +msgid "Example commands" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 +#: ../../source/tutorial-quickstart-xgboost.rst:1231 msgid "" -"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " -"in a test set accuracy of about 41%, which is not good, but at the same " -"time, it doesn't really matter for the purposes of this tutorial. The " -"intent was just to show a simplistic centralized training pipeline that " -"sets the stage for what comes next - federated learning!" 
+"To run a centralised evaluated experiment with bagging strategy on 5 " +"clients with exponential distribution for 50 rounds, we first start the " +"server as below:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 -msgid "Step 2: Federated Learning with Flower" +#: ../../source/tutorial-quickstart-xgboost.rst:1238 +msgid "Then, on each client terminal, we start the clients:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 +#: ../../source/tutorial-quickstart-xgboost.rst:1244 +msgid "To run the same experiment with Flower simulation:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1250 msgid "" -"Step 1 demonstrated a simple centralized training pipeline. All data was " -"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." -" Next, we'll simulate a situation where we have multiple datasets in " -"multiple organizations and where we train a model over these " -"organizations using federated learning." +"The full `code `_ for this comprehensive example can be found in" +" :code:`examples/xgboost-comprehensive`." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 -msgid "Updating model parameters" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 +msgid "Build a strategy from scratch" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 msgid "" -"In federated learning, the server sends the global model parameters to " -"the client, and the client updates the local model with the parameters " -"received from the server. 
It then trains the model on the local data " -"(which changes the model parameters locally) and sends the " -"updated/changed model parameters back to the server (or, alternatively, " -"it sends just the gradients back to the server, not the full model " -"parameters)." +"Welcome to the third part of the Flower federated learning tutorial. In " +"previous parts of this tutorial, we introduced federated learning with " +"PyTorch and Flower (`part 1 `__) and we learned how strategies " +"can be used to customize the execution on both the server and the clients" +" (`part 2 `__)." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 msgid "" -"We need two helper functions to update the local model with parameters " -"received from the server and to get the updated model parameters from the" -" local model: ``set_parameters`` and ``get_parameters``. The following " -"two functions do just that for the PyTorch model above." +"In this notebook, we'll continue to customize the federated learning " +"system we built previously by creating a custom version of FedAvg (again," +" using `Flower `__ and `PyTorch " +"`__)." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 msgid "" -"The details of how this works are not really important here (feel free to" -" consult the PyTorch documentation if you want to learn more). In " -"essence, we use ``state_dict`` to access PyTorch model parameter tensors." 
-" The parameter tensors are then converted to/from a list of NumPy " -"ndarray's (which Flower knows how to serialize/deserialize):" +"`Star Flower on GitHub `__ ⭐️ and join " +"the Flower community on Slack to connect, ask questions, and get help: " +"`Join Slack `__ 🌼 We'd love to hear from " +"you in the ``#introductions`` channel! And if anything is unclear, head " +"over to the ``#questions`` channel." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 -msgid "Implementing a Flower client" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:17 +msgid "Let's build a new ``Strategy`` from scratch!" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 -msgid "" -"With that out of the way, let's move on to the interesting part. " -"Federated learning systems consist of a server and multiple clients. In " -"Flower, we create clients by implementing subclasses of " -"``flwr.client.Client`` or ``flwr.client.NumPyClient``. We use " -"``NumPyClient`` in this tutorial because it is easier to implement and " -"requires us to write less boilerplate." +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:29 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:29 +msgid "Preparation" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:31 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:32 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:31 msgid "" -"To implement the Flower client, we create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``:" +"Before we begin with the actual code, let's make sure that we have " +"everything we need." 
msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 -msgid "``get_parameters``: Return the current local model parameters" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:43 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:44 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:43 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:43 +msgid "Installing dependencies" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 -msgid "" -"``fit``: Receive model parameters from the server, train the model " -"parameters on the local data, and return the (updated) model parameters " -"to the server" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:45 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:46 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:45 +msgid "First, we install the necessary packages:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:65 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:66 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:65 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:65 msgid "" -"``evaluate``: Receive model parameters from the server, evaluate the " -"model parameters on the local data, and return the evaluation result to " -"the server" +"Now that we have all dependencies installed, we can import everything we " +"need for this tutorial:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:101 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:102 +#: 
../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:101 msgid "" -"We mentioned that our clients will use the previously defined PyTorch " -"components for model training and evaluation. Let's see a simple Flower " -"client implementation that brings everything together:" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " +"GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 -msgid "" -"Our class ``FlowerClient`` defines how local training/evaluation will be " -"performed and allows Flower to call the local training/evaluation through" -" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" -" *single client* in our federated learning system. Federated learning " -"systems have multiple clients (otherwise, there's not much to federate), " -"so each client will be represented by its own instance of " -"``FlowerClient``. If we have, for example, three clients in our workload," -" then we'd have three instances of ``FlowerClient``. Flower calls " -"``FlowerClient.fit`` on the respective instance when the server selects a" -" particular client for training (and ``FlowerClient.evaluate`` for " -"evaluation)." 
-msgstr "" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:517 -msgid "Using the Virtual Client Engine" -msgstr "" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:519 -msgid "" -"In this notebook, we want to simulate a federated learning system with 10" -" clients on a single machine. This means that the server and all 10 " -"clients will live on a single machine and share resources such as CPU, " -"GPU, and memory. Having 10 clients would mean having 10 instances of " -"``FlowerClient`` in memory. Doing this on a single machine can quickly " -"exhaust the available memory resources, even if only a subset of these " -"clients participates in a single round of federated learning." +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:114 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:115 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:114 +msgid "Data loading" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:521 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:116 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:116 msgid "" -"In addition to the regular capabilities where server and clients run on " -"multiple machines, Flower, therefore, provides special simulation " -"capabilities that create ``FlowerClient`` instances only when they are " -"actually necessary for training or evaluation. To enable the Flower " -"framework to create clients when necessary, we need to implement a " -"function called ``client_fn`` that creates a ``FlowerClient`` instance on" -" demand. Flower calls ``client_fn`` whenever it needs an instance of one " -"particular client to call ``fit`` or ``evaluate`` (those instances are " -"usually discarded after use, so they should not keep any local state). " -"Clients are identified by a client ID, or short ``cid``. 
The ``cid`` can " -"be used, for example, to load different local data partitions for " -"different clients, as can be seen below:" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``. We introduce a new parameter" +" ``num_clients`` which allows us to call ``load_datasets`` with different" +" numbers of clients." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:556 -msgid "Starting the training" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:167 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:168 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:167 +msgid "Model training/evaluation" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:558 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:169 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:170 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 msgid "" -"We now have the class ``FlowerClient`` which defines client-side " -"training/evaluation and ``client_fn`` which allows Flower to create " -"``FlowerClient`` instances whenever it needs to call ``fit`` or " -"``evaluate`` on one particular client. The last step is to start the " -"actual simulation using ``flwr.simulation.start_simulation``." 
+"Let's continue with the usual model definition (including " +"``set_parameters`` and ``get_parameters``), training and test functions:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:560 -msgid "" -"The function ``start_simulation`` accepts a number of arguments, amongst " -"them the ``client_fn`` used to create ``FlowerClient`` instances, the " -"number of clients to simulate (``num_clients``), the number of federated " -"learning rounds (``num_rounds``), and the strategy. The strategy " -"encapsulates the federated learning approach/algorithm, for example, " -"*Federated Averaging* (FedAvg)." +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:258 +msgid "Flower client" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:562 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:260 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:260 msgid "" -"Flower has a number of built-in strategies, but we can also use our own " -"strategy implementations to customize nearly all aspects of the federated" -" learning approach. For this example, we use the built-in ``FedAvg`` " -"implementation and customize it using a few basic parameters. The last " -"step is the actual call to ``start_simulation`` which - you guessed it - " -"starts the simulation:" +"To implement the Flower client, we (again) create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``. 
Here, we also pass the " +"``cid`` to the client and use it log additional details:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:608 -msgid "Behind the scenes" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:308 +msgid "Let's test what we have so far before we continue:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:610 -msgid "So how does this work? How does Flower execute this simulation?" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:339 +msgid "Build a Strategy from scratch" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:612 -#, python-format +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:341 msgid "" -"When we call ``start_simulation``, we tell Flower that there are 10 " -"clients (``num_clients=10``). Flower then goes ahead an asks the " -"``FedAvg`` strategy to select clients. ``FedAvg`` knows that it should " -"select 100% of the available clients (``fraction_fit=1.0``), so it goes " -"ahead and selects 10 random clients (i.e., 100% of 10)." +"Let’s overwrite the ``configure_fit`` method such that it passes a higher" +" learning rate (potentially also other hyperparameters) to the optimizer " +"of a fraction of the clients. We will keep the sampling of the clients as" +" it is in ``FedAvg`` and then change the configuration dictionary (one of" +" the ``FitIns`` attributes)." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:614 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:507 msgid "" -"Flower then asks the selected 10 clients to train the model. When the " -"server receives the model parameter updates from the clients, it hands " -"those updates over to the strategy (*FedAvg*) for aggregation. 
The " -"strategy aggregates those updates and returns the new global model, which" -" then gets used in the next round of federated learning." +"The only thing left is to use the newly created custom Strategy " +"``FedCustom`` when starting the experiment:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:626 -msgid "Where's the accuracy?" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:534 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:932 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:697 +msgid "Recap" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:536 msgid "" -"You may have noticed that all metrics except for ``losses_distributed`` " -"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" +"In this notebook, we’ve seen how to implement a custom strategy. A custom" +" strategy enables granular control over client node configuration, result" +" aggregation, and more. To define a custom strategy, you only have to " +"overwrite the abstract methods of the (abstract) base class ``Strategy``." +" To make custom strategies even more powerful, you can pass custom " +"functions to the constructor of your new class (``__init__``) and then " +"call these functions whenever needed." 
msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:550 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:948 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:729 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:715 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 msgid "" -"Flower can automatically aggregate losses returned by individual clients," -" but it cannot do the same for metrics in the generic metrics dictionary " -"(the one with the ``accuracy`` key). Metrics dictionaries can contain " -"very different kinds of metrics and even key/value pairs that are not " -"metrics at all, so the framework does not (and can not) know how to " -"handle these automatically." +"Before you continue, make sure to join the Flower community on Slack: " +"`Join Slack `__" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:552 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:950 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:731 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:717 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 msgid "" -"As users, we need to tell the framework how to handle/aggregate these " -"custom metrics, and we do so by passing metric aggregation functions to " -"the strategy. The strategy will then call these functions whenever it " -"receives fit or evaluate metrics from clients. The two possible functions" -" are ``fit_metrics_aggregation_fn`` and " -"``evaluate_metrics_aggregation_fn``." +"There's a dedicated ``#questions`` channel if you need help, but we'd " +"also love to hear who you are in ``#introductions``!" 
msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:554 msgid "" -"Let's create a simple weighted averaging function to aggregate the " -"``accuracy`` metric we return from ``evaluate``:" +"The `Flower Federated Learning Tutorial - Part 4 " +"`__ introduces ``Client``, the flexible API underlying " +"``NumPyClient``." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:660 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 +msgid "Customize the client" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 msgid "" -"The only thing left to do is to tell the strategy to call this function " -"whenever it receives evaluation metric dictionaries from the clients:" +"Welcome to the fourth part of the Flower federated learning tutorial. In " +"the previous parts of this tutorial, we introduced federated learning " +"with PyTorch and Flower (`part 1 `__), we learned how " +"strategies can be used to customize the execution on both the server and " +"the clients (`part 2 `__), and we built our own " +"custom strategy from scratch (`part 3 `__)." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:697 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 msgid "" -"We now have a full system that performs federated training and federated " -"evaluation. It uses the ``weighted_average`` function to aggregate custom" -" evaluation metrics and calculates a single ``accuracy`` metric across " -"all clients on the server side." +"In this notebook, we revisit ``NumPyClient`` and introduce a new " +"baseclass for building clients, simply named ``Client``. 
In previous " +"parts of this tutorial, we've based our client on ``NumPyClient``, a " +"convenience class which makes it easy to work with machine learning " +"libraries that have good NumPy interoperability. With ``Client``, we gain" +" a lot of flexibility that we didn't have before, but we'll also have to " +"do a few things the we didn't have to do before." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:699 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:18 msgid "" -"The other two categories of metrics (``losses_centralized`` and " -"``metrics_centralized``) are still empty because they only apply when " -"centralized evaluation is being used. Part two of the Flower tutorial " -"will cover centralized evaluation." +"Let's go deeper and see what it takes to move from ``NumPyClient`` to " +"``Client``!" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 -msgid "Final remarks" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:30 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:29 +msgid "Step 0: Preparation" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:117 msgid "" -"Congratulations, you just trained a convolutional neural network, " -"federated over 10 clients! With that, you understand the basics of " -"federated learning with Flower. The same approach you've seen can be used" -" with other machine learning frameworks (not just PyTorch) and tasks (not" -" just CIFAR-10 images classification), for example NLP with Hugging Face " -"Transformers or speech with SpeechBrain." 
+"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:715 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:259 +msgid "Step 1: Revisiting NumPyClient" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:261 msgid "" -"In the next notebook, we're going to cover some more advanced concepts. " -"Want to customize your strategy? Initialize parameters on the server " -"side? Or evaluate the aggregated model on the server side? We'll cover " -"all this and more in the next tutorial." +"So far, we've implemented our client by subclassing " +"``flwr.client.NumPyClient``. The three methods we implemented are " +"``get_parameters``, ``fit``, and ``evaluate``. Finally, we wrap the " +"creation of instances of this class in a function called ``client_fn``:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:309 msgid "" -"The `Flower Federated Learning Tutorial - Part 2 " -"`__ goes into more depth about strategies and all " -"the advanced things you can build with them." +"We've seen this before, there's nothing new so far. The only *tiny* " +"difference compared to the previous notebook is naming, we've changed " +"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " +"``numpyclient_fn``. Let's run it to see the output we get:" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 -msgid "Use a federated learning strategy" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:339 +msgid "" +"This works as expected, two clients are training for three rounds of " +"federated learning." 
msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:341 msgid "" -"Welcome to the next part of the federated learning tutorial. In previous " -"parts of this tutorial, we introduced federated learning with PyTorch and" -" Flower (`part 1 `__)." +"Let's dive a little bit deeper and discuss how Flower executes this " +"simulation. Whenever a client is selected to do some work, " +"``start_simulation`` calls the function ``numpyclient_fn`` to create an " +"instance of our ``FlowerNumPyClient`` (along with loading the model and " +"the data)." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:343 msgid "" -"In this notebook, we'll begin to customize the federated learning system " -"we built in the introductory notebook (again, using `Flower " -"`__ and `PyTorch `__)." +"But here's the perhaps surprising part: Flower doesn't actually use the " +"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " +"makes it look like a subclass of ``flwr.client.Client``, not " +"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " +"know how to handle ``NumPyClient``'s, it only knows how to handle " +"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " +"top of ``Client``." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 -msgid "Let's move beyond FedAvg with Flower strategies!" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:345 +msgid "" +"Instead of building on top of ``NumPyClient``, we can directly build on " +"top of ``Client``." 
msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:309 -msgid "Strategy customization" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:357 +msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:311 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:359 msgid "" -"So far, everything should look familiar if you've worked through the " -"introductory notebook. With that, we're ready to introduce a number of " -"new features." -msgstr "" - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 -msgid "Server-side parameter **initialization**" +"Let's try to do the same thing using ``Client`` instead of " +"``NumPyClient``." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:325 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:465 msgid "" -"Flower, by default, initializes the global model by asking one random " -"client for the initial parameters. In many cases, we want more control " -"over parameter initialization though. Flower therefore allows you to " -"directly pass the initial parameters to the Strategy:" +"Before we discuss the code in more detail, let's try to run it! Gotta " +"make sure our new ``Client``-based client works, right?" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:370 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:490 msgid "" -"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" -" from asking one of the clients for the initial parameters. If we look " -"closely, we can see that the logs do not show any calls to the " -"``FlowerClient.get_parameters`` method." +"That's it, we're now using ``Client``. It probably looks similar to what " +"we've done with ``NumPyClient``. So what's the difference?" 
msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:382 -msgid "Starting with a customized strategy" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:492 +msgid "" +"First of all, it's more code. But why? The difference comes from the fact" +" that ``Client`` expects us to take care of parameter serialization and " +"deserialization. For Flower to be able to send parameters over the " +"network, it eventually needs to turn these parameters into ``bytes``. " +"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " +"serialization. Turning raw bytes into something more useful (like NumPy " +"``ndarray``'s) is called deserialization. Flower needs to do both: it " +"needs to serialize parameters on the server-side and send them to the " +"client, the client needs to deserialize them to use them for local " +"training, and then serialize the updated parameters again to send them " +"back to the server, which (finally!) deserializes them again in order to " +"aggregate them with the updates received from other clients." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:384 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:495 msgid "" -"We've seen the function ``start_simulation`` before. It accepts a number " -"of arguments, amongst them the ``client_fn`` used to create " -"``FlowerClient`` instances, the number of clients to simulate " -"``num_clients``, the number of rounds ``num_rounds``, and the strategy." +"The only *real* difference between Client and NumPyClient is that " +"NumPyClient takes care of serialization and deserialization for you. It " +"can do so because it expects you to return parameters as NumPy ndarray's," +" and it knows how to handle these. This makes working with machine " +"learning libraries that have good NumPy support (most of them) a breeze." 
msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:386 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:497 msgid "" -"The strategy encapsulates the federated learning approach/algorithm, for " -"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " -"strategy this time:" +"In terms of API, there's one major difference: all methods in Client take" +" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " +"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " +"``NumPyClient`` on the other hand have multiple arguments (e.g., " +"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" +" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " +"``NumPyClient.fit``) if there are multiple things to handle. These " +"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " +"values you're used to from ``NumPyClient``." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:424 -msgid "Server-side parameter **evaluation**" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:510 +msgid "Step 3: Custom serialization" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:426 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:512 msgid "" -"Flower can evaluate the aggregated model on the server-side or on the " -"client-side. Client-side and server-side evaluation are similar in some " -"ways, but different in others." +"Here we will explore how to implement custom serialization with a simple " +"example." 
msgstr ""

-#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:428
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:514
 msgid ""
-"**Centralized Evaluation** (or *server-side evaluation*) is conceptually "
-"simple: it works the same way that evaluation in centralized machine "
-"learning does. If there is a server-side dataset that can be used for "
-"evaluation purposes, then that's great. We can evaluate the newly "
-"aggregated model after each round of training without having to send the "
-"model to clients. We're also fortunate in the sense that our entire "
-"evaluation dataset is available at all times."
+"But first what is serialization? Serialization is just the process of "
+"converting an object into raw bytes, and equally as important, "
+"deserialization is the process of converting raw bytes back into an "
+"object. This is very useful for network communication. Indeed, without "
+"serialization, you could not just send a Python object through the internet."
 msgstr ""

-#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:430
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:516
 msgid ""
-"**Federated Evaluation** (or *client-side evaluation*) is more complex, "
-"but also more powerful: it doesn't require a centralized dataset and "
-"allows us to evaluate models over a larger set of data, which often "
-"yields more realistic evaluation results. In fact, many scenarios require"
-" us to use **Federated Evaluation** if we want to get representative "
-"evaluation results at all. But this power comes at a cost: once we start "
-"to evaluate on the client side, we should be aware that our evaluation "
-"dataset can change over consecutive rounds of learning if those clients "
-"are not always available. Moreover, the dataset held by each client can "
-"also change over consecutive rounds. 
This can lead to evaluation results " -"that are not stable, so even if we would not change the model, we'd see " -"our evaluation results fluctuate over consecutive rounds." +"Federated Learning relies heavily on internet communication for training " +"by sending Python objects back and forth between the clients and the " +"server. This means that serialization is an essential part of Federated " +"Learning." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:433 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:518 msgid "" -"We've seen how federated evaluation works on the client side (i.e., by " -"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " -"how we can evaluate aggregated model parameters on the server-side:" +"In the following section, we will write a basic example where instead of " +"sending a serialized version of our ``ndarray``\\ s containing our " +"parameters, we will first convert the ``ndarray`` into sparse matrices, " +"before sending them. This technique can be used to save bandwidth, as in " +"certain cases where the weights of a model are sparse (containing many 0 " +"entries), converting them to a sparse matrix can greatly improve their " +"bytesize." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:490 -msgid "Sending/receiving arbitrary values to/from clients" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:521 +msgid "Our custom serialization/deserialization functions" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:492 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:523 msgid "" -"In some situations, we want to configure client-side execution (training," -" evaluation) from the server-side. One example for that is the server " -"asking the clients to train for a certain number of local epochs. 
Flower " -"provides a way to send configuration values from the server to the " -"clients using a dictionary. Let's look at an example where the clients " -"receive values from the server through the ``config`` parameter in " -"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " -"method receives the configuration dictionary through the ``config`` " -"parameter and can then read values from this dictionary. In this example," -" it reads ``server_round`` and ``local_epochs`` and uses those values to " -"improve the logging and configure the number of local training epochs:" +"This is where the real serialization/deserialization will happen, " +"especially in ``ndarray_to_sparse_bytes`` for serialization and " +"``sparse_bytes_to_ndarray`` for deserialization." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:546 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:525 msgid "" -"So how can we send this config dictionary from server to clients? The " -"built-in Flower Strategies provide way to do this, and it works similarly" -" to the way server-side evaluation works. We provide a function to the " -"strategy, and the strategy calls this function for every round of " -"federated learning:" +"Note that we imported the ``scipy.sparse`` library in order to convert " +"our arrays." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:576 -msgid "" -"Next, we'll just pass this function to the FedAvg strategy before " -"starting the simulation:" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:613 +msgid "Client-side" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 msgid "" -"As we can see, the client logs now include the current round of federated" -" learning (which they read from the ``config`` dictionary). 
We can also " -"configure local training to run for one epoch during the first and second" -" round of federated learning, and then for two epochs during the third " -"round." +"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " +"will just have to call our custom functions in our " +"``flwr.client.Client``." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:617 msgid "" -"Clients can also return arbitrary values to the server. To do so, they " -"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " -"used this concept throughout this notebook without mentioning it " -"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" -" key/value pair as the third return value in ``evaluate``." +"Indeed, in ``get_parameters`` we need to serialize the parameters we got " +"from our network using our custom ``ndarrays_to_sparse_parameters`` " +"defined above." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:627 -msgid "Scaling federated learning" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:619 +msgid "" +"In ``fit``, we first need to deserialize the parameters coming from the " +"server using our custom ``sparse_parameters_to_ndarrays`` and then we " +"need to serialize our local results with " +"``ndarrays_to_sparse_parameters``." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:629 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:621 msgid "" -"As a last step in this notebook, let's see how we can use Flower to " -"experiment with a large number of clients." +"In ``evaluate``, we will only need to deserialize the global parameters " +"with our custom function." 
msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:651 -#, python-format -msgid "" -"We now have 1000 partitions, each holding 45 training and 5 validation " -"examples. Given that the number of training examples on each client is " -"quite small, we should probably train the model a bit longer, so we " -"configure the clients to perform 3 local training epochs. We should also " -"adjust the fraction of clients selected for training during each round " -"(we don't want all 1000 clients participating in every round), so we " -"adjust ``fraction_fit`` to ``0.05``, which means that only 5% of " -"available clients (so 50 clients) will be selected for training each " -"round:" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:725 +msgid "Server-side" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:699 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:727 msgid "" -"In this notebook, we've seen how we can gradually enhance our system by " -"customizing the strategy, initializing parameters on the server side, " -"choosing a different strategy, and evaluating models on the server-side. " -"That's quite a bit of flexibility with so little code, right?" +"For this example, we will just use ``FedAvg`` as a strategy. To change " +"the serialization and deserialization here, we only need to reimplement " +"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" +" functions of the strategy will be inherited from the super class " +"``FedAvg``." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:701 -msgid "" -"In the later sections, we've seen how we can communicate arbitrary values" -" between server and clients to fully customize client-side execution. 
" -"With that capability, we built a large-scale Federated Learning " -"simulation using the Flower Virtual Client Engine and ran an experiment " -"involving 1000 clients in the same workload - all in a Jupyter Notebook!" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:729 +msgid "As you can see only one line as change in ``evaluate``:" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:735 msgid "" -"The `Flower Federated Learning Tutorial - Part 3 " -"`__ shows how to build a fully custom ``Strategy`` " -"from scratch." +"And for ``aggregate_fit``, we will first deserialize every result we " +"received:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 -msgid "What is Federated Learning?" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:744 +msgid "And then serialize the aggregated result:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 -msgid "" -"In this tutorial, you will learn what federated learning is, build your " -"first system in Flower, and gradually extend it. If you work through all " -"parts of the tutorial, you will be able to build advanced federated " -"learning systems that approach the current state of the art in the field." +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:903 +msgid "We can now run our custom serialization example!" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:934 msgid "" -"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " -"federated learning. Only a basic understanding of data science and Python" -" programming is assumed." +"In this part of the tutorial, we've seen how we can build clients by " +"subclassing either ``NumPyClient`` or ``Client``. 
``NumPyClient`` is a " +"convenience abstraction that makes it easier to work with machine " +"learning libraries that have good NumPy interoperability. ``Client`` is a" +" more flexible abstraction that allows us to do things that are not " +"possible in ``NumPyClient``. In order to do so, it requires us to handle " +"parameter serialization and deserialization ourselves." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:952 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the open-source Flower community on Slack to connect, ask questions, and " -"get help: `Join Slack `__ 🌼 We'd love to " -"hear from you in the ``#introductions`` channel! And if anything is " -"unclear, head over to the ``#questions`` channel." +"This is the final part of the Flower tutorial (for now!), " +"congratulations! You're now well equipped to understand the rest of the " +"documentation. There are many topics we didn't cover in the tutorial, we " +"recommend the following resources:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 -msgid "Let's get started!" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:954 +msgid "`Read Flower Docs `__" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 -msgid "Classic machine learning" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:955 +msgid "" +"`Check out Flower Code Examples " +"`__" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:956 msgid "" -"Before we begin to discuss federated learning, let us quickly recap how " -"most machine learning works today." 
+"`Use Flower Baselines for your research " +"`__" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:957 msgid "" -"In machine learning, we have a model, and we have data. The model could " -"be a neural network (as depicted here), or something else, like classical" -" linear regression." +"`Watch Flower Summit 2023 videos `__" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|31e4b1afa87c4b968327bbeafbf184d4|" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 +msgid "Get started with Flower" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 -msgid "Model and data" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 +msgid "Welcome to the Flower federated learning tutorial!" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 msgid "" -"We train the model using the data to perform a useful task. A task could " -"be to detect objects in images, transcribe an audio recording, or play a " -"game like Go." -msgstr "" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|c9d935b4284e4c389a33d86b33e07c0a|" +"In this notebook, we'll build a federated learning system using Flower, " +"`Flower Datasets `__ and PyTorch. In " +"part 1, we use PyTorch for the model training pipeline and data loading. " +"In part 2, we continue to federate the PyTorch-based pipeline using " +"Flower." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 -msgid "Train model using data" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:17 +msgid "Let's get stated!" 
msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:31 msgid "" -"Now, in practice, the training data we work with doesn't originate on the" -" machine we train the model on. It gets created somewhere else." +"Before we begin with any actual code, let's make sure that we have " +"everything we need." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:45 msgid "" -"It originates on a smartphone by the user interacting with an app, a car " -"collecting sensor data, a laptop receiving input via the keyboard, or a " -"smart speaker listening to someone trying to sing a song." +"Next, we install the necessary packages for PyTorch (``torch`` and " +"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " +"(``flwr``):" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|00727b5faffb468f84dd1b03ded88638|" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:105 +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" +" GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." 
msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 -msgid "Data on a phone" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:118 +msgid "Loading the data" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:120 msgid "" -"What's also important to mention, this \"somewhere else\" is usually not " -"just one place, it's many places. It could be several devices all running" -" the same app. But it could also be several organizations, all generating" -" data for the same task." +"Federated learning can be applied to many different types of tasks across" +" different domains. In this tutorial, we introduce federated learning by " +"training a simple convolutional neural network (CNN) on the popular " +"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " +"distinguish between images from ten different classes: 'airplane', " +"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " +"'truck'." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|daf0cf0ff4c24fd29439af78416cf47b|" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:131 +msgid "" +"We simulate having multiple datasets from multiple organizations (also " +"called the \"cross-silo\" setting in federated learning) by splitting the" +" original CIFAR-10 dataset into multiple partitions. Each partition will " +"represent the data from a single organization. We're doing this purely " +"for experimentation purposes, in the real world there's no need for data " +"splitting because each organization already has their own data (so the " +"data is naturally partitioned)." 
msgstr ""

-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115
-msgid "Data is on many devices"
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:133
+msgid ""
+"Each organization will act as a client in the federated learning system. "
+"So having ten organizations participate in a federation means having ten "
+"clients connected to the federated learning server."
 msgstr ""

-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:144
 msgid ""
-"So to use machine learning, or any kind of data analysis, the approach "
-"that has been used in the past was to collect all data on a central "
-"server. This server can be somewhere in a data center, or somewhere in "
-"the cloud."
+"Let's now create the Federated Dataset abstraction from ``flwr-"
+"datasets`` that partitions the CIFAR-10. We will create small training "
+"and test set for each edge device and wrap each of them into a PyTorch "
+"``DataLoader``:"
 msgstr ""

-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91
-msgid "|9f093007080d471d94ca90d3e9fde9b6|"
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:198
+msgid ""
+"We now have a list of ten training sets and ten validation sets "
+"(``trainloaders`` and ``valloaders``) representing the data of ten "
+"different organizations. Each ``trainloader``/``valloader`` pair contains"
+" 4500 training examples and 500 validation examples. There's also a "
+"single ``testloader`` (we did not split the test set). Again, this is "
+"only necessary for building research or educational systems, actual "
+"federated learning systems have their data naturally distributed across "
+"multiple partitions."
msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 -msgid "Central data collection" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:201 +msgid "" +"Let's take a look at the first batch of images and labels in the first " +"training set (i.e., ``trainloaders[0]``) before we move on:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:240 msgid "" -"Once all the data is collected in one place, we can finally use machine " -"learning algorithms to train our model on the data. This is the machine " -"learning approach that we've basically always relied on." +"The output above shows a random batch of images from the first " +"``trainloader`` in our list of ten ``trainloaders``. It also prints the " +"labels associated with each image (i.e., one of the ten possible labels " +"we've seen above). If you run the cell again, you should see another " +"batch of images." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|46a26e6150e0479fbd3dfd655f36eb13|" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:252 +msgid "Step 1: Centralized Training with PyTorch" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 -msgid "Central model training" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:263 +msgid "" +"Next, we're going to use PyTorch to define a simple convolutional neural " +"network. This introduction assumes basic familiarity with PyTorch, so it " +"doesn't cover the PyTorch-related aspects in full detail. If you want to " +"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " +"MINUTE BLITZ " +"`__." 
msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 -msgid "Challenges of classical machine learning" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:275 +msgid "Defining the model" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:277 msgid "" -"The classic machine learning approach we've just seen can be used in some" -" cases. Great examples include categorizing holiday photos, or analyzing " -"web traffic. Cases, where all the data is naturally available on a " -"centralized server." +"We use the simple CNN described in the `PyTorch tutorial " +"`__:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|3daba297595c4c7fb845d90404a6179a|" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:314 +msgid "Let's continue with the usual training and test functions:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 -msgid "Centralized possible" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:374 +msgid "Training the model" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:376 msgid "" -"But the approach can not be used in many other cases. Cases, where the " -"data is not available on a centralized server, or cases where the data " -"available on one server is not enough to train a good model." +"We now have all the basic building blocks we need: a dataset, a model, a " +"training function, and a test function. Let's put them together to train " +"the model on the dataset of one of our organizations " +"(``trainloaders[0]``). 
This simulates the reality of most machine " +"learning projects today: each organization has their own data and trains " +"models only on this internal data:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|5769874fa9c4455b80b2efda850d39d7|" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 +msgid "" +"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " +"in a test set accuracy of about 41%, which is not good, but at the same " +"time, it doesn't really matter for the purposes of this tutorial. The " +"intent was just to show a simplistic centralized training pipeline that " +"sets the stage for what comes next - federated learning!" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 -msgid "Centralized impossible" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 +msgid "Step 2: Federated Learning with Flower" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 msgid "" -"There are many reasons why the classic centralized machine learning " -"approach does not work for a large number of highly important real-world " -"use cases. Those reasons include:" +"Step 1 demonstrated a simple centralized training pipeline. All data was " +"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." +" Next, we'll simulate a situation where we have multiple datasets in " +"multiple organizations and where we train a model over these " +"organizations using federated learning." 
msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 +msgid "Updating model parameters" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 msgid "" -"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " -"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " -"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " -"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " -"protect sensitive data from being moved. In fact, those regulations " -"sometimes even prevent single organizations from combining their own " -"users' data for artificial intelligence training because those users live" -" in different parts of the world, and their data is governed by different" -" data protection regulations." +"In federated learning, the server sends the global model parameters to " +"the client, and the client updates the local model with the parameters " +"received from the server. It then trains the model on the local data " +"(which changes the model parameters locally) and sends the " +"updated/changed model parameters back to the server (or, alternatively, " +"it sends just the gradients back to the server, not the full model " +"parameters)." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 msgid "" -"**User preference**: In addition to regulation, there are use cases where" -" users just expect that no data leaves their device, ever. If you type " -"your passwords and credit card info into the digital keyboard of your " -"phone, you don't expect those passwords to end up on the server of the " -"company that developed that keyboard, do you? In fact, that use case was " -"the reason federated learning was invented in the first place." 
+"We need two helper functions to update the local model with parameters " +"received from the server and to get the updated model parameters from the" +" local model: ``set_parameters`` and ``get_parameters``. The following " +"two functions do just that for the PyTorch model above." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 msgid "" -"**Data volume**: Some sensors, like cameras, produce such a high data " -"volume that it is neither feasible nor economic to collect all the data " -"(due to, for example, bandwidth or communication efficiency). Think about" -" a national rail service with hundreds of train stations across the " -"country. If each of these train stations is outfitted with a number of " -"security cameras, the volume of raw on-device data they produce requires " -"incredibly powerful and exceedingly expensive infrastructure to process " -"and store. And most of the data isn't even useful." +"The details of how this works are not really important here (feel free to" +" consult the PyTorch documentation if you want to learn more). In " +"essence, we use ``state_dict`` to access PyTorch model parameter tensors." +" The parameter tensors are then converted to/from a list of NumPy " +"ndarray's (which Flower knows how to serialize/deserialize):" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 -msgid "Examples where centralized machine learning does not work include:" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 +msgid "Implementing a Flower client" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 msgid "" -"Sensitive healthcare records from multiple hospitals to train cancer " -"detection models" +"With that out of the way, let's move on to the interesting part. 
" +"Federated learning systems consist of a server and multiple clients. In " +"Flower, we create clients by implementing subclasses of " +"``flwr.client.Client`` or ``flwr.client.NumPyClient``. We use " +"``NumPyClient`` in this tutorial because it is easier to implement and " +"requires us to write less boilerplate." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 msgid "" -"Financial information from different organizations to detect financial " -"fraud" +"To implement the Flower client, we create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 -msgid "Location data from your electric car to make better range prediction" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 +msgid "``get_parameters``: Return the current local model parameters" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 -msgid "End-to-end encrypted messages to train better auto-complete models" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 +msgid "" +"``fit``: Receive model parameters from the server, train the model " +"parameters on the local data, and return the (updated) model parameters " +"to the server" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 msgid "" -"The popularity of privacy-enhancing systems like the `Brave " -"`__ browser or the `Signal `__ " -"messenger shows that users care about privacy. In fact, they choose the " -"privacy-enhancing version over other alternatives, if such an alternative " -"exists. But what can we do to apply machine learning and data science to " -"these cases to utilize private data? 
After all, these are all areas that " -"would benefit significantly from recent advances in AI." +"``evaluate``: Receive model parameters from the server, evaluate the " +"model parameters on the local data, and return the evaluation result to " +"the server" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 -msgid "Federated learning" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +msgid "" +"We mentioned that our clients will use the previously defined PyTorch " +"components for model training and evaluation. Let's see a simple Flower " +"client implementation that brings everything together:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 msgid "" -"Federated learning simply reverses this approach. It enables machine " -"learning on distributed data by moving the training to the data, instead " -"of moving the data to the training. Here's the single-sentence " -"explanation:" +"Our class ``FlowerClient`` defines how local training/evaluation will be " +"performed and allows Flower to call the local training/evaluation through" +" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" +" *single client* in our federated learning system. Federated learning " +"systems have multiple clients (otherwise, there's not much to federate), " +"so each client will be represented by its own instance of " +"``FlowerClient``. If we have, for example, three clients in our workload," +" then we'd have three instances of ``FlowerClient``. Flower calls " +"``FlowerClient.fit`` on the respective instance when the server selects a" +" particular client for training (and ``FlowerClient.evaluate`` for " +"evaluation)." 
msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 -msgid "Central machine learning: move the data to the computation" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:517 +msgid "Using the Virtual Client Engine" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 -msgid "Federated (machine) learning: move the computation to the data" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:519 +msgid "" +"In this notebook, we want to simulate a federated learning system with 10" +" clients on a single machine. This means that the server and all 10 " +"clients will live on a single machine and share resources such as CPU, " +"GPU, and memory. Having 10 clients would mean having 10 instances of " +"``FlowerClient`` in memory. Doing this on a single machine can quickly " +"exhaust the available memory resources, even if only a subset of these " +"clients participates in a single round of federated learning." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:521 msgid "" -"By doing so, it enables us to use machine learning (and other data " -"science approaches) in areas where it wasn't possible before. We can now " -"train excellent medical AI models by enabling different hospitals to work" -" together. We can solve financial fraud by training AI models on the data" -" of different financial institutions. We can build novel privacy-" -"enhancing applications (such as secure messaging) that have better built-" -"in AI than their non-privacy-enhancing alternatives. And those are just a" -" few of the examples that come to mind. As we deploy federated learning, " -"we discover more and more areas that can suddenly be reinvented because " -"they now have access to vast amounts of previously inaccessible data." 
+"In addition to the regular capabilities where server and clients run on " +"multiple machines, Flower, therefore, provides special simulation " +"capabilities that create ``FlowerClient`` instances only when they are " +"actually necessary for training or evaluation. To enable the Flower " +"framework to create clients when necessary, we need to implement a " +"function called ``client_fn`` that creates a ``FlowerClient`` instance on" +" demand. Flower calls ``client_fn`` whenever it needs an instance of one " +"particular client to call ``fit`` or ``evaluate`` (those instances are " +"usually discarded after use, so they should not keep any local state). " +"Clients are identified by a client ID, or short ``cid``. The ``cid`` can " +"be used, for example, to load different local data partitions for " +"different clients, as can be seen below:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 -msgid "" -"So how does federated learning work, exactly? Let's start with an " -"intuitive explanation." +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:556 +msgid "Starting the training" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 -msgid "Federated learning in five steps" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:558 +msgid "" +"We now have the class ``FlowerClient`` which defines client-side " +"training/evaluation and ``client_fn`` which allows Flower to create " +"``FlowerClient`` instances whenever it needs to call ``fit`` or " +"``evaluate`` on one particular client. The last step is to start the " +"actual simulation using ``flwr.simulation.start_simulation``." 
msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 -msgid "Step 0: Initialize global model" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:560 +msgid "" +"The function ``start_simulation`` accepts a number of arguments, amongst " +"them the ``client_fn`` used to create ``FlowerClient`` instances, the " +"number of clients to simulate (``num_clients``), the number of federated " +"learning rounds (``num_rounds``), and the strategy. The strategy " +"encapsulates the federated learning approach/algorithm, for example, " +"*Federated Averaging* (FedAvg)." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:562 msgid "" -"We start by initializing the model on the server. This is exactly the " -"same in classic centralized learning: we initialize the model parameters," -" either randomly or from a previously saved checkpoint." +"Flower has a number of built-in strategies, but we can also use our own " +"strategy implementations to customize nearly all aspects of the federated" +" learning approach. For this example, we use the built-in ``FedAvg`` " +"implementation and customize it using a few basic parameters. The last " +"step is the actual call to ``start_simulation`` which - you guessed it - " +"starts the simulation:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|ba47ffb421814b0f8f9fa5719093d839|" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:608 +msgid "Behind the scenes" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 -msgid "Initialize global model" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:610 +msgid "So how does this work? How does Flower execute this simulation?" 
msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:612 +#, python-format msgid "" -"Step 1: Send model to a number of connected organizations/devices (client" -" nodes)" +"When we call ``start_simulation``, we tell Flower that there are 10 " +"clients (``num_clients=10``). Flower then goes ahead and asks the " +"``FedAvg`` strategy to select clients. ``FedAvg`` knows that it should " +"select 100% of the available clients (``fraction_fit=1.0``), so it goes " +"ahead and selects 10 random clients (i.e., 100% of 10)." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:614 msgid "" -"Next, we send the parameters of the global model to the connected client " -"nodes (think: edge devices like smartphones or servers belonging to " -"organizations). This is to ensure that each participating node starts " -"their local training using the same model parameters. We often use only a" -" few of the connected nodes instead of all nodes. The reason for this is " -"that selecting more and more client nodes has diminishing returns." +"Flower then asks the selected 10 clients to train the model. When the " +"server receives the model parameter updates from the clients, it hands " +"those updates over to the strategy (*FedAvg*) for aggregation. The " +"strategy aggregates those updates and returns the new global model, which" +" then gets used in the next round of federated learning." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|aeac5bf79cbf497082e979834717e01b|" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:626 +msgid "Where's the accuracy?"
msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 -msgid "Send global model" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628 +msgid "" +"You may have noticed that all metrics except for ``losses_distributed`` " +"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630 msgid "" -"Step 2: Train model locally on the data of each organization/device " -"(client node)" +"Flower can automatically aggregate losses returned by individual clients," +" but it cannot do the same for metrics in the generic metrics dictionary " +"(the one with the ``accuracy`` key). Metrics dictionaries can contain " +"very different kinds of metrics and even key/value pairs that are not " +"metrics at all, so the framework does not (and can not) know how to " +"handle these automatically." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632 msgid "" -"Now that all (selected) client nodes have the latest version of the " -"global model parameters, they start the local training. They use their " -"own local dataset to train their own local model. They don't train the " -"model until full convergence, but they only train for a little while. " -"This could be as little as one epoch on the local data, or even just a " -"few steps (mini-batches)." +"As users, we need to tell the framework how to handle/aggregate these " +"custom metrics, and we do so by passing metric aggregation functions to " +"the strategy. The strategy will then call these functions whenever it " +"receives fit or evaluate metrics from clients. The two possible functions" +" are ``fit_metrics_aggregation_fn`` and " +"``evaluate_metrics_aggregation_fn``." 
msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|ce27ed4bbe95459dba016afc42486ba2|" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634 +msgid "" +"Let's create a simple weighted averaging function to aggregate the " +"``accuracy`` metric we return from ``evaluate``:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 -msgid "Train on local data" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:660 +msgid "" +"The only thing left to do is to tell the strategy to call this function " +"whenever it receives evaluation metric dictionaries from the clients:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 -msgid "Step 3: Return model updates back to the server" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:697 +msgid "" +"We now have a full system that performs federated training and federated " +"evaluation. It uses the ``weighted_average`` function to aggregate custom" +" evaluation metrics and calculates a single ``accuracy`` metric across " +"all clients on the server side." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:699 msgid "" -"After local training, each client node has a slightly different version " -"of the model parameters they originally received. The parameters are all " -"different because each client node has different examples in its local " -"dataset. The client nodes then send those model updates back to the " -"server. The model updates they send can either be the full model " -"parameters or just the gradients that were accumulated during local " -"training." +"The other two categories of metrics (``losses_centralized`` and " +"``metrics_centralized``) are still empty because they only apply when " +"centralized evaluation is being used. 
Part two of the Flower tutorial " +"will cover centralized evaluation." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|ae94a7f71dda443cbec2385751427d41|" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 +msgid "Final remarks" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 -msgid "Send model updates" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 +msgid "" +"Congratulations, you just trained a convolutional neural network, " +"federated over 10 clients! With that, you understand the basics of " +"federated learning with Flower. The same approach you've seen can be used" +" with other machine learning frameworks (not just PyTorch) and tasks (not" +" just CIFAR-10 image classification), for example NLP with Hugging Face " +"Transformers or speech with SpeechBrain." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 -msgid "Step 4: Aggregate model updates into a new global model" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:715 +msgid "" +"In the next notebook, we're going to cover some more advanced concepts. " +"Want to customize your strategy? Initialize parameters on the server " +"side? Or evaluate the aggregated model on the server side? We'll cover " +"all this and more in the next tutorial." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 msgid "" -"The server receives model updates from the selected client nodes. If it " -"selected 100 client nodes, it now has 100 slightly different versions of " -"the original global model, each trained on the local data of one client. " -"But didn't we want to have one model that contains the learnings from the" -" data of all 100 client nodes?"
+"The `Flower Federated Learning Tutorial - Part 2 " +"`__ goes into more depth about strategies and all " +"the advanced things you can build with them." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 +msgid "Use a federated learning strategy" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 msgid "" -"In order to get one single model, we have to combine all the model " -"updates we received from the client nodes. This process is called " -"*aggregation*, and there are many different ways to do it. The most basic" -" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " -"`__), often abbreviated as *FedAvg*. " -"*FedAvg* takes the 100 model updates and, as the name suggests, averages " -"them. To be more precise, it takes the *weighted average* of the model " -"updates, weighted by the number of examples each client used for " -"training. The weighting is important to make sure that each data example " -"has the same \"influence\" on the resulting global model. If one client " -"has 10 examples, and another client has 100 examples, then - without " -"weighting - each of the 10 examples would influence the global model ten " -"times as much as each of the 100 examples." +"Welcome to the next part of the federated learning tutorial. In previous " +"parts of this tutorial, we introduced federated learning with PyTorch and" +" Flower (`part 1 `__)." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll begin to customize the federated learning system " +"we built in the introductory notebook (again, using `Flower " +"`__ and `PyTorch `__)." 
msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 -msgid "Aggregate model updates" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 +msgid "Let's move beyond FedAvg with Flower strategies!" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 -msgid "Step 5: Repeat steps 1 to 4 until the model converges" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:309 +msgid "Strategy customization" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:311 msgid "" -"Steps 1 to 4 are what we call a single round of federated learning. The " -"global model parameters get sent to the participating client nodes (step " -"1), the client nodes train on their local data (step 2), they send their " -"updated models to the server (step 3), and the server then aggregates the" -" model updates to get a new version of the global model (step 4)." +"So far, everything should look familiar if you've worked through the " +"introductory notebook. With that, we're ready to introduce a number of " +"new features." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 +msgid "Server-side parameter **initialization**" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:325 msgid "" -"During a single round, each client node that participates in that " -"iteration only trains for a little while. This means that after the " -"aggregation step (step 4), we have a model that has been trained on all " -"the data of all participating client nodes, but only for a little while. 
" -"We then have to repeat this training process over and over again to " -"eventually arrive at a fully trained model that performs well across the " -"data of all client nodes." +"Flower, by default, initializes the global model by asking one random " +"client for the initial parameters. In many cases, we want more control " +"over parameter initialization though. Flower therefore allows you to " +"directly pass the initial parameters to the Strategy:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:370 msgid "" -"Congratulations, you now understand the basics of federated learning. " -"There's a lot more to discuss, of course, but that was federated learning" -" in a nutshell. In later parts of this tutorial, we will go into more " -"detail. Interesting questions include: How can we select the best client " -"nodes that should participate in the next round? What's the best way to " -"aggregate model updates? How can we handle failing client nodes " -"(stragglers)?" +"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" +" from asking one of the clients for the initial parameters. If we look " +"closely, we can see that the logs do not show any calls to the " +"``FlowerClient.get_parameters`` method." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:382 +msgid "Starting with a customized strategy" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:384 msgid "" -"Just like we can train a model on the decentralized data of different " -"client nodes, we can also evaluate the model on that data to receive " -"valuable metrics. This is called federated evaluation, sometimes " -"abbreviated as FE. 
In fact, federated evaluation is an integral part of " -"most federated learning systems." +"We've seen the function ``start_simulation`` before. It accepts a number " +"of arguments, amongst them the ``client_fn`` used to create " +"``FlowerClient`` instances, the number of clients to simulate " +"``num_clients``, the number of rounds ``num_rounds``, and the strategy." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:386 +msgid "" +"The strategy encapsulates the federated learning approach/algorithm, for " +"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " +"strategy this time:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:424 +msgid "Server-side parameter **evaluation**" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:426 +msgid "" +"Flower can evaluate the aggregated model on the server-side or on the " +"client-side. Client-side and server-side evaluation are similar in some " +"ways, but different in others." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:428 +msgid "" +"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " +"simple: it works the same way that evaluation in centralized machine " +"learning does. If there is a server-side dataset that can be used for " +"evaluation purposes, then that's great. We can evaluate the newly " +"aggregated model after each round of training without having to send the " +"model to clients. We're also fortunate in the sense that our entire " +"evaluation dataset is available at all times." 
+msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:430 +msgid "" +"**Federated Evaluation** (or *client-side evaluation*) is more complex, " +"but also more powerful: it doesn't require a centralized dataset and " +"allows us to evaluate models over a larger set of data, which often " +"yields more realistic evaluation results. In fact, many scenarios require" +" us to use **Federated Evaluation** if we want to get representative " +"evaluation results at all. But this power comes at a cost: once we start " +"to evaluate on the client side, we should be aware that our evaluation " +"dataset can change over consecutive rounds of learning if those clients " +"are not always available. Moreover, the dataset held by each client can " +"also change over consecutive rounds. This can lead to evaluation results " +"that are not stable, so even if we would not change the model, we'd see " +"our evaluation results fluctuate over consecutive rounds." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:433 +msgid "" +"We've seen how federated evaluation works on the client side (i.e., by " +"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " +"how we can evaluate aggregated model parameters on the server-side:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:490 +msgid "Sending/receiving arbitrary values to/from clients" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:492 +msgid "" +"In some situations, we want to configure client-side execution (training," +" evaluation) from the server-side. One example for that is the server " +"asking the clients to train for a certain number of local epochs. Flower " +"provides a way to send configuration values from the server to the " +"clients using a dictionary. 
Let's look at an example where the clients " +"receive values from the server through the ``config`` parameter in " +"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " +"method receives the configuration dictionary through the ``config`` " +"parameter and can then read values from this dictionary. In this example," +" it reads ``server_round`` and ``local_epochs`` and uses those values to " +"improve the logging and configure the number of local training epochs:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:546 +msgid "" +"So how can we send this config dictionary from server to clients? The " +"built-in Flower Strategies provide way to do this, and it works similarly" +" to the way server-side evaluation works. We provide a function to the " +"strategy, and the strategy calls this function for every round of " +"federated learning:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 -msgid "Federated analytics" -msgstr "" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:576 +msgid "" +"Next, we'll just pass this function to the FedAvg strategy before " +"starting the simulation:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +msgid "" +"As we can see, the client logs now include the current round of federated" +" learning (which they read from the ``config`` dictionary). We can also " +"configure local training to run for one epoch during the first and second" +" round of federated learning, and then for two epochs during the third " +"round." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +msgid "" +"Clients can also return arbitrary values to the server. To do so, they " +"return a dictionary from ``fit`` and/or ``evaluate``. 
We have seen and " +"used this concept throughout this notebook without mentioning it " +"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" +" key/value pair as the third return value in ``evaluate``." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:627 +msgid "Scaling federated learning" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:629 +msgid "" +"As a last step in this notebook, let's see how we can use Flower to " +"experiment with a large number of clients." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:651 +#, python-format +msgid "" +"We now have 1000 partitions, each holding 45 training and 5 validation " +"examples. Given that the number of training examples on each client is " +"quite small, we should probably train the model a bit longer, so we " +"configure the clients to perform 3 local training epochs. We should also " +"adjust the fraction of clients selected for training during each round " +"(we don't want all 1000 clients participating in every round), so we " +"adjust ``fraction_fit`` to ``0.05``, which means that only 5% of " +"available clients (so 50 clients) will be selected for training each " +"round:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:699 +msgid "" +"In this notebook, we've seen how we can gradually enhance our system by " +"customizing the strategy, initializing parameters on the server side, " +"choosing a different strategy, and evaluating models on the server-side. " +"That's quite a bit of flexibility with so little code, right?" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:701 +msgid "" +"In the later sections, we've seen how we can communicate arbitrary values" +" between server and clients to fully customize client-side execution. 
" +"With that capability, we built a large-scale Federated Learning " +"simulation using the Flower Virtual Client Engine and ran an experiment " +"involving 1000 clients in the same workload - all in a Jupyter Notebook!" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 +msgid "" +"The `Flower Federated Learning Tutorial - Part 3 " +"`__ shows how to build a fully custom ``Strategy`` from " +"scratch." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 +msgid "What is Federated Learning?" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 +msgid "" +"In this tutorial, you will learn what federated learning is, build your " +"first system in Flower, and gradually extend it. If you work through all " +"parts of the tutorial, you will be able to build advanced federated " +"learning systems that approach the current state of the art in the field." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +msgid "" +"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " +"federated learning. Only a basic understanding of data science and Python" +" programming is assumed." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the open-source Flower community on Slack to connect, ask questions, and " +"get help: `Join Slack `__ 🌼 We'd love to " +"hear from you in the ``#introductions`` channel! And if anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 +msgid "Let's get started!" 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 +msgid "Classic machine learning" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +msgid "" +"Before we begin to discuss federated learning, let us quickly recap how " +"most machine learning works today." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +msgid "" +"In machine learning, we have a model, and we have data. The model could " +"be a neural network (as depicted here), or something else, like classical" +" linear regression." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 +msgid "|2b5c62c529f6416f840c594cce062fbb|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 +msgid "Model and data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +msgid "" +"We train the model using the data to perform a useful task. A task could " +"be to detect objects in images, transcribe an audio recording, or play a " +"game like Go." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 +msgid "|90b334680cb7467d9a04d39b8e8dca9f|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 +msgid "Train model using data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 +msgid "" +"Now, in practice, the training data we work with doesn't originate on the" +" machine we train the model on. It gets created somewhere else." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 +msgid "" +"It originates on a smartphone by the user interacting with an app, a car " +"collecting sensor data, a laptop receiving input via the keyboard, or a " +"smart speaker listening to someone trying to sing a song." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 +msgid "|65764ceee89f4335bfd93fd0b115e831|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 +msgid "Data on a phone" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 +msgid "" +"What's also important to mention, this \"somewhere else\" is usually not " +"just one place, it's many places. It could be several devices all running" +" the same app. But it could also be several organizations, all generating" +" data for the same task." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 +msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 +msgid "Data is on many devices" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 +msgid "" +"So to use machine learning, or any kind of data analysis, the approach " +"that has been used in the past was to collect all data on a central " +"server. This server can be somewhere in a data center, or somewhere in " +"the cloud." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 +msgid "|11e95ac83a8548d8b3505b4663187d07|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 +msgid "Central data collection" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 +msgid "" +"Once all the data is collected in one place, we can finally use machine " +"learning algorithms to train our model on the data. This is the machine " +"learning approach that we've basically always relied on." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 +msgid "|1dab2f3a23674abc8a6731f20fa10730|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 +msgid "Central model training" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 +msgid "Challenges of classical machine learning" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +msgid "" +"The classic machine learning approach we've just seen can be used in some" +" cases. Great examples include categorizing holiday photos, or analyzing " +"web traffic. Cases, where all the data is naturally available on a " +"centralized server." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 +msgid "|7f0ee162da38450788493a21627306f7|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 +msgid "Centralized possible" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 +msgid "" +"But the approach can not be used in many other cases. Cases, where the " +"data is not available on a centralized server, or cases where the data " +"available on one server is not enough to train a good model." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 +msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 +msgid "Centralized impossible" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 +msgid "" +"There are many reasons why the classic centralized machine learning " +"approach does not work for a large number of highly important real-world " +"use cases. 
Those reasons include:" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 +msgid "" +"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " +"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " +"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " +"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " +"protect sensitive data from being moved. In fact, those regulations " +"sometimes even prevent single organizations from combining their own " +"users' data for artificial intelligence training because those users live" +" in different parts of the world, and their data is governed by different" +" data protection regulations." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 +msgid "" +"**User preference**: In addition to regulation, there are use cases where" +" users just expect that no data leaves their device, ever. If you type " +"your passwords and credit card info into the digital keyboard of your " +"phone, you don't expect those passwords to end up on the server of the " +"company that developed that keyboard, do you? In fact, that use case was " +"the reason federated learning was invented in the first place." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 +msgid "" +"**Data volume**: Some sensors, like cameras, produce such a high data " +"volume that it is neither feasible nor economic to collect all the data " +"(due to, for example, bandwidth or communication efficiency). Think about" +" a national rail service with hundreds of train stations across the " +"country. If each of these train stations is outfitted with a number of " +"security cameras, the volume of raw on-device data they produce requires " +"incredibly powerful and exceedingly expensive infrastructure to process " +"and store. And most of the data isn't even useful." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 +msgid "Examples where centralized machine learning does not work include:" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 +msgid "" +"Sensitive healthcare records from multiple hospitals to train cancer " +"detection models" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +msgid "" +"Financial information from different organizations to detect financial " +"fraud" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 +msgid "Location data from your electric car to make better range prediction" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 +msgid "End-to-end encrypted messages to train better auto-complete models" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 +msgid "" +"The popularity of privacy-enhancing systems like the `Brave " +"`__ browser or the `Signal `__ " +"messenger shows that users care about privacy. In fact, they choose the " +"privacy-enhancing version over other alternatives, if such an alternative" +" exists. But what can we do to apply machine learning and data science to" +" these cases to utilize private data? After all, these are all areas that" +" would benefit significantly from recent advances in AI." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 +msgid "Federated learning" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +msgid "" +"Federated learning simply reverses this approach. It enables machine " +"learning on distributed data by moving the training to the data, instead " +"of moving the data to the training. 
Here's the single-sentence " +"explanation:" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 +msgid "Central machine learning: move the data to the computation" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 +msgid "Federated (machine) learning: move the computation to the data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +msgid "" +"By doing so, it enables us to use machine learning (and other data " +"science approaches) in areas where it wasn't possible before. We can now " +"train excellent medical AI models by enabling different hospitals to work" +" together. We can solve financial fraud by training AI models on the data" +" of different financial institutions. We can build novel privacy-" +"enhancing applications (such as secure messaging) that have better built-" +"in AI than their non-privacy-enhancing alternatives. And those are just a" +" few of the examples that come to mind. As we deploy federated learning, " +"we discover more and more areas that can suddenly be reinvented because " +"they now have access to vast amounts of previously inaccessible data." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 +msgid "" +"So how does federated learning work, exactly? Let's start with an " +"intuitive explanation." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 +msgid "Federated learning in five steps" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 +msgid "Step 0: Initialize global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 +msgid "" +"We start by initializing the model on the server. This is exactly the " +"same in classic centralized learning: we initialize the model parameters," +" either randomly or from a previously saved checkpoint." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 +msgid "|5b1408eec0d746cdb91162a9107b6089|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 +msgid "Initialize global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 +msgid "" +"Step 1: Send model to a number of connected organizations/devices (client" +" nodes)" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 +msgid "" +"Next, we send the parameters of the global model to the connected client " +"nodes (think: edge devices like smartphones or servers belonging to " +"organizations). This is to ensure that each participating node starts " +"their local training using the same model parameters. We often use only a" +" few of the connected nodes instead of all nodes. The reason for this is " +"that selecting more and more client nodes has diminishing returns." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 +msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 +msgid "Send global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +msgid "" +"Step 2: Train model locally on the data of each organization/device " +"(client node)" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 +msgid "" +"Now that all (selected) client nodes have the latest version of the " +"global model parameters, they start the local training. They use their " +"own local dataset to train their own local model. They don't train the " +"model until full convergence, but they only train for a little while. " +"This could be as little as one epoch on the local data, or even just a " +"few steps (mini-batches)." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 +msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 +msgid "Train on local data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 +msgid "Step 3: Return model updates back to the server" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 +msgid "" +"After local training, each client node has a slightly different version " +"of the model parameters they originally received. The parameters are all " +"different because each client node has different examples in its local " +"dataset. The client nodes then send those model updates back to the " +"server. The model updates they send can either be the full model " +"parameters or just the gradients that were accumulated during local " +"training." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 +msgid "|ec1fe880237247e0975f52766775ab84|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 +msgid "Send model updates" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 +msgid "Step 4: Aggregate model updates into a new global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 +msgid "" +"The server receives model updates from the selected client nodes. If it " +"selected 100 client nodes, it now has 100 slightly different versions of " +"the original global model, each trained on the local data of one client. " +"But didn't we want to have one model that contains the learnings from the" +" data of all 100 client nodes?" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 +msgid "" +"In order to get one single model, we have to combine all the model " +"updates we received from the client nodes. 
This process is called " +"*aggregation*, and there are many different ways to do it. The most basic" +" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " +"`__), often abbreviated as *FedAvg*. " +"*FedAvg* takes the 100 model updates and, as the name suggests, averages " +"them. To be more precise, it takes the *weighted average* of the model " +"updates, weighted by the number of examples each client used for " +"training. The weighting is important to make sure that each data example " +"has the same \"influence\" on the resulting global model. If one client " +"has 10 examples, and another client has 100 examples, then - without " +"weighting - each of the 10 examples would influence the global model ten " +"times as much as each of the 100 examples." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 +msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 +msgid "Aggregate model updates" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 +msgid "Step 5: Repeat steps 1 to 4 until the model converges" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 +msgid "" +"Steps 1 to 4 are what we call a single round of federated learning. The " +"global model parameters get sent to the participating client nodes (step " +"1), the client nodes train on their local data (step 2), they send their " +"updated models to the server (step 3), and the server then aggregates the" +" model updates to get a new version of the global model (step 4)." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 +msgid "" +"During a single round, each client node that participates in that " +"iteration only trains for a little while. 
This means that after the " +"aggregation step (step 4), we have a model that has been trained on all " +"the data of all participating client nodes, but only for a little while. " +"We then have to repeat this training process over and over again to " +"eventually arrive at a fully trained model that performs well across the " +"data of all client nodes." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 +msgid "" +"Congratulations, you now understand the basics of federated learning. " +"There's a lot more to discuss, of course, but that was federated learning" +" in a nutshell. In later parts of this tutorial, we will go into more " +"detail. Interesting questions include: How can we select the best client " +"nodes that should participate in the next round? What's the best way to " +"aggregate model updates? How can we handle failing client nodes " +"(stragglers)?" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 +msgid "" +"Just like we can train a model on the decentralized data of different " +"client nodes, we can also evaluate the model on that data to receive " +"valuable metrics. This is called federated evaluation, sometimes " +"abbreviated as FE. In fact, federated evaluation is an integral part of " +"most federated learning systems." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 +msgid "Federated analytics" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 +msgid "" +"In many cases, machine learning isn't necessary to derive value from " +"data. Data analysis can yield valuable insights, but again, there's often" +" not enough data to get a clear answer. What's the average age at which " +"people develop a certain type of health condition? Federated analytics " +"enables such queries over multiple client nodes. 
It is usually used in " +"conjunction with other privacy-enhancing technologies like secure " +"aggregation to prevent the server from seeing the results submitted by " +"individual client nodes." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 +msgid "" +"Differential privacy (DP) is often mentioned in the context of Federated " +"Learning. It is a privacy-preserving method used when analyzing and " +"sharing statistical data, ensuring the privacy of individual " +"participants. DP achieves this by adding statistical noise to the model " +"updates, ensuring any individual participants’ information cannot be " +"distinguished or re-identified. This technique can be considered an " +"optimization that provides a quantifiable privacy protection measure." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 +msgid "Flower" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 +msgid "" +"Federated learning, federated evaluation, and federated analytics require" +" infrastructure to move machine learning models back and forth, train and" +" evaluate them on local data, and then aggregate the updated models. " +"Flower provides the infrastructure to do exactly that in an easy, " +"scalable, and secure way. In short, Flower presents a unified approach to" +" federated learning, analytics, and evaluation. It allows the user to " +"federate any workload, any ML framework, and any programming language." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 +msgid "|ff726bc5505e432388ee2fdd6ef420b9|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 +msgid "" +"Flower federated learning server and client nodes (car, scooter, personal" +" computer, roomba, and phone)" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 +msgid "" +"Congratulations, you just learned the basics of federated learning and " +"how it relates to the classic (centralized) machine learning!" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 +msgid "" +"In the next part of this tutorial, we are going to build a first " +"federated learning system with Flower." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 +msgid "" +"The `Flower Federated Learning Tutorial - Part 1 " +"`__ shows how to build a simple federated learning system " +"with PyTorch and Flower." +msgstr "" + +#~ msgid "" +#~ "Configuring and setting up the " +#~ ":code:`Dockerfile` as well the configuration" +#~ " for the devcontainer can be a " +#~ "bit more involved. The good thing " +#~ "is you want have to do it. " +#~ "Usually it should be enough to " +#~ "install Docker on your system and " +#~ "ensure its available on your command " +#~ "line. Additionally, install the `VSCode " +#~ "Containers Extension `_." 
+#~ msgstr "" + +#~ msgid "" +#~ "``flwr = { path = " +#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\" }`` " +#~ "(without extras)" +#~ msgstr "" + +#~ msgid "" +#~ "``flwr = { path = " +#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\", extras =" +#~ " [\"simulation\"] }`` (with extras)" +#~ msgstr "" + +#~ msgid "Upload the whl (e.g., ``flwr-1.7.0-py3-none-any.whl``)" +#~ msgstr "" + +#~ msgid "" +#~ "Change ``!pip install -q 'flwr[simulation]'" +#~ " torch torchvision matplotlib`` to ``!pip" +#~ " install -q 'flwr-1.7.0-py3-none-" +#~ "any.whl[simulation]' torch torchvision matplotlib``" +#~ msgstr "" + +#~ msgid "Before the release" +#~ msgstr "" + +#~ msgid "" +#~ "Update the changelog (``changelog.md``) with" +#~ " all relevant changes that happened " +#~ "after the last release. If the " +#~ "last release was tagged ``v1.2.0``, you" +#~ " can use the following URL to " +#~ "see all commits that got merged " +#~ "into ``main`` since then:" +#~ msgstr "" + +#~ msgid "" +#~ "`GitHub: Compare v1.2.0...main " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "Thank the authors who contributed since" +#~ " the last release. This can be " +#~ "done by running the ``./dev/add-" +#~ "shortlog.sh`` convenience script (it can " +#~ "be ran multiple times and will " +#~ "update the names in the list if" +#~ " new contributors were added in the" +#~ " meantime)." +#~ msgstr "" + +#~ msgid "" +#~ "Update the ``changelog.md`` section header " +#~ "``Unreleased`` to contain the version " +#~ "number and date for the release " +#~ "you are building. Create a pull " +#~ "request with the change." +#~ msgstr "" + +#~ msgid "" +#~ "Tag the release commit with the " +#~ "version number as soon as the PR" +#~ " is merged: ``git tag v0.12.3``, then" +#~ " ``git push --tags``. This will " +#~ "create a draft release on GitHub " +#~ "containing the correct artifacts and the" +#~ " relevant part of the changelog." 
+#~ msgstr "" + +#~ msgid "" +#~ "Note that, in order to build the" +#~ " documentation locally (with ``poetry run" +#~ " make html``, like described below), " +#~ "`Pandoc _` needs " +#~ "to be installed on the system." +#~ msgstr "" + +#~ msgid "" +#~ "If you're familiar with how contributing" +#~ " on GitHub works, you can directly" +#~ " checkout our `getting started guide " +#~ "for contributors `_ and examples " +#~ "of `good first contributions " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "This will create a `flower/` (or " +#~ "the name of your fork if you " +#~ "renamed it) folder in the current " +#~ "working directory." +#~ msgstr "" + +#~ msgid "Otherwise you can always find this option in the `Branches` page." +#~ msgstr "" + +#~ msgid "" +#~ "Once you click the `Compare & pull" +#~ " request` button, you should see " +#~ "something similar to this:" +#~ msgstr "" + +#~ msgid "Find the source file in `doc/source`" +#~ msgstr "" + +#~ msgid "" +#~ "Make the change in the `.rst` file" +#~ " (beware, the dashes under the title" +#~ " should be the same length as " +#~ "the title itself)" +#~ msgstr "" + +#~ msgid "Change the file name to `save-progress.rst`" +#~ msgstr "" + +#~ msgid "Add a redirect rule to `doc/source/conf.py`" +#~ msgstr "" + +#~ msgid "" +#~ "This will cause a redirect from " +#~ "`saving-progress.html` to `save-progress.html`," +#~ " old links will continue to work." +#~ msgstr "" + +#~ msgid "" +#~ "For the lateral navigation bar to " +#~ "work properly, it is very important " +#~ "to update the `index.rst` file as " +#~ "well. This is where we define the" +#~ " whole arborescence of the navbar." 
+#~ msgstr "" + +#~ msgid "Find and modify the file name in `index.rst`" +#~ msgstr "" + +#~ msgid "Add CI job to deploy the staging system when the `main` branch changes" +#~ msgstr "" + +#~ msgid "`Python 3.7 `_ or above" +#~ msgstr "" + +#~ msgid "" +#~ "First, clone the `Flower repository " +#~ "`_ from GitHub::" +#~ msgstr "" + +#~ msgid "" +#~ "Second, create a virtual environment " +#~ "(and activate it). If you chose to" +#~ " use :code:`pyenv` (with the :code" +#~ ":`pyenv-virtualenv` plugin) and already " +#~ "have it installed , you can use" +#~ " the following convenience script (by " +#~ "default it will use :code:`Python " +#~ "3.8.17`, but you can change it by" +#~ " providing a specific :code:``)::" +#~ msgstr "" + +#~ msgid "" +#~ "If you don't have :code:`pyenv` " +#~ "installed, you can use the following " +#~ "script that will install pyenv, set " +#~ "it up and create the virtual " +#~ "environment (with :code:`Python 3.8.17` by " +#~ "default)::" +#~ msgstr "" + +#~ msgid "" +#~ "Third, install the Flower package in " +#~ "development mode (think :code:`pip install " +#~ "-e`) along with all necessary " +#~ "dependencies::" +#~ msgstr "" + +#~ msgid "" +#~ "Developers could run the full set " +#~ "of Github Actions workflows under their" +#~ " local environment by using `Act " +#~ "_`. Please refer to" +#~ " the installation instructions under the" +#~ " linked repository and run the next" +#~ " command under Flower main cloned " +#~ "repository folder::" +#~ msgstr "" + +#~ msgid "" +#~ "Please note that these components are" +#~ " still experimental, the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." +#~ msgstr "" + +#~ msgid "" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. 
Therefore, we use " +#~ "an adaptive approach [andrew]_ that " +#~ "continuously adjusts the clipping threshold" +#~ " to track a prespecified quantile of" +#~ " the update norm distribution." +#~ msgstr "" + +#~ msgid "" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realises the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." +#~ msgstr "" + +#~ msgid "" +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." +#~ msgstr "" + +#~ msgid "" +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. 
In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." +#~ msgstr "" + +#~ msgid "" +#~ "McMahan, H. Brendan, et al. \"Learning" +#~ " differentially private recurrent language " +#~ "models.\" arXiv preprint arXiv:1710.06963 " +#~ "(2017)." +#~ msgstr "" + +#~ msgid "" +#~ "Andrew, Galen, et al. \"Differentially " +#~ "private learning with adaptive clipping.\" " +#~ "Advances in Neural Information Processing " +#~ "Systems 34 (2021): 17455-17466." +#~ msgstr "" + +#~ msgid "" +#~ "The following command can be used " +#~ "to verfiy if Flower was successfully " +#~ "installed. 
If everything worked, it " +#~ "should print the version of Flower " +#~ "to the command line::" +#~ msgstr "" + +#~ msgid "flwr (Python API reference)" +#~ msgstr "" + +#~ msgid "start_client" +#~ msgstr "" + +#~ msgid "start_numpy_client" +#~ msgstr "" + +#~ msgid "start_simulation" +#~ msgstr "" + +#~ msgid "server.start_server" +#~ msgstr "" + +#~ msgid "server.strategy" +#~ msgstr "" + +#~ msgid "server.strategy.Strategy" +#~ msgstr "" + +#~ msgid "server.strategy.FedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.FedAvgM" +#~ msgstr "" + +#~ msgid "server.strategy.FedMedian" +#~ msgstr "" + +#~ msgid "server.strategy.QFedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.FaultTolerantFedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.FedOpt" +#~ msgstr "" + +#~ msgid "server.strategy.FedProx" +#~ msgstr "" + +#~ msgid "server.strategy.FedAdagrad" +#~ msgstr "" + +#~ msgid "server.strategy.FedAdam" +#~ msgstr "" + +#~ msgid "server.strategy.FedYogi" +#~ msgstr "" + +#~ msgid "server.strategy.FedTrimmedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.Krum" +#~ msgstr "" + +#~ msgid "server.strategy.FedXgbNnAvg" +#~ msgstr "" + +#~ msgid "server.strategy.DPFedAvgAdaptive" +#~ msgstr "" + +#~ msgid "server.strategy.DPFedAvgFixed" +#~ msgstr "" + +#~ msgid "" +#~ "**Fix the incorrect return types of " +#~ "Strategy** " +#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ msgstr "" + +#~ msgid "" +#~ "The types of the return values in" +#~ " the docstrings in two methods " +#~ "(`aggregate_fit` and `aggregate_evaluate`) now " +#~ "match the hint types in the code." +#~ msgstr "" + +#~ msgid "" +#~ "Using the `client_fn`, Flower clients " +#~ "can interchangeably run as standalone " +#~ "processes (i.e. via `start_client`) or " +#~ "in simulation (i.e. via `start_simulation`)" +#~ " without requiring changes to how the" +#~ " client class is defined and " +#~ "instantiated. Calling `start_numpy_client` is " +#~ "now deprecated." 
+#~ msgstr "" + +#~ msgid "" +#~ "**Update Flower Examples** " +#~ "([#2384](https://github.com/adap/flower/pull/2384)), " +#~ "([#2425](https://github.com/adap/flower/pull/2425))" +#~ msgstr "" + +#~ msgid "" +#~ "**General updates to baselines** " +#~ "([#2301](https://github.com/adap/flower/pull/2301), " +#~ "[#2305](https://github.com/adap/flower/pull/2305), " +#~ "[#2307](https://github.com/adap/flower/pull/2307), " +#~ "[#2327](https://github.com/adap/flower/pull/2327), " +#~ "[#2435](https://github.com/adap/flower/pull/2435))" +#~ msgstr "" + +#~ msgid "" +#~ "**General updates to the simulation " +#~ "engine** ([#2331](https://github.com/adap/flower/pull/2331), " +#~ "[#2447](https://github.com/adap/flower/pull/2447), " +#~ "[#2448](https://github.com/adap/flower/pull/2448))" +#~ msgstr "" + +#~ msgid "" +#~ "**General improvements** " +#~ "([#2309](https://github.com/adap/flower/pull/2309), " +#~ "[#2310](https://github.com/adap/flower/pull/2310), " +#~ "[2313](https://github.com/adap/flower/pull/2313), " +#~ "[#2316](https://github.com/adap/flower/pull/2316), " +#~ "[2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349)," +#~ " [#2360](https://github.com/adap/flower/pull/2360), " +#~ "[#2402](https://github.com/adap/flower/pull/2402), " +#~ "[#2446](https://github.com/adap/flower/pull/2446))" +#~ msgstr "" + +#~ msgid "" +#~ "`flower-superlink --driver-api-address " +#~ "\"0.0.0.0:8081\" --fleet-api-address " +#~ "\"0.0.0.0:8086\"`" +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. 
If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." +#~ msgstr "" + +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using XGBoost and " +#~ "Flower!" +#~ msgstr "" + +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." +#~ msgstr "" + +#~ msgid "" +#~ "In this notebook, we'll build a " +#~ "federated learning system using Flower " +#~ "and PyTorch. In part 1, we use " +#~ "PyTorch for the model training pipeline" +#~ " and data loading. In part 2, " +#~ "we continue to federate the PyTorch-" +#~ "based pipeline using Flower." +#~ msgstr "" + +#~ msgid "" +#~ "Next, we install the necessary packages" +#~ " for PyTorch (``torch`` and " +#~ "``torchvision``) and Flower (``flwr``):" +#~ msgstr "" + +#~ msgid "" +#~ "Federated learning can be applied to " +#~ "many different types of tasks across " +#~ "different domains. In this tutorial, we" +#~ " introduce federated learning by training" +#~ " a simple convolutional neural network " +#~ "(CNN) on the popular CIFAR-10 dataset." 
+#~ " CIFAR-10 can be used to train " +#~ "image classifiers that distinguish between " +#~ "images from ten different classes:" +#~ msgstr "" + +#~ msgid "" +#~ "Each organization will act as a " +#~ "client in the federated learning system." +#~ " So having ten organizations participate" +#~ " in a federation means having ten " +#~ "clients connected to the federated " +#~ "learning server:" +#~ msgstr "" + +#~ msgid "" +#~ "Let's now load the CIFAR-10 training " +#~ "and test set, partition them into " +#~ "ten smaller datasets (each split into" +#~ " training and validation set), and " +#~ "wrap the resulting partitions by " +#~ "creating a PyTorch ``DataLoader`` for " +#~ "each of them:" +#~ msgstr "" + +#~ msgid "|ed6498a023f2477a9ccd57ee4514bda4|" +#~ msgstr "" + +#~ msgid "|5a4f742489ac4f819afefdd4dc9ab272|" +#~ msgstr "" + +#~ msgid "|3331c80cd05045f6a56524d8e3e76d0c|" +#~ msgstr "" + +#~ msgid "|4987b26884ec4b2c8f06c1264bcebe60|" +#~ msgstr "" + +#~ msgid "|ec8ae2d778aa493a986eb2fa29c220e5|" +#~ msgstr "" + +#~ msgid "|b8949d0669fe4f8eadc9a4932f4e9c57|" +#~ msgstr "" + +#~ msgid "|94ff30bdcd09443e8488b5f29932a541|" +#~ msgstr "" + +#~ msgid "|48dccf1d6d0544bba8917d2783a47719|" +#~ msgstr "" + +#~ msgid "|0366618db96b4f329f0d4372d1150fde|" +#~ msgstr "" + +#~ msgid "|ac80eddc76e6478081b1ca35eed029c0|" +#~ msgstr "" + +#~ msgid "|1ac94140c317450e89678db133c7f3c2|" +#~ msgstr "" + +#~ msgid "|f8850c6e96fc4430b55e53bba237a7c0|" +#~ msgstr "" + +#~ msgid "|4a368fdd3fc34adabd20a46752a68582|" +#~ msgstr "" + +#~ msgid "|40f69c17bb444652a7c8dfe577cd120e|" +#~ msgstr "" + +#~ msgid "" +#~ "Please follow the first section on " +#~ "`Run Flower using Docker " +#~ "`_ which covers this" +#~ " step in more detail." +#~ msgstr "" + +#~ msgid "" +#~ "Since `Flower 1.5 `_ we have " +#~ "introduced translations to our doc " +#~ "pages, but, as you might have " +#~ "noticed, the translations are often " +#~ "imperfect. 
If you speak languages other" +#~ " than English, you might be able " +#~ "to help us in our effort to " +#~ "make Federated Learning accessible to as" +#~ " many people as possible by " +#~ "contributing to those translations! This " +#~ "might also be a great opportunity " +#~ "for those wanting to become open " +#~ "source contributors with little prerequistes." +#~ msgstr "" + +#~ msgid "" +#~ "You input your translation in the " +#~ "textbox at the top and then, once" +#~ " you are happy with it, you " +#~ "either press ``Save and continue`` (to" +#~ " save the translation and go to " +#~ "the next untranslated string), ``Save " +#~ "and stay`` (to save the translation " +#~ "and stay on the same page), " +#~ "``Suggest`` (to add your translation to" +#~ " suggestions for other users to " +#~ "view), or ``Skip`` (to go to the" +#~ " next untranslated string without saving" +#~ " anything)." +#~ msgstr "" + +#~ msgid "" +#~ "The first thing we need to do " +#~ "is to define a message type for" +#~ " the RPC system in :code:`transport.proto`." +#~ " Note that we have to do it " +#~ "for both the request and response " +#~ "messages. For more details on the " +#~ "syntax of proto3, please see the " +#~ "`official documentation `_." +#~ msgstr "" + +#~ msgid "" +#~ "Source: `Official VSCode documentation " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "`Developing inside a Container " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "`Remote development in Containers " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "If you are not familiar with " +#~ "Flower Baselines, you should probably " +#~ "check-out our `contributing guide for " +#~ "baselines `_." +#~ msgstr "" + +#~ msgid "" +#~ "You should then check out the open" +#~ " `issues " +#~ "`_" +#~ " for baseline requests. If you find" +#~ " a baseline that you'd like to " +#~ "work on and that has no assignes," +#~ " feel free to assign it to " +#~ "yourself and start working on it!" 
+#~ msgstr "" + +#~ msgid "" +#~ "If you're familiar with how contributing" +#~ " on GitHub works, you can directly" +#~ " checkout our `getting started guide " +#~ "for contributors `_." +#~ msgstr "" + +#~ msgid "" +#~ "Git is a distributed version control " +#~ "tool. This allows for an entire " +#~ "codebase's history to be stored and " +#~ "every developer's machine. It is a " +#~ "software that will need to be " +#~ "installed on your local machine, you " +#~ "can follow this `guide " +#~ "`_ to set it up." +#~ msgstr "" + +#~ msgid "" +#~ "A fork is a personal copy of " +#~ "a GitHub repository. To create one " +#~ "for Flower, you must navigate to " +#~ "https://github.com/adap/flower (while connected to" +#~ " your GitHub account) and click the" +#~ " ``Fork`` button situated on the top" +#~ " right of the page." +#~ msgstr "" + +#~ msgid "" +#~ "Now we will add an upstream " +#~ "address to our repository. Still in " +#~ "the same directroy, we must run " +#~ "the following command:" +#~ msgstr "" + +#~ msgid "" +#~ "This can be achieved by following " +#~ "this `getting started guide for " +#~ "contributors`_ (note that you won't need" +#~ " to clone the repository). Once you" +#~ " are able to write code and " +#~ "test it, you can finally start " +#~ "making changes!" +#~ msgstr "" + +#~ msgid "" +#~ "For our documentation, we’ve started to" +#~ " use the `Diàtaxis framework " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "Our “How to” guides should have " +#~ "titles that continue the sencence “How" +#~ " to …”, for example, “How to " +#~ "upgrade to Flower 1.0”." +#~ msgstr "" + +#~ msgid "" +#~ "This issue is about changing the " +#~ "title of a doc from present " +#~ "continious to present simple." +#~ msgstr "" + +#~ msgid "" +#~ "Let's take the example of “Saving " +#~ "Progress” which we changed to “Save " +#~ "Progress”. Does this pass our check?" 
+#~ msgstr "" + +#~ msgid "Before: ”How to saving progress” ❌" +#~ msgstr "" + +#~ msgid "After: ”How to save progress” ✅" +#~ msgstr "" + +#~ msgid "" +#~ "This is a tiny change, but it’ll" +#~ " allow us to test your end-" +#~ "to-end setup. After cloning and " +#~ "setting up the Flower repo, here’s " +#~ "what you should do:" +#~ msgstr "" + +#~ msgid "" +#~ "Build the docs and check the " +#~ "result: ``_" +#~ msgstr "" + +#~ msgid "Here’s how to change the file name:" +#~ msgstr "" + +#~ msgid "" +#~ "Commit the changes (commit messages are" +#~ " always imperative: “Do something”, in " +#~ "this case “Change …”)" +#~ msgstr "" + +#~ msgid "" +#~ "`Good first contributions " +#~ "`_, where you should" +#~ " particularly look into the " +#~ ":code:`baselines` contributions." +#~ msgstr "" + +#~ msgid "" +#~ "If the section is completely empty " +#~ "(without any token) or non-existant, " +#~ "the changelog will just contain the " +#~ "title of the PR for the changelog" +#~ " entry, without any description." +#~ msgstr "" + +#~ msgid "" +#~ "Flower uses :code:`pyproject.toml` to manage" +#~ " dependencies and configure development " +#~ "tools (the ones which support it). " +#~ "Poetry is a build tool which " +#~ "supports `PEP 517 " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing machine learning" +#~ " workload with `FedBN `_, a federated training strategy" +#~ " designed for non-iid data. We " +#~ "are using PyTorch to train a " +#~ "Convolutional Neural Network(with Batch " +#~ "Normalization layers) on the CIFAR-10 " +#~ "dataset. When applying FedBN, only few" +#~ " changes needed compared to `Example: " +#~ "PyTorch - From Centralized To Federated" +#~ " `_." +#~ msgstr "" + +#~ msgid "" +#~ "All files are revised based on " +#~ "`Example: PyTorch - From Centralized To" +#~ " Federated `_. 
The " +#~ "only thing to do is modifying the" +#~ " file called :code:`cifar.py`, revised part" +#~ " is shown below:" +#~ msgstr "" + +#~ msgid "" +#~ "So far this should all look fairly" +#~ " familiar if you've used PyTorch " +#~ "before. Let's take the next step " +#~ "and use what we've built to create" +#~ " a federated learning system within " +#~ "FedBN, the sytstem consists of one " +#~ "server and two clients." +#~ msgstr "" + +#~ msgid "" +#~ "If you have read `Example: PyTorch " +#~ "- From Centralized To Federated " +#~ "`_, the following" +#~ " parts are easy to follow, onyl " +#~ ":code:`get_parameters` and :code:`set_parameters` " +#~ "function in :code:`client.py` needed to " +#~ "revise. If not, please read the " +#~ "`Example: PyTorch - From Centralized To" +#~ " Federated `_. first." +#~ msgstr "" + +#~ msgid "Example: Walk-Through PyTorch & MNIST" +#~ msgstr "" + +#~ msgid "" +#~ "In this tutorial we will learn, " +#~ "how to train a Convolutional Neural " +#~ "Network on MNIST using Flower and " +#~ "PyTorch." +#~ msgstr "" + +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead an install PyTorch and " +#~ "the **torchvision** library:" +#~ msgstr "" + +#~ msgid "Ready... Set... Train!" +#~ msgstr "" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Basic MNIST Example" +#~ " `_. " +#~ "This will allow you see how easy" +#~ " it is to wrap your code with" +#~ " Flower and begin training in a " +#~ "federated way. We provide you with " +#~ "two helper scripts, namely *run-" +#~ "server.sh*, and *run-clients.sh*. Don't " +#~ "be afraid to look inside, they are" +#~ " simple enough =)." 
+#~ msgstr "" + +#~ msgid "" +#~ "Go ahead and launch on a terminal" +#~ " the *run-server.sh* script first as" +#~ " follows:" +#~ msgstr "" + +#~ msgid "Now that the server is up and running, go ahead and launch the clients." +#~ msgstr "" + +#~ msgid "" +#~ "Et voilà! You should be seeing the" +#~ " training procedure and, after a few" +#~ " iterations, the test accuracy for " +#~ "each client." +#~ msgstr "" + +#~ msgid "Now, let's see what is really happening inside." +#~ msgstr "" + +#~ msgid "" +#~ "Inside the server helper script *run-" +#~ "server.sh* you will find the following" +#~ " code that basically runs the " +#~ ":code:`server.py`" +#~ msgstr "" + +#~ msgid "" +#~ "We can go a bit deeper and " +#~ "see that :code:`server.py` simply launches " +#~ "a server that will coordinate three " +#~ "rounds of training. Flower Servers are" +#~ " very customizable, but for simple " +#~ "workloads, we can start a server " +#~ "using the :ref:`start_server ` function and leave " +#~ "all the configuration possibilities at " +#~ "their default values, as seen below." +#~ msgstr "" + +#~ msgid "" +#~ "Next, let's take a look at the " +#~ "*run-clients.sh* file. You will see " +#~ "that it contains the main loop " +#~ "that starts a set of *clients*." +#~ msgstr "" + +#~ msgid "" +#~ "**cid**: is the client ID. It is" +#~ " an integer that uniquely identifies " +#~ "client identifier." +#~ msgstr "" + +#~ msgid "**sever_address**: String that identifies IP and port of the server." +#~ msgstr "" + +#~ msgid "" +#~ "**nb_clients**: This defines the number " +#~ "of clients being created. This piece " +#~ "of information is not required by " +#~ "the client, but it helps us " +#~ "partition the original MNIST dataset to" +#~ " make sure that every client is " +#~ "working on unique subsets of both " +#~ "*training* and *test* sets." 
+#~ msgstr "" + +#~ msgid "" +#~ "Again, we can go deeper and look" +#~ " inside :code:`flwr_example/quickstart-" +#~ "pytorch/client.py`. After going through the" +#~ " argument parsing code at the " +#~ "beginning of our :code:`main` function, " +#~ "you will find a call to " +#~ ":code:`mnist.load_data`. This function is " +#~ "responsible for partitioning the original " +#~ "MNIST datasets (*training* and *test*) " +#~ "and returning a :code:`torch.utils.data.DataLoader`" +#~ " s for each of them. We then" +#~ " instantiate a :code:`PytorchMNISTClient` object" +#~ " with our client ID, our DataLoaders," +#~ " the number of epochs in each " +#~ "round, and which device we want to" +#~ " use for training (CPU or GPU)." +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`PytorchMNISTClient` object when " +#~ "finally passed to :code:`fl.client.start_client` " +#~ "along with the server's address as " +#~ "the training process begins." +#~ msgstr "" + +#~ msgid "A Closer Look" +#~ msgstr "" + +#~ msgid "" +#~ "Now, let's look closely into the " +#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" +#~ ".quickstart-pytorch.mnist` and see what it" +#~ " is doing:" +#~ msgstr "" + +#~ msgid "" +#~ "The first thing to notice is that" +#~ " :code:`PytorchMNISTClient` instantiates a CNN" +#~ " model inside its constructor" +#~ msgstr "" + +#~ msgid "" +#~ "The code for the CNN is available" +#~ " under :code:`quickstart-pytorch.mnist` and " +#~ "it is reproduced below. It is the" +#~ " same network found in `Basic MNIST" +#~ " Example " +#~ "`_." 
+#~ msgstr "" + +#~ msgid "" +#~ "The second thing to notice is that" +#~ " :code:`PytorchMNISTClient` class inherits from" +#~ " the :code:`fl.client.Client`, and hence it" +#~ " must implement the following methods:" +#~ msgstr "" + +#~ msgid "" +#~ "When comparing the abstract class to " +#~ "its derived class :code:`PytorchMNISTClient` " +#~ "you will notice that :code:`fit` calls" +#~ " a :code:`train` function and that " +#~ ":code:`evaluate` calls a :code:`test`: " +#~ "function." +#~ msgstr "" + +#~ msgid "" +#~ "These functions can both be found " +#~ "inside the same :code:`quickstart-" +#~ "pytorch.mnist` module:" +#~ msgstr "" + +#~ msgid "" +#~ "Observe that these functions encapsulate " +#~ "regular training and test loops and " +#~ "provide :code:`fit` and :code:`evaluate` with" +#~ " final statistics for each round. You" +#~ " could substitute them with your " +#~ "custom train and test loops and " +#~ "change the network architecture, and the" +#~ " entire example would still work " +#~ "flawlessly. As a matter of fact, " +#~ "why not try and modify the code" +#~ " to an example of your liking?" +#~ msgstr "" + +#~ msgid "Give It a Try" +#~ msgstr "" + +#~ msgid "" +#~ "Looking through the quickstart code " +#~ "description above will have given a " +#~ "good understanding of how *clients* and" +#~ " *servers* work in Flower, how to " +#~ "run a simple experiment, and the " +#~ "internals of a client wrapper. Here " +#~ "are a few things you could try " +#~ "on your own and get more " +#~ "experience with Flower:" +#~ msgstr "" + +#~ msgid "" +#~ "Try and change :code:`PytorchMNISTClient` so" +#~ " it can accept different architectures." +#~ msgstr "" + +#~ msgid "" +#~ "Modify the :code:`train` function so " +#~ "that it accepts different optimizers" +#~ msgstr "" + +#~ msgid "" +#~ "Modify the :code:`test` function so that" +#~ " it proves not only the top-1 " +#~ "(regular accuracy) but also the top-5" +#~ " accuracy?" 
+#~ msgstr "" + +#~ msgid "" +#~ "Go larger! Try to adapt the code" +#~ " to larger images and datasets. Why" +#~ " not try training on ImageNet with" +#~ " a ResNet-50?" +#~ msgstr "" + +#~ msgid "You are ready now. Enjoy learning in a federated way!" +#~ msgstr "" + +#~ msgid "Differential privacy" +#~ msgstr "" + +#~ msgid "" +#~ "Flower provides differential privacy (DP) " +#~ "wrapper classes for the easy integration" +#~ " of the central DP guarantees " +#~ "provided by DP-FedAvg into training " +#~ "pipelines defined in any of the " +#~ "various ML frameworks that Flower is " +#~ "compatible with." +#~ msgstr "" + +#~ msgid "" +#~ "Please note that these components are" +#~ " still experimental; the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." +#~ msgstr "" + +#~ msgid "" +#~ "The name DP-FedAvg is misleading " +#~ "since it can be applied on top " +#~ "of any FL algorithm that conforms " +#~ "to the general structure prescribed by" +#~ " the FedOpt family of algorithms." +#~ msgstr "" + +#~ msgid "DP-FedAvg" +#~ msgstr "" + +#~ msgid "" +#~ "DP-FedAvg, originally proposed by " +#~ "McMahan et al. [mcmahan]_ and extended" +#~ " by Andrew et al. [andrew]_, is " +#~ "essentially FedAvg with the following " +#~ "modifications." +#~ msgstr "" + +#~ msgid "" +#~ "**Clipping** : The influence of each " +#~ "client's update is bounded by clipping" +#~ " it. This is achieved by enforcing" +#~ " a cap on the L2 norm of " +#~ "the update, scaling it down if " +#~ "needed." +#~ msgstr "" + +#~ msgid "" +#~ "**Noising** : Gaussian noise, calibrated " +#~ "to the clipping threshold, is added " +#~ "to the average computed at the " +#~ "server." +#~ msgstr "" + +#~ msgid "" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. 
This variability is " +#~ "crucial in understanding its impact on" +#~ " differential privacy guarantees, emphasizing " +#~ "the need for an adaptive approach " +#~ "[andrew]_ that continuously adjusts the " +#~ "clipping threshold to track a " +#~ "prespecified quantile of the update norm" +#~ " distribution." +#~ msgstr "" + +#~ msgid "Simplifying Assumptions" +#~ msgstr "" + +#~ msgid "" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realizes the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." +#~ msgstr "" + +#~ msgid "" +#~ "**Fixed-size subsampling** :Fixed-size " +#~ "subsamples of the clients must be " +#~ "taken at each round, as opposed to" +#~ " variable-sized Poisson subsamples." +#~ msgstr "" + +#~ msgid "" +#~ "**Unweighted averaging** : The contributions" +#~ " from all the clients must weighted" +#~ " equally in the aggregate to " +#~ "eliminate the requirement for the server" +#~ " to know in advance the sum of" +#~ " the weights of all clients available" +#~ " for selection." +#~ msgstr "" + +#~ msgid "" +#~ "**No client failures** : The set " +#~ "of available clients must stay constant" +#~ " across all rounds of training. In" +#~ " other words, clients cannot drop out" +#~ " or fail." +#~ msgstr "" + +#~ msgid "" +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold, while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." +#~ msgstr "" + +#~ msgid "" +#~ "These restrictions are in line with " +#~ "constraints imposed by Andrew et al. " +#~ "[andrew]_." 
+#~ msgstr "" + +#~ msgid "Customizable Responsibility for Noise injection" +#~ msgstr "" + +#~ msgid "" +#~ "In contrast to other implementations " +#~ "where the addition of noise is " +#~ "performed at the server, you can " +#~ "configure the site of noise injection" +#~ " to better match your threat model." +#~ " We provide users with the " +#~ "flexibility to set up the training " +#~ "such that each client independently adds" +#~ " a small amount of noise to the" +#~ " clipped update, with the result that" +#~ " simply aggregating the noisy updates " +#~ "is equivalent to the explicit addition" +#~ " of noise to the non-noisy " +#~ "aggregate at the server." +#~ msgstr "" + +#~ msgid "" +#~ "To be precise, if we let :math:`m`" +#~ " be the number of clients sampled " +#~ "each round and :math:`\\sigma_\\Delta` be " +#~ "the scale of the total Gaussian " +#~ "noise that needs to be added to" +#~ " the sum of the model updates, " +#~ "we can use simple maths to show" +#~ " that this is equivalent to each " +#~ "client adding noise with scale " +#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." +#~ msgstr "" + +#~ msgid "Wrapper-based approach" +#~ msgstr "" + +#~ msgid "" +#~ "Introducing DP to an existing workload" +#~ " can be thought of as adding an" +#~ " extra layer of security around it." +#~ " This inspired us to provide the " +#~ "additional server and client-side logic" +#~ " needed to make the training process" +#~ " differentially private as wrappers for " +#~ "instances of the :code:`Strategy` and " +#~ ":code:`NumPyClient` abstract classes respectively." +#~ " This wrapper-based approach has the" +#~ " advantage of being easily composable " +#~ "with other wrappers that someone might" +#~ " contribute to the Flower library in" +#~ " the future, e.g., for secure " +#~ "aggregation. 
Using Inheritance instead can " +#~ "be tedious because that would require" +#~ " the creation of new sub- classes " +#~ "every time a new class implementing " +#~ ":code:`Strategy` or :code:`NumPyClient` is " +#~ "defined." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 -msgid "" -"In many cases, machine learning isn't necessary to derive value from " -"data. Data analysis can yield valuable insights, but again, there's often" -" not enough data to get a clear answer. What's the average age at which " -"people develop a certain type of health condition? Federated analytics " -"enables such queries over multiple client nodes. It is usually used in " -"conjunction with other privacy-enhancing technologies like secure " -"aggregation to prevent the server from seeing the results submitted by " -"individual client nodes." -msgstr "" +#~ msgid "Server-side logic" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 -msgid "Differential Privacy" -msgstr "" +#~ msgid "" +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean-valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 -msgid "" -"Differential privacy (DP) is often mentioned in the context of Federated " -"Learning. It is a privacy-preserving method used when analyzing and " -"sharing statistical data, ensuring the privacy of individual " -"participants. DP achieves this by adding statistical noise to the model " -"updates, ensuring any individual participants’ information cannot be " -"distinguished or re-identified. This technique can be considered an " -"optimization that provides a quantifiable privacy protection measure." -msgstr "" +#~ msgid "" +#~ "The server-side capabilities required " +#~ "for the original version of DP-" +#~ "FedAvg, i.e., the one which performed" +#~ " fixed clipping, can be completely " +#~ "captured with the help of wrapper " +#~ "logic for just the following two " +#~ "methods of the :code:`Strategy` abstract " +#~ "class." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 -msgid "Flower" -msgstr "" +#~ msgid "" +#~ ":code:`configure_fit()` : The config " +#~ "dictionary being sent by the wrapped " +#~ ":code:`Strategy` to each client needs to" +#~ " be augmented with an additional " +#~ "value equal to the clipping threshold" +#~ " (keyed under :code:`dpfedavg_clip_norm`) and," +#~ " if :code:`server_side_noising=true`, another one" +#~ " equal to the scale of the " +#~ "Gaussian noise that needs to be " +#~ "added at the client (keyed under " +#~ ":code:`dpfedavg_noise_stddev`). This entails " +#~ "*post*-processing of the results returned " +#~ "by the wrappee's implementation of " +#~ ":code:`configure_fit()`." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 -msgid "" -"Federated learning, federated evaluation, and federated analytics require" -" infrastructure to move machine learning models back and forth, train and" -" evaluate them on local data, and then aggregate the updated models. 
" -"Flower provides the infrastructure to do exactly that in an easy, " -"scalable, and secure way. In short, Flower presents a unified approach to" -" federated learning, analytics, and evaluation. It allows the user to " -"federate any workload, any ML framework, and any programming language." -msgstr "" +#~ msgid "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|08cb60859b07461588fe44e55810b050|" -msgstr "" +#~ msgid "" +#~ "We can't directly change the aggregation" +#~ " function of the wrapped strategy to" +#~ " force it to add noise to the" +#~ " aggregate, hence we simulate client-" +#~ "side noising to implement server-side" +#~ " noising." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 -msgid "" -"Flower federated learning server and client nodes (car, scooter, personal" -" computer, roomba, and phone)" -msgstr "" +#~ msgid "" +#~ "These changes have been put together " +#~ "into a class called :code:`DPFedAvgFixed`, " +#~ "whose constructor accepts the strategy " +#~ "being decorated, the clipping threshold " +#~ "and the number of clients sampled " +#~ "every round as compulsory arguments. The" +#~ " user is expected to specify the " +#~ "clipping threshold since the order of" +#~ " magnitude of the update norms is " +#~ "highly dependent on the model being " +#~ "trained and providing a default value" +#~ " would be misleading. The number of" +#~ " clients sampled at every round is" +#~ " required to calculate the amount of" +#~ " noise that must be added to " +#~ "each individual update, either by the" +#~ " server or the clients." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 -msgid "" -"Congratulations, you just learned the basics of federated learning and " -"how it relates to the classic (centralized) machine learning!" -msgstr "" +#~ msgid "" +#~ "The additional functionality required to " +#~ "facilitate adaptive clipping has been " +#~ "provided in :code:`DPFedAvgAdaptive`, a " +#~ "subclass of :code:`DPFedAvgFixed`. It " +#~ "overrides the above-mentioned methods to" +#~ " do the following." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 -msgid "" -"In the next part of this tutorial, we are going to build a first " -"federated learning system with Flower." 
-msgstr "" +#~ msgid "" +#~ ":code:`configure_fit()` : It intercepts the" +#~ " config dict returned by " +#~ ":code:`super.configure_fit()` to add the " +#~ "key-value pair " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " +#~ "which the client interprets as an " +#~ "instruction to include an indicator bit" +#~ " (1 if update norm <= clipping " +#~ "threshold, 0 otherwise) in the results" +#~ " returned by it." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 -msgid "" -"The `Flower Federated Learning Tutorial - Part 1 " -"`__ shows how to build a simple federated learning system " -"with PyTorch and Flower." -msgstr "" +#~ msgid "" +#~ ":code:`aggregate_fit()` : It follows a " +#~ "call to :code:`super.aggregate_fit()` with one" +#~ " to :code:`__update_clip_norm__()`, a procedure" +#~ " which adjusts the clipping threshold " +#~ "on the basis of the indicator bits" +#~ " received from the sampled clients." +#~ msgstr "" + +#~ msgid "Client-side logic" +#~ msgstr "" #~ msgid "" -#~ "Configuring and setting up the " -#~ ":code:`Dockerfile` as well the configuration" -#~ " for the devcontainer can be a " -#~ "bit more involved. The good thing " -#~ "is you want have to do it. " -#~ "Usually it should be enough to " -#~ "install Docker on your system and " -#~ "ensure its available on your command " -#~ "line. Additionally, install the `VSCode " -#~ "Containers Extension `_." +#~ "The client-side capabilities required " +#~ "can be completely captured through " +#~ "wrapper logic for just the :code:`fit()`" +#~ " method of the :code:`NumPyClient` abstract" +#~ " class. To be precise, we need " +#~ "to *post-process* the update computed" +#~ " by the wrapped client to clip " +#~ "it, if necessary, to the threshold " +#~ "value supplied by the server as " +#~ "part of the config dictionary. 
In " +#~ "addition to this, it may need to" +#~ " perform some extra work if either" +#~ " (or both) of the following keys " +#~ "are also present in the dict." #~ msgstr "" #~ msgid "" -#~ "``flwr = { path = " -#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\" }`` " -#~ "(without extras)" +#~ ":code:`dpfedavg_noise_stddev` : Generate and " +#~ "add the specified amount of noise " +#~ "to the clipped update." #~ msgstr "" #~ msgid "" -#~ "``flwr = { path = " -#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\", extras =" -#~ " [\"simulation\"] }`` (with extras)" +#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" +#~ " metrics dict in the :code:`FitRes` " +#~ "object being returned to the server " +#~ "with an indicator bit, calculated as " +#~ "described earlier." #~ msgstr "" -#~ msgid "Upload the whl (e.g., ``flwr-1.7.0-py3-none-any.whl``)" +#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" #~ msgstr "" #~ msgid "" -#~ "Change ``!pip install -q 'flwr[simulation]'" -#~ " torch torchvision matplotlib`` to ``!pip" -#~ " install -q 'flwr-1.7.0-py3-none-" -#~ "any.whl[simulation]' torch torchvision matplotlib``" +#~ "Assume you have trained for :math:`n`" +#~ " rounds with sampling fraction :math:`q`" +#~ " and noise multiplier :math:`z`. In " +#~ "order to calculate the :math:`\\epsilon` " +#~ "value this would result in for a" +#~ " particular :math:`\\delta`, the following " +#~ "script may be used." #~ msgstr "" -#~ msgid "Before the release" +#~ msgid "" +#~ "McMahan et al. \"Learning Differentially " +#~ "Private Recurrent Language Models.\" " +#~ "International Conference on Learning " +#~ "Representations (ICLR), 2017." #~ msgstr "" #~ msgid "" -#~ "Update the changelog (``changelog.md``) with" -#~ " all relevant changes that happened " -#~ "after the last release. 
If the " -#~ "last release was tagged ``v1.2.0``, you" -#~ " can use the following URL to " -#~ "see all commits that got merged " -#~ "into ``main`` since then:" +#~ "Andrew, Galen, et al. \"Differentially " +#~ "Private Learning with Adaptive Clipping.\" " +#~ "Advances in Neural Information Processing " +#~ "Systems (NeurIPS), 2021." #~ msgstr "" #~ msgid "" -#~ "`GitHub: Compare v1.2.0...main " -#~ "`_" +#~ "This can be achieved by customizing " +#~ "an existing strategy or by `implementing" +#~ " a custom strategy from scratch " +#~ "`_. Here's a nonsensical " +#~ "example that customizes :code:`FedAvg` by " +#~ "adding a custom ``\"hello\": \"world\"`` " +#~ "configuration key/value pair to the " +#~ "config dict of a *single client* " +#~ "(only the first client in the " +#~ "list, the other clients in this " +#~ "round to not receive this \"special\"" +#~ " config value):" #~ msgstr "" #~ msgid "" -#~ "Thank the authors who contributed since" -#~ " the last release. This can be " -#~ "done by running the ``./dev/add-" -#~ "shortlog.sh`` convenience script (it can " -#~ "be ran multiple times and will " -#~ "update the names in the list if" -#~ " new contributors were added in the" -#~ " meantime)." +#~ "More sophisticated implementations can use " +#~ ":code:`configure_fit` to implement custom " +#~ "client selection logic. A client will" +#~ " only participate in a round if " +#~ "the corresponding :code:`ClientProxy` is " +#~ "included in the the list returned " +#~ "from :code:`configure_fit`." #~ msgstr "" #~ msgid "" -#~ "Update the ``changelog.md`` section header " -#~ "``Unreleased`` to contain the version " -#~ "number and date for the release " -#~ "you are building. Create a pull " -#~ "request with the change." +#~ "More sophisticated implementations can use " +#~ ":code:`configure_evaluate` to implement custom " +#~ "client selection logic. 
A client will" +#~ " only participate in a round if " +#~ "the corresponding :code:`ClientProxy` is " +#~ "included in the the list returned " +#~ "from :code:`configure_evaluate`." #~ msgstr "" #~ msgid "" -#~ "Tag the release commit with the " -#~ "version number as soon as the PR" -#~ " is merged: ``git tag v0.12.3``, then" -#~ " ``git push --tags``. This will " -#~ "create a draft release on GitHub " -#~ "containing the correct artifacts and the" -#~ " relevant part of the changelog." +#~ "`How to run Flower using Docker " +#~ "`_" #~ msgstr "" #~ msgid "" -#~ "Note that, in order to build the" -#~ " documentation locally (with ``poetry run" -#~ " make html``, like described below), " -#~ "`Pandoc _` needs " -#~ "to be installed on the system." +#~ "Ray Dashboard: ``_" #~ msgstr "" #~ msgid "" -#~ "If you're familiar with how contributing" -#~ " on GitHub works, you can directly" -#~ " checkout our `getting started guide " -#~ "for contributors `_ and examples " -#~ "of `good first contributions " -#~ "`_." +#~ "Ray Metrics: ``_" +#~ msgstr "" + +#~ msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" #~ msgstr "" #~ msgid "" -#~ "This will create a `flower/` (or " -#~ "the name of your fork if you " -#~ "renamed it) folder in the current " -#~ "working directory." +#~ ":py:obj:`ClientApp `\\ " +#~ "\\(client\\_fn\\[\\, mods\\]\\)" #~ msgstr "" -#~ msgid "Otherwise you can always find this option in the `Branches` page." +#~ msgid ":py:obj:`flwr.server.driver `\\" +#~ msgstr "" + +#~ msgid "Flower driver SDK." 
+#~ msgstr "" + +#~ msgid "driver" #~ msgstr "" #~ msgid "" -#~ "Once you click the `Compare & pull" -#~ " request` button, you should see " -#~ "something similar to this:" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" #~ msgstr "" -#~ msgid "Find the source file in `doc/source`" +#~ msgid "" +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" #~ msgstr "" #~ msgid "" -#~ "Make the change in the `.rst` file" -#~ " (beware, the dashes under the title" -#~ " should be the same length as " -#~ "the title itself)" +#~ ":py:obj:`GrpcDriver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" #~ msgstr "" -#~ msgid "Change the file name to `save-progress.rst`" +#~ msgid "`GrpcDriver` provides access to the gRPC Driver API/service." #~ msgstr "" -#~ msgid "Add a redirect rule to `doc/source/conf.py`" +#~ msgid ":py:obj:`get_nodes `\\ \\(\\)" #~ msgstr "" #~ msgid "" -#~ "This will cause a redirect from " -#~ "`saving-progress.html` to `save-progress.html`," -#~ " old links will continue to work." +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(task\\_ids\\)" +#~ msgstr "" + +#~ msgid "Get task results." #~ msgstr "" #~ msgid "" -#~ "For the lateral navigation bar to " -#~ "work properly, it is very important " -#~ "to update the `index.rst` file as " -#~ "well. This is where we define the" -#~ " whole arborescence of the navbar." +#~ ":py:obj:`push_task_ins " +#~ "`\\ " +#~ "\\(task\\_ins\\_list\\)" #~ msgstr "" -#~ msgid "Find and modify the file name in `index.rst`" +#~ msgid "Schedule tasks." #~ msgstr "" -#~ msgid "Add CI job to deploy the staging system when the `main` branch changes" +#~ msgid "GrpcDriver" #~ msgstr "" -#~ msgid "`Python 3.7 `_ or above" +#~ msgid ":py:obj:`connect `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Connect to the Driver API." 
#~ msgstr "" #~ msgid "" -#~ "First, clone the `Flower repository " -#~ "`_ from GitHub::" +#~ ":py:obj:`create_run " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "Request for run ID." #~ msgstr "" #~ msgid "" -#~ "Second, create a virtual environment " -#~ "(and activate it). If you chose to" -#~ " use :code:`pyenv` (with the :code" -#~ ":`pyenv-virtualenv` plugin) and already " -#~ "have it installed , you can use" -#~ " the following convenience script (by " -#~ "default it will use :code:`Python " -#~ "3.8.17`, but you can change it by" -#~ " providing a specific :code:``)::" +#~ ":py:obj:`disconnect " +#~ "`\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Disconnect from the Driver API." #~ msgstr "" #~ msgid "" -#~ "If you don't have :code:`pyenv` " -#~ "installed, you can use the following " -#~ "script that will install pyenv, set " -#~ "it up and create the virtual " -#~ "environment (with :code:`Python 3.8.17` by " -#~ "default)::" +#~ ":py:obj:`get_nodes `\\" +#~ " \\(req\\)" +#~ msgstr "" + +#~ msgid "Get client IDs." #~ msgstr "" #~ msgid "" -#~ "Third, install the Flower package in " -#~ "development mode (think :code:`pip install " -#~ "-e`) along with all necessary " -#~ "dependencies::" +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(req\\)" #~ msgstr "" #~ msgid "" -#~ "Developers could run the full set " -#~ "of Github Actions workflows under their" -#~ " local environment by using `Act " -#~ "_`. Please refer to" -#~ " the installation instructions under the" -#~ " linked repository and run the next" -#~ " command under Flower main cloned " -#~ "repository folder::" +#~ ":py:obj:`push_task_ins " +#~ "`\\ \\(req\\)" #~ msgstr "" #~ msgid "" -#~ "Please note that these components are" -#~ " still experimental, the correct " -#~ "configuration of DP for a specific " -#~ "task is still an unsolved problem." +#~ "Optionally specify the type of actor " +#~ "to use. 
The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "running the clients' jobs (i.e. their" +#~ " `fit()` method)." #~ msgstr "" #~ msgid "" -#~ "The distribution of the update norm " -#~ "has been shown to vary from " -#~ "task-to-task and to evolve as " -#~ "training progresses. Therefore, we use " -#~ "an adaptive approach [andrew]_ that " -#~ "continuously adjusts the clipping threshold" -#~ " to track a prespecified quantile of" -#~ " the update norm distribution." +#~ "Much effort went into a completely " +#~ "restructured Flower docs experience. The " +#~ "documentation on [flower.ai/docs](flower.ai/docs) is" +#~ " now divided into Flower Framework, " +#~ "Flower Baselines, Flower Android SDK, " +#~ "Flower iOS SDK, and code example " +#~ "projects." #~ msgstr "" #~ msgid "" -#~ "We make (and attempt to enforce) a" -#~ " number of assumptions that must be" -#~ " satisfied to ensure that the " -#~ "training process actually realises the " -#~ ":math:`(\\epsilon, \\delta)` guarantees the " -#~ "user has in mind when configuring " -#~ "the setup." +#~ "The first preview release of Flower " +#~ "Baselines has arrived! We're kickstarting " +#~ "Flower Baselines with implementations of " +#~ "FedOpt (FedYogi, FedAdam, FedAdagrad), FedBN," +#~ " and FedAvgM. Check the documentation " +#~ "on how to use [Flower " +#~ "Baselines](https://flower.ai/docs/using-baselines.html). " +#~ "With this first preview release we're" +#~ " also inviting the community to " +#~ "[contribute their own " +#~ "baselines](https://flower.ai/docs/contributing-baselines.html)." #~ msgstr "" #~ msgid "" -#~ "The first two are useful for " -#~ "eliminating a multitude of complications " -#~ "associated with calibrating the noise to" -#~ " the clipping threshold while the " -#~ "third one is required to comply " -#~ "with the assumptions of the privacy " -#~ "analysis." 
+#~ "Flower usage examples used to be " +#~ "bundled with Flower in a package " +#~ "called ``flwr_example``. We are migrating " +#~ "those examples to standalone projects to" +#~ " make them easier to use. All " +#~ "new examples are based in the " +#~ "directory `examples " +#~ "`_." #~ msgstr "" -#~ msgid "" -#~ "The first version of our solution " -#~ "was to define a decorator whose " -#~ "constructor accepted, among other things, " -#~ "a boolean valued variable indicating " -#~ "whether adaptive clipping was to be " -#~ "enabled or not. We quickly realized " -#~ "that this would clutter its " -#~ ":code:`__init__()` function with variables " -#~ "corresponding to hyperparameters of adaptive" -#~ " clipping that would remain unused " -#~ "when it was disabled. A cleaner " -#~ "implementation could be achieved by " -#~ "splitting the functionality into two " -#~ "decorators, :code:`DPFedAvgFixed` and " -#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" -#~ " classing the former. The constructors " -#~ "for both classes accept a boolean " -#~ "parameter :code:`server_side_noising`, which, as " -#~ "the name suggests, determines where " -#~ "noising is to be performed." +#~ msgid "The following examples are available as standalone projects." #~ msgstr "" -#~ msgid "" -#~ ":code:`aggregate_fit()`: We check whether any" -#~ " of the sampled clients dropped out" -#~ " or failed to upload an update " -#~ "before the round timed out. In " -#~ "that case, we need to abort the" -#~ " current round, discarding any successful" -#~ " updates that were received, and move" -#~ " on to the next one. On the " -#~ "other hand, if all clients responded " -#~ "successfully, we must force the " -#~ "averaging of the updates to happen " -#~ "in an unweighted manner by intercepting" -#~ " the :code:`parameters` field of " -#~ ":code:`FitRes` for each received update " -#~ "and setting it to 1. 
Furthermore, " -#~ "if :code:`server_side_noising=true`, each update " -#~ "is perturbed with an amount of " -#~ "noise equal to what it would have" -#~ " been subjected to had client-side" -#~ " noising being enabled. This entails " -#~ "*pre*-processing of the arguments to " -#~ "this method before passing them on " -#~ "to the wrappee's implementation of " -#~ ":code:`aggregate_fit()`." +#~ msgid "Quickstart TensorFlow/Keras" #~ msgstr "" #~ msgid "" -#~ "McMahan, H. Brendan, et al. \"Learning" -#~ " differentially private recurrent language " -#~ "models.\" arXiv preprint arXiv:1710.06963 " -#~ "(2017)." +#~ "`Quickstart TensorFlow (Tutorial) " +#~ "`_" #~ msgstr "" #~ msgid "" -#~ "Andrew, Galen, et al. \"Differentially " -#~ "private learning with adaptive clipping.\" " -#~ "Advances in Neural Information Processing " -#~ "Systems 34 (2021): 17455-17466." +#~ "`Quickstart PyTorch (Tutorial) " +#~ "`_" #~ msgstr "" #~ msgid "" -#~ "The following command can be used " -#~ "to verfiy if Flower was successfully " -#~ "installed. If everything worked, it " -#~ "should print the version of Flower " -#~ "to the command line::" +#~ "`PyTorch: From Centralized To Federated " +#~ "(Tutorial) `_" #~ msgstr "" -#~ msgid "flwr (Python API reference)" +#~ msgid "Legacy Examples (`flwr_example`)" #~ msgstr "" -#~ msgid "start_client" +#~ msgid "" +#~ "The useage examples in `flwr_example` " +#~ "are deprecated and will be removed " +#~ "in the future. New examples are " +#~ "provided as standalone projects in " +#~ "`examples `_." #~ msgstr "" -#~ msgid "start_numpy_client" +#~ msgid "Extra Dependencies" #~ msgstr "" -#~ msgid "start_simulation" +#~ msgid "" +#~ "The core Flower framework keeps a " +#~ "minimal set of dependencies. The " +#~ "examples demonstrate Flower in the " +#~ "context of different machine learning " +#~ "frameworks, so additional dependencies need" +#~ " to be installed before an example" +#~ " can be run." 
#~ msgstr "" -#~ msgid "server.start_server" +#~ msgid "For PyTorch examples::" #~ msgstr "" -#~ msgid "server.strategy" +#~ msgid "For TensorFlow examples::" #~ msgstr "" -#~ msgid "server.strategy.Strategy" +#~ msgid "For both PyTorch and TensorFlow examples::" #~ msgstr "" -#~ msgid "server.strategy.FedAvg" +#~ msgid "" +#~ "Please consult :code:`pyproject.toml` for a" +#~ " full list of possible extras " +#~ "(section :code:`[tool.poetry.extras]`)." #~ msgstr "" -#~ msgid "server.strategy.FedAvgM" +#~ msgid "PyTorch Examples" #~ msgstr "" -#~ msgid "server.strategy.FedMedian" +#~ msgid "" +#~ "Our PyTorch examples are based on " +#~ "PyTorch 1.7. They should work with " +#~ "other releases as well. So far, we" +#~ " provide the following examples." #~ msgstr "" -#~ msgid "server.strategy.QFedAvg" +#~ msgid "CIFAR-10 Image Classification" #~ msgstr "" -#~ msgid "server.strategy.FaultTolerantFedAvg" +#~ msgid "" +#~ "`CIFAR-10 and CIFAR-100 " +#~ "`_ are " +#~ "popular RGB image datasets. The Flower" +#~ " CIFAR-10 example uses PyTorch to " +#~ "train a simple CNN classifier in a" +#~ " federated learning setup with two " +#~ "clients." #~ msgstr "" -#~ msgid "server.strategy.FedOpt" +#~ msgid "First, start a Flower server:" #~ msgstr "" -#~ msgid "server.strategy.FedProx" +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" #~ msgstr "" -#~ msgid "server.strategy.FedAdagrad" +#~ msgid "Then, start the two clients in a new terminal window:" #~ msgstr "" -#~ msgid "server.strategy.FedAdam" +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" #~ msgstr "" -#~ msgid "server.strategy.FedYogi" +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." #~ msgstr "" -#~ msgid "server.strategy.FedTrimmedAvg" +#~ msgid "ImageNet-2012 Image Classification" #~ msgstr "" -#~ msgid "server.strategy.Krum" +#~ msgid "" +#~ "`ImageNet-2012 `_ is " +#~ "one of the major computer vision " +#~ "datasets. 
The Flower ImageNet example " +#~ "uses PyTorch to train a ResNet-18 " +#~ "classifier in a federated learning setup" +#~ " with ten clients." #~ msgstr "" -#~ msgid "server.strategy.FedXgbNnAvg" +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" #~ msgstr "" -#~ msgid "server.strategy.DPFedAvgAdaptive" +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" #~ msgstr "" -#~ msgid "server.strategy.DPFedAvgFixed" +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." #~ msgstr "" -#~ msgid "" -#~ "**Fix the incorrect return types of " -#~ "Strategy** " -#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ msgid "TensorFlow Examples" #~ msgstr "" #~ msgid "" -#~ "The types of the return values in" -#~ " the docstrings in two methods " -#~ "(`aggregate_fit` and `aggregate_evaluate`) now " -#~ "match the hint types in the code." +#~ "Our TensorFlow examples are based on " +#~ "TensorFlow 2.0 or newer. So far, " +#~ "we provide the following examples." #~ msgstr "" -#~ msgid "" -#~ "Using the `client_fn`, Flower clients " -#~ "can interchangeably run as standalone " -#~ "processes (i.e. via `start_client`) or " -#~ "in simulation (i.e. via `start_simulation`)" -#~ " without requiring changes to how the" -#~ " client class is defined and " -#~ "instantiated. Calling `start_numpy_client` is " -#~ "now deprecated." +#~ msgid "Fashion-MNIST Image Classification" #~ msgstr "" #~ msgid "" -#~ "**Update Flower Examples** " -#~ "([#2384](https://github.com/adap/flower/pull/2384)), " -#~ "([#2425](https://github.com/adap/flower/pull/2425))" +#~ "`Fashion-MNIST `_ is often used as " +#~ "the \"Hello, world!\" of machine " +#~ "learning. We follow this tradition and" +#~ " provide an example which samples " +#~ "random local datasets from Fashion-MNIST" +#~ " and trains a simple image " +#~ "classification model over those partitions." 
#~ msgstr "" -#~ msgid "" -#~ "**General updates to baselines** " -#~ "([#2301](https://github.com/adap/flower/pull/2301), " -#~ "[#2305](https://github.com/adap/flower/pull/2305), " -#~ "[#2307](https://github.com/adap/flower/pull/2307), " -#~ "[#2327](https://github.com/adap/flower/pull/2327), " -#~ "[#2435](https://github.com/adap/flower/pull/2435))" +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" #~ msgstr "" -#~ msgid "" -#~ "**General updates to the simulation " -#~ "engine** ([#2331](https://github.com/adap/flower/pull/2331), " -#~ "[#2447](https://github.com/adap/flower/pull/2447), " -#~ "[#2448](https://github.com/adap/flower/pull/2448))" +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" #~ msgstr "" #~ msgid "" -#~ "**General improvements** " -#~ "([#2309](https://github.com/adap/flower/pull/2309), " -#~ "[#2310](https://github.com/adap/flower/pull/2310), " -#~ "[2313](https://github.com/adap/flower/pull/2313), " -#~ "[#2316](https://github.com/adap/flower/pull/2316), " -#~ "[2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349)," -#~ " [#2360](https://github.com/adap/flower/pull/2360), " -#~ "[#2402](https://github.com/adap/flower/pull/2402), " -#~ "[#2446](https://github.com/adap/flower/pull/2446))" +#~ "For more details, see " +#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +#~ msgstr "" + +#~ msgid ":fa:`eye,mr-1` Can Flower run on Juptyter Notebooks / Google Colab?" #~ msgstr "" #~ msgid "" -#~ "`flower-superlink --driver-api-address " -#~ "\"0.0.0.0:8081\" --fleet-api-address " -#~ "\"0.0.0.0:8086\"`" +#~ "`Flower meets KOSMoS `_." #~ msgstr "" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. 
In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the full code example: " +#~ "[https://github.com/adap/flower/tree/main/examples/quickstart-" +#~ "huggingface](https://github.com/adap/flower/tree/main/examples" +#~ "/quickstart-huggingface)." #~ msgstr "" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. The string " -#~ ":code:`\"[::]:8080\"` tells the client which" -#~ " server to connect to. In our " -#~ "case we can run the server and " -#~ "the client on the same machine, " -#~ "therefore we use :code:`\"[::]:8080\"`. If " -#~ "we run a truly federated workload " -#~ "with the server and clients running " -#~ "on different machines, all that needs" -#~ " to change is the :code:`server_address`" -#~ " we point the client at." +#~ "First of all, for running the " +#~ "Flower Python server, it is recommended" +#~ " to create a virtual environment and" +#~ " run everything within a `virtualenv " +#~ "`_. " +#~ "For the Flower client implementation in" +#~ " iOS, it is recommended to use " +#~ "Xcode as our IDE." #~ msgstr "" #~ msgid "" -#~ "Let's build a horizontal federated " -#~ "learning system using XGBoost and " -#~ "Flower!" 
+#~ "Since CoreML does not allow the " +#~ "model parameters to be seen before " +#~ "training, and accessing the model " +#~ "parameters during or after the training" +#~ " can only be done by specifying " +#~ "the layer name, we need to know" +#~ " this informations beforehand, through " +#~ "looking at the model specification, " +#~ "which are written as proto files. " +#~ "The implementation can be seen in " +#~ ":code:`MLModelInspect`." #~ msgstr "" #~ msgid "" -#~ "Please refer to the `full code " -#~ "example `_ to learn " -#~ "more." +#~ "After we have all of the necessary" +#~ " informations, let's create our Flower " +#~ "client." #~ msgstr "" #~ msgid "" -#~ "In this notebook, we'll build a " -#~ "federated learning system using Flower " -#~ "and PyTorch. In part 1, we use " -#~ "PyTorch for the model training pipeline" -#~ " and data loading. In part 2, " -#~ "we continue to federate the PyTorch-" -#~ "based pipeline using Flower." +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongise Flower," +#~ " for example, PyTorch. This tutorial " +#~ "might be removed in future versions " +#~ "of Flower." #~ msgstr "" #~ msgid "" -#~ "Next, we install the necessary packages" -#~ " for PyTorch (``torch`` and " -#~ "``torchvision``) and Flower (``flwr``):" +#~ "It is recommended to create a " +#~ "virtual environment and run everything " +#~ "within this `virtualenv `_." #~ msgstr "" #~ msgid "" -#~ "Federated learning can be applied to " -#~ "many different types of tasks across " -#~ "different domains. In this tutorial, we" -#~ " introduce federated learning by training" -#~ " a simple convolutional neural network " -#~ "(CNN) on the popular CIFAR-10 dataset." 
-#~ " CIFAR-10 can be used to train " -#~ "image classifiers that distinguish between " -#~ "images from ten different classes:" +#~ "First of all, it is recommended to" +#~ " create a virtual environment and run" +#~ " everything within a `virtualenv " +#~ "`_." +#~ msgstr "" + +#~ msgid "Since we want to use scikt-learn, let's go ahead and install it:" #~ msgstr "" #~ msgid "" -#~ "Each organization will act as a " -#~ "client in the federated learning system." -#~ " So having ten organizations participate" -#~ " in a federation means having ten " -#~ "clients connected to the federated " -#~ "learning server:" +#~ "We load the MNIST dataset from " +#~ "`OpenML `_, a popular" +#~ " image classification dataset of " +#~ "handwritten digits for machine learning. " +#~ "The utility :code:`utils.load_mnist()` downloads " +#~ "the training and test data. The " +#~ "training set is split afterwards into" +#~ " 10 partitions with :code:`utils.partition()`." #~ msgstr "" #~ msgid "" -#~ "Let's now load the CIFAR-10 training " -#~ "and test set, partition them into " -#~ "ten smaller datasets (each split into" -#~ " training and validation set), and " -#~ "wrap the resulting partitions by " -#~ "creating a PyTorch ``DataLoader`` for " -#~ "each of them:" +#~ "Now that you have known how " +#~ "federated XGBoost work with Flower, it's" +#~ " time to run some more comprehensive" +#~ " experiments by customising the " +#~ "experimental settings. In the xgboost-" +#~ "comprehensive example (`full code " +#~ "`_), we provide more options " +#~ "to define various experimental setups, " +#~ "including aggregation strategies, data " +#~ "partitioning and centralised/distributed evaluation." +#~ " We also support `Flower simulation " +#~ "`_ making it easy to " +#~ "simulate large client cohorts in a " +#~ "resource-aware manner. Let's take a " +#~ "look!" 
#~ msgstr "" -#~ msgid "|ed6498a023f2477a9ccd57ee4514bda4|" +#~ msgid "|31e4b1afa87c4b968327bbeafbf184d4|" #~ msgstr "" -#~ msgid "|5a4f742489ac4f819afefdd4dc9ab272|" +#~ msgid "|c9d935b4284e4c389a33d86b33e07c0a|" #~ msgstr "" -#~ msgid "|3331c80cd05045f6a56524d8e3e76d0c|" +#~ msgid "|00727b5faffb468f84dd1b03ded88638|" #~ msgstr "" -#~ msgid "|4987b26884ec4b2c8f06c1264bcebe60|" +#~ msgid "|daf0cf0ff4c24fd29439af78416cf47b|" #~ msgstr "" -#~ msgid "|ec8ae2d778aa493a986eb2fa29c220e5|" +#~ msgid "|9f093007080d471d94ca90d3e9fde9b6|" #~ msgstr "" -#~ msgid "|b8949d0669fe4f8eadc9a4932f4e9c57|" +#~ msgid "|46a26e6150e0479fbd3dfd655f36eb13|" #~ msgstr "" -#~ msgid "|94ff30bdcd09443e8488b5f29932a541|" +#~ msgid "|3daba297595c4c7fb845d90404a6179a|" #~ msgstr "" -#~ msgid "|48dccf1d6d0544bba8917d2783a47719|" +#~ msgid "|5769874fa9c4455b80b2efda850d39d7|" #~ msgstr "" -#~ msgid "|0366618db96b4f329f0d4372d1150fde|" +#~ msgid "|ba47ffb421814b0f8f9fa5719093d839|" #~ msgstr "" -#~ msgid "|ac80eddc76e6478081b1ca35eed029c0|" +#~ msgid "|aeac5bf79cbf497082e979834717e01b|" #~ msgstr "" -#~ msgid "|1ac94140c317450e89678db133c7f3c2|" +#~ msgid "|ce27ed4bbe95459dba016afc42486ba2|" #~ msgstr "" -#~ msgid "|f8850c6e96fc4430b55e53bba237a7c0|" +#~ msgid "|ae94a7f71dda443cbec2385751427d41|" #~ msgstr "" -#~ msgid "|4a368fdd3fc34adabd20a46752a68582|" +#~ msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" #~ msgstr "" -#~ msgid "|40f69c17bb444652a7c8dfe577cd120e|" +#~ msgid "|08cb60859b07461588fe44e55810b050|" #~ msgstr "" diff --git a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po index f22b74db8896..86d96e5e6865 100644 --- a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po +++ b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po @@ -7,18 +7,17 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-02-13 11:23+0100\n" +"POT-Creation-Date: 2024-03-15 14:23+0000\n" "PO-Revision-Date: 2024-02-19 
11:37+0000\n" "Last-Translator: Yan Gao \n" -"Language-Team: Chinese (Simplified) \n" "Language: zh_Hans\n" +"Language-Team: Chinese (Simplified) \n" +"Plural-Forms: nplurals=1; plural=0;\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"X-Generator: Weblate 5.4\n" -"Generated-By: Babel 2.13.1\n" +"Generated-By: Babel 2.14.0\n" #: ../../source/contributor-explanation-architecture.rst:2 msgid "Flower Architecture" @@ -85,9 +84,8 @@ msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:19 msgid "" -"Please follow the first section on `Run Flower using Docker " -"`_ " -"which covers this step in more detail." +"Please follow the first section on :doc:`Run Flower using Docker ` which covers this step in more detail." msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:23 @@ -293,6 +291,7 @@ msgid "Contribute translations" msgstr "贡献译文" #: ../../source/contributor-how-to-contribute-translations.rst:4 +#, fuzzy msgid "" "Since `Flower 1.5 `_ we have introduced translations to " @@ -301,7 +300,7 @@ msgid "" "to help us in our effort to make Federated Learning accessible to as many" " people as possible by contributing to those translations! This might " "also be a great opportunity for those wanting to become open source " -"contributors with little prerequistes." +"contributors with little prerequisites." 
msgstr "" "从 `Flower 1.5 `_ " @@ -362,8 +361,9 @@ msgid "This is what the interface looks like:" msgstr "这就是界面的样子:" #: ../../source/contributor-how-to-contribute-translations.rst:47 +#, fuzzy msgid "" -"You input your translation in the textbox at the top and then, once you " +"You input your translation in the text box at the top and then, once you " "are happy with it, you either press ``Save and continue`` (to save the " "translation and go to the next untranslated string), ``Save and stay`` " "(to save the translation and stay on the same page), ``Suggest`` (to add " @@ -408,11 +408,11 @@ msgstr "添加新语言" #: ../../source/contributor-how-to-contribute-translations.rst:69 msgid "" "If you want to add a new language, you will first have to contact us, " -"either on `Slack `_, or by opening an " -"issue on our `GitHub repo `_." +"either on `Slack `_, or by opening an issue" +" on our `GitHub repo `_." msgstr "" -"如果您想添加新语言,请先联系我们,可以在 `Slack `_ 上联系,也可以在我们的" -" `GitHub repo `_ 上提交问题。" +"如果您想添加新语言,请先联系我们,可以在 `Slack `_ 上联系,也可以在我们的 " +"`GitHub repo `_ 上提交问题。" #: ../../source/contributor-how-to-create-new-messages.rst:2 msgid "Creating New Messages" @@ -449,12 +449,13 @@ msgid "Message Types for Protocol Buffers" msgstr "协议缓冲区的信息类型" #: ../../source/contributor-how-to-create-new-messages.rst:32 +#, fuzzy msgid "" "The first thing we need to do is to define a message type for the RPC " "system in :code:`transport.proto`. Note that we have to do it for both " "the request and response messages. For more details on the syntax of " -"proto3, please see the `official documentation " -"`_." +"proto3, please see the `official documentation `_." 
msgstr "" "我们需要做的第一件事是在脚本code:`transport.proto`中定义 RPC " "系统的消息类型。请注意,我们必须对请求信息和响应信息都这样做。有关 proto3 语法的更多详情,请参阅官方文档 " @@ -575,9 +576,10 @@ msgid "" msgstr "工作区文件从本地文件系统加载,或复制或克隆到容器中。扩展在容器内安装和运行,在容器内它们可以完全访问工具、平台和文件系统。这意味着,只需连接到不同的容器,就能无缝切换整个开发环境。" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:11 +#, fuzzy msgid "" "Source: `Official VSCode documentation " -"`_" +"`_" msgstr "来源:`VSCode 官方文档 `_" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:15 @@ -618,18 +620,20 @@ msgid "" msgstr "在某些情况下,您的设置可能更复杂。有关这些情况,请参考以下资料:" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:23 +#, fuzzy msgid "" "`Developing inside a Container " -"`_" msgstr "" "在容器内开发 `_" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:24 +#, fuzzy msgid "" "`Remote development in Containers " -"`_" +"`_" msgstr "容器中的远程开发 `_" #: ../../source/contributor-how-to-install-development-versions.rst:2 @@ -909,8 +913,8 @@ msgstr "在 ``changelog.md`` 中添加新的 ``Unreleased`` 部分。" #: ../../source/contributor-how-to-release-flower.rst:25 msgid "" -"Merge the pull request on the same day (i.e., before a new nightly release" -" gets published to PyPI)." +"Merge the pull request on the same day (i.e., before a new nightly " +"release gets published to PyPI)." msgstr "在同一天合并拉取请求(即在新版本发布到 PyPI 之前)。" #: ../../source/contributor-how-to-release-flower.rst:28 @@ -923,8 +927,8 @@ msgstr "释放前命名" #: ../../source/contributor-how-to-release-flower.rst:33 msgid "" -"PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases " -"MUST use one of the following naming patterns:" +"PyPI supports pre-releases (alpha, beta, release candidate). 
Pre-releases" +" MUST use one of the following naming patterns:" msgstr "PyPI 支持预发布版本(alpha、beta、release candidate)。预发布版本必须使用以下命名模式之一:" #: ../../source/contributor-how-to-release-flower.rst:35 @@ -1193,8 +1197,8 @@ msgid "" "where to start to increase your chances of getting your PR accepted into " "the Flower codebase." msgstr "" -"我们欢迎为Flower做出代码贡献!然而,要知道从哪里开始并非易事。因此,我们提出" -"了一些建议,告诉您从哪里开始,以增加您的 PR 被 Flower 代码库接受的机会。" +"我们欢迎为Flower做出代码贡献!然而,要知道从哪里开始并非易事。因此,我们提出了一些建议,告诉您从哪里开始,以增加您的 PR 被 Flower" +" 代码库接受的机会。" #: ../../source/contributor-ref-good-first-contributions.rst:11 msgid "Where to start" @@ -1224,33 +1228,33 @@ msgid "Request for Flower Baselines" msgstr "Flower Baselines的申请" #: ../../source/contributor-ref-good-first-contributions.rst:25 +#, fuzzy msgid "" "If you are not familiar with Flower Baselines, you should probably check-" -"out our `contributing guide for baselines `_." +"out our `contributing guide for baselines " +"`_." msgstr "" "如果您对 Flower Baselines 还不熟悉,也许可以看看我们的 `Baselines贡献指南 " "`_。" #: ../../source/contributor-ref-good-first-contributions.rst:27 +#, fuzzy msgid "" "You should then check out the open `issues " "`_" " for baseline requests. If you find a baseline that you'd like to work on" -" and that has no assignes, feel free to assign it to yourself and start " +" and that has no assignees, feel free to assign it to yourself and start " "working on it!" msgstr "" -"然后查看开放的 `issues `_ baseline请求。如" -"果您发现了自己想做的baseline,而它还没有被分配,请随时把它分配给自己,然后开" -"始工作!" +"然后查看开放的 `issues " +"`_" +" baseline请求。如果您发现了自己想做的baseline,而它还没有被分配,请随时把它分配给自己,然后开始工作!" #: ../../source/contributor-ref-good-first-contributions.rst:31 msgid "" "Otherwise, if you don't find a baseline you'd like to work on, be sure to" " open a new issue with the baseline request template!" -msgstr "如果您没有找到想要做的baseline,请务必使用baseline请求模板打开一个新问题(" -"GitHub issue)!" +msgstr "如果您没有找到想要做的baseline,请务必使用baseline请求模板打开一个新问题(GitHub issue)!" 
#: ../../source/contributor-ref-good-first-contributions.rst:34 msgid "Request for examples" @@ -1261,8 +1265,7 @@ msgid "" "We wish we had more time to write usage examples because we believe they " "help users to get started with building what they want to build. Here are" " a few ideas where we'd be happy to accept a PR:" -msgstr "我们希望有更多的时间来撰写使用示例,因为我们相信这些示例可以帮助用户开始构建" -"他们想要的东西。以下是我们乐意接受 PR 的几个想法:" +msgstr "我们希望有更多的时间来撰写使用示例,因为我们相信这些示例可以帮助用户开始构建他们想要的东西。以下是我们乐意接受 PR 的几个想法:" #: ../../source/contributor-ref-good-first-contributions.rst:40 msgid "Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch" @@ -1330,50 +1333,50 @@ msgid "" msgstr "本指南适用于想参与 Flower,但不习惯为 GitHub 项目贡献的人。" #: ../../source/contributor-tutorial-contribute-on-github.rst:6 +#, fuzzy msgid "" "If you're familiar with how contributing on GitHub works, you can " -"directly checkout our `getting started guide for contributors " -"`_." +"directly checkout our :doc:`getting started guide for contributors " +"`." msgstr "" -"如果您熟悉如何在 GitHub 上贡献,可以直接查看我们的 \"贡献者入门指南\" " -"`_ 和 " -"\"优秀的首次贡献示例\" `_。" +"如果您熟悉如何在 GitHub 上贡献,可以直接查看我们的 \"贡献者入门指南\" `_ 和 \"优秀的首次贡献示例\" " +"`_。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:11 +#: ../../source/contributor-tutorial-contribute-on-github.rst:10 msgid "Setting up the repository" msgstr "建立资源库" -#: ../../source/contributor-tutorial-contribute-on-github.rst:22 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "**Create a GitHub account and setup Git**" msgstr "**创建 GitHub 账户并设置 Git**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:14 +#: ../../source/contributor-tutorial-contribute-on-github.rst:13 +#, fuzzy msgid "" "Git is a distributed version control tool. This allows for an entire " "codebase's history to be stored and every developer's machine. It is a " "software that will need to be installed on your local machine, you can " -"follow this `guide `_ to set it up." 
+"follow this `guide `_ to set it up." msgstr "" "Git 是一种分布式版本控制工具。它可以将整个代码库的历史记录保存在每个开发人员的机器上。您需要在本地计算机上安装该软件,可以按照本指南 " "`_ 进行设置。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:17 +#: ../../source/contributor-tutorial-contribute-on-github.rst:16 msgid "" "GitHub, itself, is a code hosting platform for version control and " "collaboration. It allows for everyone to collaborate and work from " "anywhere on remote repositories." msgstr "GitHub 本身是一个用于版本控制和协作的代码托管平台。它允许每个人在任何地方对远程仓库进行协作和工作。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:19 +#: ../../source/contributor-tutorial-contribute-on-github.rst:18 msgid "" "If you haven't already, you will need to create an account on `GitHub " "`_." msgstr "如果还没有,您需要在 `GitHub `_ 上创建一个账户。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:21 +#: ../../source/contributor-tutorial-contribute-on-github.rst:20 msgid "" "The idea behind the generic Git and GitHub workflow boils down to this: " "you download code from a remote repository on GitHub, make changes " @@ -1383,21 +1386,22 @@ msgstr "" "通用的 Git 和 GitHub 工作流程背后的理念可以归结为:从 GitHub 上的远程仓库下载代码,在本地进行修改并使用 Git " "进行跟踪,然后将新的历史记录上传回 GitHub。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:33 +#: ../../source/contributor-tutorial-contribute-on-github.rst:32 msgid "**Forking the Flower repository**" msgstr "**叉花仓库**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:25 +#: ../../source/contributor-tutorial-contribute-on-github.rst:24 +#, fuzzy msgid "" "A fork is a personal copy of a GitHub repository. To create one for " -"Flower, you must navigate to https://github.com/adap/flower (while " +"Flower, you must navigate to ``_ (while " "connected to your GitHub account) and click the ``Fork`` button situated " "on the top right of the page." 
msgstr "" "fork 是 GitHub 仓库的个人副本。要为 Flower 创建一个 fork,您必须导航到 " "https://github.com/adap/flower(同时连接到您的 GitHub 账户),然后点击页面右上方的 ``Fork`` 按钮。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:30 +#: ../../source/contributor-tutorial-contribute-on-github.rst:29 msgid "" "You can change the name if you want, but this is not necessary as this " "version of Flower will be yours and will sit inside your own account " @@ -1407,11 +1411,11 @@ msgstr "" "您可以更改名称,但没有必要,因为这个版本的 Flower " "将是您自己的,并位于您自己的账户中(即,在您自己的版本库列表中)。创建完成后,您会在左上角看到自己的 Flower 版本。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:48 +#: ../../source/contributor-tutorial-contribute-on-github.rst:47 msgid "**Cloning your forked repository**" msgstr "**克隆你的分叉仓库**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:36 +#: ../../source/contributor-tutorial-contribute-on-github.rst:35 msgid "" "The next step is to download the forked repository on your machine to be " "able to make changes to it. On your forked repository page, you should " @@ -1421,28 +1425,28 @@ msgstr "" "下一步是在你的机器上下载分叉版本库,以便对其进行修改。在分叉版本库页面上,首先点击右侧的 \"代码 \"按钮,这样就能复制版本库的 HTTPS " "链接。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:42 +#: ../../source/contributor-tutorial-contribute-on-github.rst:41 msgid "" "Once you copied the \\, you can open a terminal on your machine, " "navigate to the place you want to download the repository to and type:" msgstr "一旦复制了 (),你就可以在你的机器上打开一个终端,导航到你想下载软件源的地方,然后键入:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:48 +#: ../../source/contributor-tutorial-contribute-on-github.rst:47 #, fuzzy msgid "" "This will create a ``flower/`` (or the name of your fork if you renamed " "it) folder in the current working directory." 
msgstr "这将在当前工作目录下创建一个 `flower/`(如果重命名了,则使用 fork 的名称)文件夹。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:67 +#: ../../source/contributor-tutorial-contribute-on-github.rst:66 msgid "**Add origin**" msgstr "**添加原产地**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:51 +#: ../../source/contributor-tutorial-contribute-on-github.rst:50 msgid "You can then go into the repository folder:" msgstr "然后,您就可以进入存储库文件夹:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:57 +#: ../../source/contributor-tutorial-contribute-on-github.rst:56 msgid "" "And here we will need to add an origin to our repository. The origin is " "the \\ of the remote fork repository. To obtain it, we can do as " @@ -1452,27 +1456,28 @@ msgstr "" "在这里,我们需要为我们的版本库添加一个 origin。origin 是远程 fork 仓库的 " "\\。要获得它,我们可以像前面提到的那样,访问 GitHub 账户上的分叉仓库并复制链接。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:62 +#: ../../source/contributor-tutorial-contribute-on-github.rst:61 msgid "" "Once the \\ is copied, we can type the following command in our " "terminal:" msgstr "一旦复制了 \\ ,我们就可以在终端中键入以下命令:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:91 +#: ../../source/contributor-tutorial-contribute-on-github.rst:90 msgid "**Add upstream**" msgstr "**增加上游**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:70 +#: ../../source/contributor-tutorial-contribute-on-github.rst:69 +#, fuzzy msgid "" "Now we will add an upstream address to our repository. 
Still in the same " -"directroy, we must run the following command:" +"directory, we must run the following command:" msgstr "现在,我们要为版本库添加一个上游地址。还是在同一目录下,我们必须运行以下命令:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:77 +#: ../../source/contributor-tutorial-contribute-on-github.rst:76 msgid "The following diagram visually explains what we did in the previous steps:" msgstr "下图直观地解释了我们在前面步骤中的操作:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:81 +#: ../../source/contributor-tutorial-contribute-on-github.rst:80 msgid "" "The upstream is the GitHub remote address of the parent repository (in " "this case Flower), i.e. the one we eventually want to contribute to and " @@ -1483,110 +1488,111 @@ msgstr "" "上游是父版本库(这里是 Flower)的 GitHub 远程地址,即我们最终要贡献的版本库,因此需要最新的历史记录。origin " "只是我们创建的分叉仓库的 GitHub 远程地址,即我们自己账户中的副本(分叉)。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:85 +#: ../../source/contributor-tutorial-contribute-on-github.rst:84 msgid "" "To make sure our local version of the fork is up-to-date with the latest " "changes from the Flower repository, we can execute the following command:" msgstr "为了确保本地版本的分叉程序与 Flower 代码库的最新更改保持一致,我们可以执行以下命令:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:94 +#: ../../source/contributor-tutorial-contribute-on-github.rst:93 msgid "Setting up the coding environment" msgstr "设置编码环境" -#: ../../source/contributor-tutorial-contribute-on-github.rst:96 +#: ../../source/contributor-tutorial-contribute-on-github.rst:95 +#, fuzzy msgid "" -"This can be achieved by following this `getting started guide for " -"contributors`_ (note that you won't need to clone the repository). Once " -"you are able to write code and test it, you can finally start making " -"changes!" +"This can be achieved by following this :doc:`getting started guide for " +"contributors ` (note " +"that you won't need to clone the repository). 
Once you are able to write " +"code and test it, you can finally start making changes!" msgstr "您可以按照这份 \"贡献者入门指南\"__(注意,您不需要克隆版本库)来实现这一点。一旦您能够编写代码并进行测试,您就可以开始修改了!" -#: ../../source/contributor-tutorial-contribute-on-github.rst:101 +#: ../../source/contributor-tutorial-contribute-on-github.rst:100 msgid "Making changes" msgstr "做出改变" -#: ../../source/contributor-tutorial-contribute-on-github.rst:103 +#: ../../source/contributor-tutorial-contribute-on-github.rst:102 msgid "" "Before making any changes make sure you are up-to-date with your " "repository:" msgstr "在进行任何更改之前,请确保您的版本库是最新的:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:109 +#: ../../source/contributor-tutorial-contribute-on-github.rst:108 msgid "And with Flower's repository:" msgstr "还有Flower的存储库:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:123 +#: ../../source/contributor-tutorial-contribute-on-github.rst:122 msgid "**Create a new branch**" msgstr "**创建一个新分支**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:116 +#: ../../source/contributor-tutorial-contribute-on-github.rst:115 msgid "" "To make the history cleaner and easier to work with, it is good practice " "to create a new branch for each feature/project that needs to be " "implemented." msgstr "为了使历史记录更简洁、更易于操作,为每个需要实现的功能/项目创建一个新分支是个不错的做法。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:119 +#: ../../source/contributor-tutorial-contribute-on-github.rst:118 msgid "" "To do so, just run the following command inside the repository's " "directory:" msgstr "为此,只需在版本库目录下运行以下命令即可:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:126 +#: ../../source/contributor-tutorial-contribute-on-github.rst:125 msgid "**Make changes**" msgstr "**进行修改**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:126 +#: ../../source/contributor-tutorial-contribute-on-github.rst:125 msgid "Write great code and create wonderful changes using your favorite editor!" 
msgstr "使用您最喜欢的编辑器编写优秀的代码并创建精彩的更改!" -#: ../../source/contributor-tutorial-contribute-on-github.rst:139 +#: ../../source/contributor-tutorial-contribute-on-github.rst:138 msgid "**Test and format your code**" msgstr "**测试并格式化您的代码**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:129 +#: ../../source/contributor-tutorial-contribute-on-github.rst:128 msgid "" "Don't forget to test and format your code! Otherwise your code won't be " "able to be merged into the Flower repository. This is done so the " "codebase stays consistent and easy to understand." msgstr "不要忘记测试和格式化您的代码!否则您的代码将无法并入 Flower 代码库。这样做是为了使代码库保持一致并易于理解。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:132 +#: ../../source/contributor-tutorial-contribute-on-github.rst:131 msgid "To do so, we have written a few scripts that you can execute:" msgstr "为此,我们编写了一些脚本供您执行:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:151 +#: ../../source/contributor-tutorial-contribute-on-github.rst:150 msgid "**Stage changes**" msgstr "**舞台变化**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:142 +#: ../../source/contributor-tutorial-contribute-on-github.rst:141 msgid "" "Before creating a commit that will update your history, you must specify " "to Git which files it needs to take into account." msgstr "在创建更新历史记录的提交之前,必须向 Git 说明需要考虑哪些文件。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:144 +#: ../../source/contributor-tutorial-contribute-on-github.rst:143 msgid "This can be done with:" msgstr "这可以通过:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:150 +#: ../../source/contributor-tutorial-contribute-on-github.rst:149 msgid "" "To check which files have been modified compared to the last version " "(last commit) and to see which files are staged for commit, you can use " "the :code:`git status` command." 
msgstr "要查看与上一版本(上次提交)相比哪些文件已被修改,以及哪些文件处于提交阶段,可以使用 :code:`git status` 命令。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:161 +#: ../../source/contributor-tutorial-contribute-on-github.rst:160 msgid "**Commit changes**" msgstr "**提交更改**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:154 +#: ../../source/contributor-tutorial-contribute-on-github.rst:153 msgid "" "Once you have added all the files you wanted to commit using :code:`git " "add`, you can finally create your commit using this command:" msgstr "使用 :code:`git add` 添加完所有要提交的文件后,就可以使用此命令创建提交了:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:160 +#: ../../source/contributor-tutorial-contribute-on-github.rst:159 msgid "" "The \\ is there to explain to others what the commit " "does. It should be written in an imperative style and be concise. An " @@ -1595,61 +1601,61 @@ msgstr "" " 用于向他人解释提交的作用。它应该以命令式风格书写,并且简明扼要。例如 :code:`git commit " "-m \"Add images to README\"`。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:172 +#: ../../source/contributor-tutorial-contribute-on-github.rst:171 msgid "**Push the changes to the fork**" msgstr "**将更改推送到分叉**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:164 +#: ../../source/contributor-tutorial-contribute-on-github.rst:163 msgid "" "Once we have committed our changes, we have effectively updated our local" " history, but GitHub has no way of knowing this unless we push our " "changes to our origin's remote address:" msgstr "一旦提交了修改,我们就有效地更新了本地历史记录,但除非我们将修改推送到原点的远程地址,否则 GitHub 无法得知:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:171 +#: ../../source/contributor-tutorial-contribute-on-github.rst:170 msgid "" "Once this is done, you will see on the GitHub that your forked repo was " "updated with the changes you have made." 
msgstr "完成此操作后,您将在 GitHub 上看到您的分叉仓库已根据您所做的更改进行了更新。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:175 +#: ../../source/contributor-tutorial-contribute-on-github.rst:174 msgid "Creating and merging a pull request (PR)" msgstr "创建和合并拉取请求 (PR)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +#: ../../source/contributor-tutorial-contribute-on-github.rst:205 msgid "**Create the PR**" msgstr "**创建 PR**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:178 +#: ../../source/contributor-tutorial-contribute-on-github.rst:177 msgid "" "Once you have pushed changes, on the GitHub webpage of your repository " "you should see the following message:" msgstr "推送更改后,在仓库的 GitHub 网页上应该会看到以下信息:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:182 +#: ../../source/contributor-tutorial-contribute-on-github.rst:181 #, fuzzy msgid "Otherwise you can always find this option in the ``Branches`` page." msgstr "否则,您可以在 \"分支 \"页面找到该选项。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:184 +#: ../../source/contributor-tutorial-contribute-on-github.rst:183 #, fuzzy msgid "" "Once you click the ``Compare & pull request`` button, you should see " "something similar to this:" msgstr "点击 \"比较和拉取请求 \"按钮后,您应该会看到类似下面的内容:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:188 +#: ../../source/contributor-tutorial-contribute-on-github.rst:187 msgid "At the top you have an explanation of which branch will be merged where:" msgstr "在顶部,你可以看到关于哪个分支将被合并的说明:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:192 +#: ../../source/contributor-tutorial-contribute-on-github.rst:191 msgid "" "In this example you can see that the request is to merge the branch " "``doc-fixes`` from my forked repository to branch ``main`` from the " "Flower repository." 
msgstr "在这个例子中,你可以看到请求将我分叉的版本库中的分支 ``doc-fixes`` 合并到 Flower 版本库中的分支 ``main``。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:194 +#: ../../source/contributor-tutorial-contribute-on-github.rst:193 msgid "" "The input box in the middle is there for you to describe what your PR " "does and to link it to existing issues. We have placed comments (that " @@ -1657,7 +1663,7 @@ msgid "" "process." msgstr "中间的输入框供您描述 PR 的作用,并将其与现有问题联系起来。我们在此放置了注释(一旦 PR 打开,注释将不会显示),以指导您完成整个过程。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:197 +#: ../../source/contributor-tutorial-contribute-on-github.rst:196 msgid "" "It is important to follow the instructions described in comments. For " "instance, in order to not break how our changelog system works, you " @@ -1666,167 +1672,175 @@ msgid "" ":ref:`changelogentry` appendix." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:201 +#: ../../source/contributor-tutorial-contribute-on-github.rst:200 msgid "" "At the bottom you will find the button to open the PR. This will notify " "reviewers that a new PR has been opened and that they should look over it" " to merge or to request changes." 
msgstr "在底部,您可以找到打开 PR 的按钮。这将通知审核人员新的 PR 已经打开,他们应该查看该 PR 以进行合并或要求修改。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:204 +#: ../../source/contributor-tutorial-contribute-on-github.rst:203 msgid "" "If your PR is not yet ready for review, and you don't want to notify " "anyone, you have the option to create a draft pull request:" msgstr "如果您的 PR 尚未准备好接受审核,而且您不想通知任何人,您可以选择创建一个草案拉取请求:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:208 msgid "**Making new changes**" msgstr "**作出新的改变**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:208 msgid "" "Once the PR has been opened (as draft or not), you can still push new " "commits to it the same way we did before, by making changes to the branch" " associated with the PR." msgstr "一旦 PR 被打开(无论是否作为草案),你仍然可以像以前一样,通过修改与 PR 关联的分支来推送新的提交。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:231 +#: ../../source/contributor-tutorial-contribute-on-github.rst:230 msgid "**Review the PR**" msgstr "**审查 PR**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:212 +#: ../../source/contributor-tutorial-contribute-on-github.rst:211 msgid "" "Once the PR has been opened or once the draft PR has been marked as " "ready, a review from code owners will be automatically requested:" msgstr "一旦 PR 被打开或 PR 草案被标记为就绪,就会自动要求代码所有者进行审核:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:216 +#: ../../source/contributor-tutorial-contribute-on-github.rst:215 msgid "" "Code owners will then look into the code, ask questions, request changes " "or validate the PR." msgstr "然后,代码所有者会查看代码、提出问题、要求修改或验证 PR。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:218 +#: ../../source/contributor-tutorial-contribute-on-github.rst:217 msgid "Merging will be blocked if there are ongoing requested changes." 
msgstr "如果有正在进行的更改请求,合并将被阻止。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:222 +#: ../../source/contributor-tutorial-contribute-on-github.rst:221 msgid "" "To resolve them, just push the necessary changes to the branch associated" " with the PR:" msgstr "要解决这些问题,只需将必要的更改推送到与 PR 关联的分支即可:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:226 +#: ../../source/contributor-tutorial-contribute-on-github.rst:225 msgid "And resolve the conversation:" msgstr "并解决对话:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:230 +#: ../../source/contributor-tutorial-contribute-on-github.rst:229 msgid "" "Once all the conversations have been resolved, you can re-request a " "review." msgstr "一旦所有对话都得到解决,您就可以重新申请审核。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:251 +#: ../../source/contributor-tutorial-contribute-on-github.rst:250 msgid "**Once the PR is merged**" msgstr "**一旦 PR 被合并**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:234 +#: ../../source/contributor-tutorial-contribute-on-github.rst:233 msgid "" "If all the automatic tests have passed and reviewers have no more changes" " to request, they can approve the PR and merge it." 
msgstr "如果所有自动测试都已通过,且审核员不再需要修改,他们就可以批准 PR 并将其合并。"

-#: ../../source/contributor-tutorial-contribute-on-github.rst:238
+#: ../../source/contributor-tutorial-contribute-on-github.rst:237
 msgid ""
 "Once it is merged, you can delete the branch on GitHub (a button should "
 "appear to do so) and also delete it locally by doing:"
 msgstr "合并后,您可以在 GitHub 上删除该分支(会出现一个删除按钮),也可以在本地删除该分支:"

-#: ../../source/contributor-tutorial-contribute-on-github.rst:245
+#: ../../source/contributor-tutorial-contribute-on-github.rst:244
 msgid "Then you should update your forked repository by doing:"
 msgstr "然后,你应该更新你的分叉仓库:"

-#: ../../source/contributor-tutorial-contribute-on-github.rst:254
+#: ../../source/contributor-tutorial-contribute-on-github.rst:253
 msgid "Example of first contribution"
 msgstr "首次贡献示例"

-#: ../../source/contributor-tutorial-contribute-on-github.rst:257
+#: ../../source/contributor-tutorial-contribute-on-github.rst:256
 msgid "Problem"
 msgstr "问题"

-#: ../../source/contributor-tutorial-contribute-on-github.rst:259
+#: ../../source/contributor-tutorial-contribute-on-github.rst:258
+#, fuzzy
 msgid ""
-"For our documentation, we’ve started to use the `Diàtaxis framework "
+"For our documentation, we've started to use the `Diàtaxis framework "
 "`_."
 msgstr "对于我们的文档,我们已经开始使用 \"Diàtaxis 框架 `_\"。"

-#: ../../source/contributor-tutorial-contribute-on-github.rst:261
+#: ../../source/contributor-tutorial-contribute-on-github.rst:260
+#, fuzzy
 msgid ""
-"Our “How to” guides should have titles that continue the sencence “How to"
-" …”, for example, “How to upgrade to Flower 1.0”."
+"Our \"How to\" guides should have titles that continue the sentence \"How"
+" to …\", for example, \"How to upgrade to Flower 1.0\"."
msgstr "我们的 \"如何 \"指南的标题应延续 \"如何...... 
\"的句式,例如 \"如何升级到 Flower 1.0\"。"

-#: ../../source/contributor-tutorial-contribute-on-github.rst:263
+#: ../../source/contributor-tutorial-contribute-on-github.rst:262
 msgid ""
 "Most of our guides do not follow this new format yet, and changing their "
 "title is (unfortunately) more involved than one might think."
 msgstr "我们的大多数指南还没有采用这种新格式,而更改其标题(不幸的是)比人们想象的要复杂得多。"

-#: ../../source/contributor-tutorial-contribute-on-github.rst:265
+#: ../../source/contributor-tutorial-contribute-on-github.rst:264
+#, fuzzy
 msgid ""
-"This issue is about changing the title of a doc from present continious "
+"This issue is about changing the title of a doc from present continuous "
 "to present simple."
 msgstr "这个问题是关于将文档标题从现在进行时改为一般现在时。"

-#: ../../source/contributor-tutorial-contribute-on-github.rst:267
+#: ../../source/contributor-tutorial-contribute-on-github.rst:266
+#, fuzzy
 msgid ""
-"Let's take the example of “Saving Progress” which we changed to “Save "
-"Progress”. Does this pass our check?"
+"Let's take the example of \"Saving Progress\" which we changed to \"Save "
+"Progress\". Does this pass our check?"
 msgstr "以 \"Saving Progress\" 为例,我们将其改为 \"Save Progress\"。这是否通过了我们的检查?"
-#: ../../source/contributor-tutorial-contribute-on-github.rst:269
-msgid "Before: ”How to saving progress” ❌"
+#: ../../source/contributor-tutorial-contribute-on-github.rst:268
+#, fuzzy
+msgid "Before: \"How to saving progress\" ❌"
 msgstr "之前: \"How to saving progress\" ❌"

-#: ../../source/contributor-tutorial-contribute-on-github.rst:271
-msgid "After: ”How to save progress” ✅"
+#: ../../source/contributor-tutorial-contribute-on-github.rst:270
+#, fuzzy
+msgid "After: \"How to save progress\" ✅"
 msgstr "之后: \"How to save progress\" ✅"

-#: ../../source/contributor-tutorial-contribute-on-github.rst:274
+#: ../../source/contributor-tutorial-contribute-on-github.rst:273
 msgid "Solution"
 msgstr "解决方案"

-#: ../../source/contributor-tutorial-contribute-on-github.rst:276
+#: ../../source/contributor-tutorial-contribute-on-github.rst:275
+#, fuzzy
 msgid ""
-"This is a tiny change, but it’ll allow us to test your end-to-end setup. 
" +"After cloning and setting up the Flower repo, here's what you should do:" msgstr "这只是一个很小的改动,但可以让我们测试你的端到端设置。克隆并设置好 Flower repo 后,你应该这样做:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:278 +#: ../../source/contributor-tutorial-contribute-on-github.rst:277 #, fuzzy msgid "Find the source file in ``doc/source``" msgstr "在 `doc/source` 中查找源文件" -#: ../../source/contributor-tutorial-contribute-on-github.rst:279 +#: ../../source/contributor-tutorial-contribute-on-github.rst:278 #, fuzzy msgid "" "Make the change in the ``.rst`` file (beware, the dashes under the title " "should be the same length as the title itself)" msgstr "在 `.rst` 文件中进行修改(注意,标题下的破折号应与标题本身的长度相同)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:280 +#: ../../source/contributor-tutorial-contribute-on-github.rst:279 +#, fuzzy msgid "" -"Build the docs and check the result: ``_" msgstr "" "构建文档并检查结果: ``_" -#: ../../source/contributor-tutorial-contribute-on-github.rst:283 +#: ../../source/contributor-tutorial-contribute-on-github.rst:282 msgid "Rename file" msgstr "重命名文件" -#: ../../source/contributor-tutorial-contribute-on-github.rst:285 +#: ../../source/contributor-tutorial-contribute-on-github.rst:284 msgid "" "You might have noticed that the file name still reflects the old wording." 
" If we just change the file, then we break all existing links to it - it " @@ -1836,32 +1850,33 @@ msgstr "" "您可能已经注意到,文件名仍然反映了旧的措辞。如果我们只是更改文件,那么就会破坏与该文件的所有现有链接--" "避免这种情况是***重要的,破坏链接会损害我们的搜索引擎排名。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:288 -msgid "Here’s how to change the file name:" +#: ../../source/contributor-tutorial-contribute-on-github.rst:287 +#, fuzzy +msgid "Here's how to change the file name:" msgstr "下面是更改文件名的方法:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:290 +#: ../../source/contributor-tutorial-contribute-on-github.rst:289 #, fuzzy msgid "Change the file name to ``save-progress.rst``" msgstr "将文件名改为`save-progress.rst`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:291 +#: ../../source/contributor-tutorial-contribute-on-github.rst:290 #, fuzzy msgid "Add a redirect rule to ``doc/source/conf.py``" msgstr "在 `doc/source/conf.py` 中添加重定向规则" -#: ../../source/contributor-tutorial-contribute-on-github.rst:293 +#: ../../source/contributor-tutorial-contribute-on-github.rst:292 #, fuzzy msgid "" "This will cause a redirect from ``saving-progress.html`` to ``save-" "progress.html``, old links will continue to work." msgstr "这将导致从 `saving-progress.html` 重定向到 `save-progress.html`,旧链接将继续工作。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:296 +#: ../../source/contributor-tutorial-contribute-on-github.rst:295 msgid "Apply changes in the index file" msgstr "应用索引文件中的更改" -#: ../../source/contributor-tutorial-contribute-on-github.rst:298 +#: ../../source/contributor-tutorial-contribute-on-github.rst:297 #, fuzzy msgid "" "For the lateral navigation bar to work properly, it is very important to " @@ -1869,49 +1884,50 @@ msgid "" "arborescence of the navbar." 
msgstr "要使横向导航栏正常工作,更新 `index.rst` 文件也非常重要。我们就是在这里定义整个导航栏的结构。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:301 +#: ../../source/contributor-tutorial-contribute-on-github.rst:300 #, fuzzy msgid "Find and modify the file name in ``index.rst``" msgstr "查找并修改 `index.rst` 中的文件名" -#: ../../source/contributor-tutorial-contribute-on-github.rst:304 +#: ../../source/contributor-tutorial-contribute-on-github.rst:303 msgid "Open PR" msgstr "开放式 PR" -#: ../../source/contributor-tutorial-contribute-on-github.rst:306 +#: ../../source/contributor-tutorial-contribute-on-github.rst:305 +#, fuzzy msgid "" -"Commit the changes (commit messages are always imperative: “Do " -"something”, in this case “Change …”)" +"Commit the changes (commit messages are always imperative: \"Do " +"something\", in this case \"Change …\")" msgstr "提交更改(提交信息总是命令式的:\"做某事\",这里是 \"更改......\")" -#: ../../source/contributor-tutorial-contribute-on-github.rst:307 +#: ../../source/contributor-tutorial-contribute-on-github.rst:306 msgid "Push the changes to your fork" msgstr "将更改推送到分叉" -#: ../../source/contributor-tutorial-contribute-on-github.rst:308 +#: ../../source/contributor-tutorial-contribute-on-github.rst:307 msgid "Open a PR (as shown above)" msgstr "打开 PR(如上图所示)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:309 +#: ../../source/contributor-tutorial-contribute-on-github.rst:308 msgid "Wait for it to be approved!" msgstr "等待审批!" -#: ../../source/contributor-tutorial-contribute-on-github.rst:310 +#: ../../source/contributor-tutorial-contribute-on-github.rst:309 msgid "Congrats! 🥳 You're now officially a Flower contributor!" msgstr "祝贺你 🥳 您现在正式成为 \"Flower \"贡献者!" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:314
+#: ../../source/contributor-tutorial-contribute-on-github.rst:313
 msgid "How to write a good PR title"
 msgstr "如何撰写好的 PR 标题"

-#: ../../source/contributor-tutorial-contribute-on-github.rst:316
+#: ../../source/contributor-tutorial-contribute-on-github.rst:315
 msgid ""
 "A well-crafted PR title helps team members quickly understand the purpose"
 " and scope of the changes being proposed. Here's a guide to help you "
 "write a good GitHub PR title:"
 msgstr "一个精心撰写的 PR 标题能帮助团队成员迅速了解所提修改的目的和范围。以下指南可帮助您撰写一个好的 GitHub PR 标题:"

-#: ../../source/contributor-tutorial-contribute-on-github.rst:318
+#: ../../source/contributor-tutorial-contribute-on-github.rst:317
 msgid ""
 "1. Be Clear and Concise: Provide a clear summary of the changes in a "
 "concise manner. 1. Use Actionable Verbs: Start with verbs like \"Add,\" "
@@ -1924,63 +1940,63 @@ msgstr ""
 "\"等动词来表明目的。1. 包含相关信息: 提及受影响的功能或模块以了解上下文。1. 简短:避免冗长的标题,以方便阅读。1. "
 "使用正确的大小写和标点符号: 遵守语法规则,以确保清晰。"

-#: ../../source/contributor-tutorial-contribute-on-github.rst:324
+#: ../../source/contributor-tutorial-contribute-on-github.rst:323
 msgid ""
 "Let's start with a few examples for titles that should be avoided because"
 " they do not provide meaningful information:"
 msgstr "让我们先举例说明几个应该避免使用的标题,因为它们不能提供有意义的信息:"

-#: ../../source/contributor-tutorial-contribute-on-github.rst:326
+#: ../../source/contributor-tutorial-contribute-on-github.rst:325
 msgid "Implement Algorithm"
 msgstr "实现算法"

-#: ../../source/contributor-tutorial-contribute-on-github.rst:327
+#: ../../source/contributor-tutorial-contribute-on-github.rst:326
 msgid "Database"
 msgstr "数据库"

-#: ../../source/contributor-tutorial-contribute-on-github.rst:328
+#: ../../source/contributor-tutorial-contribute-on-github.rst:327
 msgid "Add my_new_file.py to codebase"
 msgstr "在代码库中添加 my_new_file.py"

-#: ../../source/contributor-tutorial-contribute-on-github.rst:329
+#: ../../source/contributor-tutorial-contribute-on-github.rst:328
msgid "Improve code in module" msgstr "改进模块中的代码" -#: ../../source/contributor-tutorial-contribute-on-github.rst:330 +#: ../../source/contributor-tutorial-contribute-on-github.rst:329 msgid "Change SomeModule" msgstr "更改 SomeModule" -#: ../../source/contributor-tutorial-contribute-on-github.rst:332 +#: ../../source/contributor-tutorial-contribute-on-github.rst:331 msgid "" "Here are a few positive examples which provide helpful information " "without repeating how they do it, as that is already visible in the " "\"Files changed\" section of the PR:" msgstr "这里有几个正面的例子,提供了有用的信息,但没有重复他们是如何做的,因为在 PR 的 \"已更改文件 \"部分已经可以看到:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:334 +#: ../../source/contributor-tutorial-contribute-on-github.rst:333 msgid "Update docs banner to mention Flower Summit 2023" msgstr "更新文件横幅,提及 2023 年 Flower 峰会" -#: ../../source/contributor-tutorial-contribute-on-github.rst:335 +#: ../../source/contributor-tutorial-contribute-on-github.rst:334 msgid "Remove unnecessary XGBoost dependency" msgstr "移除不必要的 XGBoost 依赖性" -#: ../../source/contributor-tutorial-contribute-on-github.rst:336 +#: ../../source/contributor-tutorial-contribute-on-github.rst:335 msgid "Remove redundant attributes in strategies subclassing FedAvg" msgstr "删除 FedAvg 子类化策略中的多余属性" -#: ../../source/contributor-tutorial-contribute-on-github.rst:337 +#: ../../source/contributor-tutorial-contribute-on-github.rst:336 #, fuzzy msgid "Add CI job to deploy the staging system when the ``main`` branch changes" msgstr "添加 CI 作业,以便在 \"主 \"分支发生变化时部署暂存系统" -#: ../../source/contributor-tutorial-contribute-on-github.rst:338 +#: ../../source/contributor-tutorial-contribute-on-github.rst:337 msgid "" "Add new amazing library which will be used to improve the simulation " "engine" msgstr "添加新的惊人库,用于改进模拟引擎" -#: ../../source/contributor-tutorial-contribute-on-github.rst:342 +#: ../../source/contributor-tutorial-contribute-on-github.rst:341 #: 
../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:548 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:946 #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:727 @@ -1989,153 +2005,154 @@ msgstr "添加新的惊人库,用于改进模拟引擎" msgid "Next steps" msgstr "接下来的步骤" -#: ../../source/contributor-tutorial-contribute-on-github.rst:344 +#: ../../source/contributor-tutorial-contribute-on-github.rst:343 msgid "" "Once you have made your first PR, and want to contribute more, be sure to" " check out the following :" msgstr "一旦您完成了第一份 PR,并希望做出更多贡献,请务必查看以下内容:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:346 +#: ../../source/contributor-tutorial-contribute-on-github.rst:345 +#, fuzzy msgid "" -"`Good first contributions `_, where you should particularly look " -"into the :code:`baselines` contributions." +":doc:`Good first contributions `, where you should particularly look into the " +":code:`baselines` contributions." msgstr "" "`优秀的首次贡献 `_,在这里你应该特别看看 :code:`baselines` 的贡献。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:350 +#: ../../source/contributor-tutorial-contribute-on-github.rst:349 #: ../../source/fed/0000-20200102-fed-template.md:60 msgid "Appendix" msgstr "附录" -#: ../../source/contributor-tutorial-contribute-on-github.rst:355 +#: ../../source/contributor-tutorial-contribute-on-github.rst:354 #, fuzzy msgid "Changelog entry" msgstr "更新日志" -#: ../../source/contributor-tutorial-contribute-on-github.rst:357 +#: ../../source/contributor-tutorial-contribute-on-github.rst:356 msgid "" "When opening a new PR, inside its description, there should be a " "``Changelog entry`` header." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:359 +#: ../../source/contributor-tutorial-contribute-on-github.rst:358 msgid "" "Above this header you should see the following comment that explains how " "to write your changelog entry:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:361 +#: ../../source/contributor-tutorial-contribute-on-github.rst:360 msgid "" "Inside the following 'Changelog entry' section, you should put the " "description of your changes that will be added to the changelog alongside" " your PR title." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:364 +#: ../../source/contributor-tutorial-contribute-on-github.rst:363 msgid "" -"If the section is completely empty (without any token) or non-existant, " +"If the section is completely empty (without any token) or non-existent, " "the changelog will just contain the title of the PR for the changelog " "entry, without any description." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:367 +#: ../../source/contributor-tutorial-contribute-on-github.rst:366 msgid "" "If the section contains some text other than tokens, it will use it to " "add a description to the change." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:369 +#: ../../source/contributor-tutorial-contribute-on-github.rst:368 msgid "" "If the section contains one of the following tokens it will ignore any " "other text and put the PR under the corresponding section of the " "changelog:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:371 +#: ../../source/contributor-tutorial-contribute-on-github.rst:370 msgid " is for classifying a PR as a general improvement." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:373 +#: ../../source/contributor-tutorial-contribute-on-github.rst:372 msgid " is to not add the PR to the changelog" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:375 +#: ../../source/contributor-tutorial-contribute-on-github.rst:374 msgid " is to add a general baselines change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:377 +#: ../../source/contributor-tutorial-contribute-on-github.rst:376 msgid " is to add a general examples change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:379 +#: ../../source/contributor-tutorial-contribute-on-github.rst:378 msgid " is to add a general sdk change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:381 +#: ../../source/contributor-tutorial-contribute-on-github.rst:380 msgid " is to add a general simulations change to the PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:383 +#: ../../source/contributor-tutorial-contribute-on-github.rst:382 msgid "Note that only one token should be used." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:385 +#: ../../source/contributor-tutorial-contribute-on-github.rst:384 msgid "" "Its content must have a specific format. 
We will break down what each " "possibility does:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:387 +#: ../../source/contributor-tutorial-contribute-on-github.rst:386 msgid "" "If the ``### Changelog entry`` section contains nothing or doesn't exist," " the following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:391 +#: ../../source/contributor-tutorial-contribute-on-github.rst:390 msgid "" "If the ``### Changelog entry`` section contains a description (and no " "token), the following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:397 +#: ../../source/contributor-tutorial-contribute-on-github.rst:396 msgid "" "If the ``### Changelog entry`` section contains ````, nothing will " "change in the changelog." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:399 +#: ../../source/contributor-tutorial-contribute-on-github.rst:398 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:403 +#: ../../source/contributor-tutorial-contribute-on-github.rst:402 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:407 +#: ../../source/contributor-tutorial-contribute-on-github.rst:406 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:411 +#: ../../source/contributor-tutorial-contribute-on-github.rst:410 msgid "" "If the ``### Changelog entry`` section contains ````, the following " "text will be added to the changelog::" msgstr "" -#: 
../../source/contributor-tutorial-contribute-on-github.rst:415 +#: ../../source/contributor-tutorial-contribute-on-github.rst:414 msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:419 +#: ../../source/contributor-tutorial-contribute-on-github.rst:418 msgid "" "Note that only one token must be provided, otherwise, only the first " "action (in the order listed above), will be performed." @@ -2167,10 +2184,11 @@ msgid "(Optional) `pyenv-virtualenv ` msgstr "(可选) `pyenv-virtualenv `_" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:12 +#, fuzzy msgid "" "Flower uses :code:`pyproject.toml` to manage dependencies and configure " "development tools (the ones which support it). Poetry is a build tool " -"which supports `PEP 517 `_." +"which supports `PEP 517 `_." msgstr "" "Flower 使用 :code:`pyproject.toml` 来管理依赖关系和配置开发工具(支持它的)。Poetry 是一种支持 `PEP " "517 `_ 的构建工具。" @@ -2348,15 +2366,16 @@ msgid "Example: FedBN in PyTorch - From Centralized To Federated" msgstr "示例: PyTorch 中的 FedBN - 从集中式到联邦式" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 +#, fuzzy msgid "" "This tutorial will show you how to use Flower to build a federated " "version of an existing machine learning workload with `FedBN " "`_, a federated training strategy " "designed for non-iid data. We are using PyTorch to train a Convolutional " "Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " -"When applying FedBN, only few changes needed compared to `Example: " -"PyTorch - From Centralized To Federated `_." +"When applying FedBN, only few changes needed compared to :doc:`Example: " +"PyTorch - From Centralized To Federated `." 
msgstr "" "本教程将向您展示如何使用 Flower 为现有的机器学习框架构建一个联邦学习的版本,并使用 \"FedBN `_\"(一种针对非 iid 数据设计的联邦训练策略)。我们使用 PyTorch 在 CIFAR-10 " @@ -2370,11 +2389,12 @@ msgid "Centralized Training" msgstr "集中式训练" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 +#, fuzzy msgid "" -"All files are revised based on `Example: PyTorch - From Centralized To " -"Federated `_. The only thing to do is modifying the file called " -":code:`cifar.py`, revised part is shown below:" +"All files are revised based on :doc:`Example: PyTorch - From Centralized " +"To Federated `. The only " +"thing to do is modifying the file called :code:`cifar.py`, revised part " +"is shown below:" msgstr "" "所有文件均根据 `示例: PyTorch -从集中式到联邦式 `_。唯一要做的就是修改名为 :code:`cifar.py` " @@ -2392,11 +2412,12 @@ msgid "You can now run your machine learning workload:" msgstr "现在,您可以运行您的机器学习工作了:" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#, fuzzy msgid "" "So far this should all look fairly familiar if you've used PyTorch " "before. Let's take the next step and use what we've built to create a " -"federated learning system within FedBN, the sytstem consists of one " -"server and two clients." +"federated learning system within FedBN, the system consists of one server" +" and two clients." msgstr "" "到目前为止,如果您以前使用过 PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,使用我们所构建的内容在 FedBN " "中创建一个联邦学习系统,该系统由一个服务器和两个客户端组成。" @@ -2407,14 +2428,14 @@ msgid "Federated Training" msgstr "联邦培训" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 +#, fuzzy msgid "" -"If you have read `Example: PyTorch - From Centralized To Federated " -"`_, the following parts are easy to follow, onyl " -":code:`get_parameters` and :code:`set_parameters` function in " -":code:`client.py` needed to revise. If not, please read the `Example: " -"PyTorch - From Centralized To Federated `_. first." 
+"If you have read :doc:`Example: PyTorch - From Centralized To Federated " +"`, the following parts are" +" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " +"function in :code:`client.py` needed to revise. If not, please read the " +":doc:`Example: PyTorch - From Centralized To Federated `. first." msgstr "" "如果你读过 `示例: PyTorch - 从集中式到联邦式 `_,下面的部分就很容易理解了,只需要修改 " @@ -3004,8 +3025,8 @@ msgid "" "Implementing a Flower *client* basically means implementing a subclass of" " either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " "Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`MNISTClient`. :code:`NumPyClient` is slightly easier " -"to implement than :code:`Client` if you use a framework with good NumPy " +"we'll call it :code:`MNISTClient`. :code:`NumPyClient` is slightly easier" +" to implement than :code:`Client` if you use a framework with good NumPy " "interoperability (like PyTorch or MXNet) because it avoids some of the " "boilerplate that would otherwise be necessary. :code:`MNISTClient` needs " "to implement four methods, two methods for getting/setting model " @@ -3223,8 +3244,8 @@ msgid "" "Implementing a Flower *client* basically means implementing a subclass of" " either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " "Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier " -"to implement than :code:`Client` if you use a framework with good NumPy " +"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" +" to implement than :code:`Client` if you use a framework with good NumPy " "interoperability (like PyTorch or TensorFlow/Keras) because it avoids " "some of the boilerplate that would otherwise be necessary. 
" ":code:`CifarClient` needs to implement four methods, two methods for " @@ -3289,653 +3310,301 @@ msgstr "" "federated>`_。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。现在,您已经准备好进一步探讨这一主题了。比如在每个客户端使用不同的" " CIFAR-10 子集会如何?增加更多客户端会如何?" -#: ../../source/example-walkthrough-pytorch-mnist.rst:2 -msgid "Example: Walk-Through PyTorch & MNIST" -msgstr "实例: PyTorch 和 MNIST 的演练" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:4 -msgid "" -"In this tutorial we will learn, how to train a Convolutional Neural " -"Network on MNIST using Flower and PyTorch." -msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 MNIST 上训练卷积神经网络。" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:6 -#: ../../source/tutorial-quickstart-mxnet.rst:16 -#: ../../source/tutorial-quickstart-pytorch.rst:17 -#: ../../source/tutorial-quickstart-scikitlearn.rst:14 -msgid "" -"Our example consists of one *server* and two *clients* all having the " -"same model." -msgstr "我们的例子包括一个*服务器*和两个*客户端*,它们都有相同的模型。" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:8 -#: ../../source/tutorial-quickstart-pytorch.rst:19 -msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." -msgstr "*客户端*负责在其本地数据集上更新模型参数。然后,这些参数会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的模型参数更新周期称为一*轮*。" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:12 -#: ../../source/tutorial-quickstart-pytorch.rst:23 -msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. 
You can do this by running :" -msgstr "现在,我们已经有了一个大致的概念了,那就让我们开始吧。首先,我们需要安装 Flower。可以通过运行 :" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:18 -msgid "" -"Since we want to use PyTorch to solve a computer vision task, let's go " -"ahead an install PyTorch and the **torchvision** library:" -msgstr "我们想用 PyTorch 来做计算机视觉任务,需要先安装 PyTorch 和 **torchvision** 库:" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:26 -msgid "Ready... Set... Train!" -msgstr "准备...设置...训练!" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:28 -msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. Our training " -"procedure and network architecture are based on PyTorch's `Basic MNIST " -"Example `_. This " -"will allow you see how easy it is to wrap your code with Flower and begin" -" training in a federated way. We provide you with two helper scripts, " -"namely *run-server.sh*, and *run-clients.sh*. Don't be afraid to look " -"inside, they are simple enough =)." -msgstr "" -"现在我们已经安装了所有的依赖包,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 PyTorch 的 " -"`Basic MNIST Example " -"`_。您会发现用 Flower " -"来封装您的代码并进行联邦学习训练是多么容易。我们为您提供了两个辅助脚本,即 *run-server.sh* 和 *run-" -"clients.sh*。别害怕,它们很简单 =)。" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:31 -msgid "" -"Go ahead and launch on a terminal the *run-server.sh* script first as " -"follows:" -msgstr "首先在终端上启动 *run-server.sh* 脚本,如下所示:" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:38 -msgid "Now that the server is up and running, go ahead and launch the clients." -msgstr "现在服务器已经启动并运行,请继续启动客户端。" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:45 -msgid "" -"Et voilà! You should be seeing the training procedure and, after a few " -"iterations, the test accuracy for each client." 
-msgstr "然后就可以了!您应该能看到训练过程,以及经过几次反复后,每个客户端的测试准确率。" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:66 -msgid "Now, let's see what is really happening inside." -msgstr "现在,让我们看看里面到底发生了什么。" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:69 -#: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-mxnet.rst:226 -#: ../../source/tutorial-quickstart-pytorch.rst:203 -#: ../../source/tutorial-quickstart-scikitlearn.rst:157 -#: ../../source/tutorial-quickstart-tensorflow.rst:98 -#: ../../source/tutorial-quickstart-xgboost.rst:309 -msgid "Flower Server" -msgstr "Flower 服务器" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:71 -msgid "" -"Inside the server helper script *run-server.sh* you will find the " -"following code that basically runs the :code:`server.py`" -msgstr "在服务器辅助脚本 *run-server.sh* 中,你可以找到以下代码,这些代码基本上都是运行 :code:`server.py` 的代码" +#: ../../source/explanation-differential-privacy.rst:2 +#: ../../source/explanation-differential-privacy.rst:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 +msgid "Differential Privacy" +msgstr "差分隐私" -#: ../../source/example-walkthrough-pytorch-mnist.rst:78 +#: ../../source/explanation-differential-privacy.rst:3 msgid "" -"We can go a bit deeper and see that :code:`server.py` simply launches a " -"server that will coordinate three rounds of training. Flower Servers are " -"very customizable, but for simple workloads, we can start a server using " -"the :ref:`start_server ` function and " -"leave all the configuration possibilities at their default values, as " -"seen below." +"The information in datasets like healthcare, financial transactions, user" +" preferences, etc., is valuable and has the potential for scientific " +"breakthroughs and provides important business insights. However, such " +"data is also sensitive and there is a risk of compromising individual " +"privacy." 
msgstr "" -"我们可以再深入一点,:code:`server.py` 只是启动了一个服务器,该服务器将协调三轮训练。Flower " -"服务器是非常容易修改的,但对于简单的工作,我们可以使用 :ref:`start_server `函数启动服务器,并将所有可能的配置保留为默认值,如下所示。" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:89 -#: ../../source/tutorial-quickstart-ios.rst:34 -#: ../../source/tutorial-quickstart-mxnet.rst:36 -#: ../../source/tutorial-quickstart-pytorch.rst:37 -#: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-tensorflow.rst:29 -#: ../../source/tutorial-quickstart-xgboost.rst:55 -msgid "Flower Client" -msgstr "Flower 客户端" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:91 -msgid "" -"Next, let's take a look at the *run-clients.sh* file. You will see that " -"it contains the main loop that starts a set of *clients*." -msgstr "接下来,让我们看看 *run-clients.sh* 文件。您会看到它包含了用来启动多个 *客户端* 的代码。" -#: ../../source/example-walkthrough-pytorch-mnist.rst:100 +#: ../../source/explanation-differential-privacy.rst:6 msgid "" -"**cid**: is the client ID. It is an integer that uniquely identifies " -"client identifier." -msgstr "**cid**:是客户 ID。它是一个整数,可唯一标识客户标识符。" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:101 -msgid "**sever_address**: String that identifies IP and port of the server." -msgstr "**sever_address**: 标识服务器 IP 和端口的字符串。" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:102 -msgid "" -"**nb_clients**: This defines the number of clients being created. This " -"piece of information is not required by the client, but it helps us " -"partition the original MNIST dataset to make sure that every client is " -"working on unique subsets of both *training* and *test* sets." +"Traditional methods like anonymization alone would not work because of " +"attacks like Re-identification and Data Linkage. That's where " +"differential privacy comes in. It provides the possibility of analyzing " +"data while ensuring the privacy of individuals." 
msgstr "" -"**nb_clients**: 这定义了正在创建的客户端数量。客户端并不需要这一信息,但它有助于我们对原始 MNIST " -"数据集进行划分,以确保每个客户端都在 *training* 和 *test* 数据集上有独立的数据。" -#: ../../source/example-walkthrough-pytorch-mnist.rst:104 +#: ../../source/explanation-differential-privacy.rst:12 msgid "" -"Again, we can go deeper and look inside :code:`flwr_example/quickstart-" -"pytorch/client.py`. After going through the argument parsing code at the " -"beginning of our :code:`main` function, you will find a call to " -":code:`mnist.load_data`. This function is responsible for partitioning " -"the original MNIST datasets (*training* and *test*) and returning a " -":code:`torch.utils.data.DataLoader` s for each of them. We then " -"instantiate a :code:`PytorchMNISTClient` object with our client ID, our " -"DataLoaders, the number of epochs in each round, and which device we want" -" to use for training (CPU or GPU)." +"Imagine two datasets that are identical except for a single record (for " +"instance, Alice's data). Differential Privacy (DP) guarantees that any " +"analysis (M), like calculating the average income, will produce nearly " +"identical results for both datasets (O and O' would be similar). This " +"preserves group patterns while obscuring individual details, ensuring the" +" individual's information remains hidden in the crowd." msgstr "" -"我们可以深入看一下 :code:`flwr_example/quickstart-pytorch/client.py`。查看 " -":code:`main` 函数开头的参数解析代码后,你会发现一个对 :code:`mnist.load_data` 的调用。该函数负责分割原始 " -"MNIST 数据集(*training* 和 *test*),并为每个数据集返回一个 " -":code:`torch.utils.data.DataLoader` 。然后,我们实例化一个 " -":code:`PytorchMNISTClient` 对象,其中包含我们的客户端 ID、 " -"DataLoader、每一轮中的遍历数,以及我们希望用于训练的设备(CPU 或 GPU)。" -#: ../../source/example-walkthrough-pytorch-mnist.rst:119 -msgid "" -"The :code:`PytorchMNISTClient` object when finally passed to " -":code:`fl.client.start_client` along with the server's address as the " -"training process begins." 
+#: ../../source/explanation-differential-privacy.rst:-1 +msgid "DP Intro" msgstr "" -"当训练过程开始时,:code:`PytorchMNISTClient` 对象会连同服务器地址一起传递给 " -":code:`fl.client.start_client`。" -#: ../../source/example-walkthrough-pytorch-mnist.rst:123 -msgid "A Closer Look" -msgstr "仔细看一下" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:125 +#: ../../source/explanation-differential-privacy.rst:22 msgid "" -"Now, let's look closely into the :code:`PytorchMNISTClient` inside " -":code:`flwr_example.quickstart-pytorch.mnist` and see what it is doing:" +"One of the most commonly used mechanisms to achieve DP is adding enough " +"noise to the output of the analysis to mask the contribution of each " +"individual in the data while preserving the overall accuracy of the " +"analysis." msgstr "" -"现在,让我们仔细研究一下 :code:`flwr_example.quickstart-pytorch.mnist` 中的 " -":code:`PytorchMNISTClient`,看看它在做什么:" -#: ../../source/example-walkthrough-pytorch-mnist.rst:226 -msgid "" -"The first thing to notice is that :code:`PytorchMNISTClient` instantiates" -" a CNN model inside its constructor" -msgstr "首先要注意的是 :code:`PytorchMNISTClient` 在其构造函数中实例化了一个 CNN 模型" +#: ../../source/explanation-differential-privacy.rst:25 +#, fuzzy +msgid "Formal Definition" +msgstr "编译 ProtoBuf 定义" -#: ../../source/example-walkthrough-pytorch-mnist.rst:244 +#: ../../source/explanation-differential-privacy.rst:26 msgid "" -"The code for the CNN is available under :code:`quickstart-pytorch.mnist` " -"and it is reproduced below. It is the same network found in `Basic MNIST " -"Example `_." +"Differential Privacy (DP) provides statistical guarantees against the " +"information an adversary can infer through the output of a randomized " +"algorithm. It provides an unconditional upper bound on the influence of a" +" single individual on the output of the algorithm by adding noise [1]. 
A " +"randomized mechanism M provides (:math:`\\epsilon`, " +":math:`\\delta`)-differential privacy if for any two neighboring " +"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " +"record, and for all possible outputs S ⊆ Range(A):" msgstr "" -"CNN 的代码可在 :code:`quickstart-pytorch.mnist` 下找到,现复制如下。它与 `Basic MNIST " -"Example `_中的网络相同。" -#: ../../source/example-walkthrough-pytorch-mnist.rst:290 +#: ../../source/explanation-differential-privacy.rst:32 msgid "" -"The second thing to notice is that :code:`PytorchMNISTClient` class " -"inherits from the :code:`fl.client.Client`, and hence it must implement " -"the following methods:" +"\\small\n" +"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" msgstr "" -"第二件要注意的事是 :code:`PytorchMNISTClient` 类继承自 " -":code:`fl.client.Client`,因此它必须实现以下方法:" -#: ../../source/example-walkthrough-pytorch-mnist.rst:315 +#: ../../source/explanation-differential-privacy.rst:38 msgid "" -"When comparing the abstract class to its derived class " -":code:`PytorchMNISTClient` you will notice that :code:`fit` calls a " -":code:`train` function and that :code:`evaluate` calls a :code:`test`: " -"function." +"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " +"metric of privacy loss. It also controls the privacy-utility trade-off; " +"lower :math:`\\epsilon` values indicate higher levels of privacy but are " +"likely to reduce utility as well. The :math:`\\delta` parameter accounts " +"for a small probability on which the upper bound :math:`\\epsilon` does " +"not hold. The amount of noise needed to achieve differential privacy is " +"proportional to the sensitivity of the output, which measures the maximum" +" change in the output due to the inclusion or removal of a single record." 
msgstr "" -"将抽象类与其派生类 :code:`PytorchMNISTClient` 进行比较时,您会发现 :code:`fit` 调用了一个 " -":code:`train` 函数,而 :code:`evaluate` 则调用了一个 :code:`test`: 函数。" -#: ../../source/example-walkthrough-pytorch-mnist.rst:317 -msgid "" -"These functions can both be found inside the same :code:`quickstart-" -"pytorch.mnist` module:" -msgstr "这些函数都可以在同一个 :code:`quickstart-pytorch.mnist` 模块中找到:" +#: ../../source/explanation-differential-privacy.rst:45 +#, fuzzy +msgid "Differential Privacy in Machine Learning" +msgstr "差分隐私" -#: ../../source/example-walkthrough-pytorch-mnist.rst:437 +#: ../../source/explanation-differential-privacy.rst:46 msgid "" -"Observe that these functions encapsulate regular training and test loops " -"and provide :code:`fit` and :code:`evaluate` with final statistics for " -"each round. You could substitute them with your custom train and test " -"loops and change the network architecture, and the entire example would " -"still work flawlessly. As a matter of fact, why not try and modify the " -"code to an example of your liking?" +"DP can be utilized in machine learning to preserve the privacy of the " +"training data. Differentially private machine learning algorithms are " +"designed in a way to prevent the algorithm to learn any specific " +"information about any individual data points and subsequently prevent the" +" model from revealing sensitive information. Depending on the stage at " +"which noise is introduced, various methods exist for applying DP to " +"machine learning algorithms. One approach involves adding noise to the " +"training data (either to the features or labels), while another method " +"entails injecting noise into the gradients of the loss function during " +"model training. Additionally, such noise can be incorporated into the " +"model's output." msgstr "" -"请注意,这些函数封装了常规的训练和测试循环,并为 :code:`fit` 和 :code:`evaluate` " -"提供了每轮的最终统计数据。您可以用自定义的训练和测试循环来替代它们,并改变网络结构,整个示例仍然可以完美运行。事实上,为什么不按照自己的喜好修改代码呢?" 
-#: ../../source/example-walkthrough-pytorch-mnist.rst:444 -msgid "Give It a Try" -msgstr "试试看" +#: ../../source/explanation-differential-privacy.rst:53 +#, fuzzy +msgid "Differential Privacy in Federated Learning" +msgstr "扩大联邦学习的规模" -#: ../../source/example-walkthrough-pytorch-mnist.rst:445 +#: ../../source/explanation-differential-privacy.rst:54 msgid "" -"Looking through the quickstart code description above will have given a " -"good understanding of how *clients* and *servers* work in Flower, how to " -"run a simple experiment, and the internals of a client wrapper. Here are " -"a few things you could try on your own and get more experience with " -"Flower:" +"Federated learning is a data minimization approach that allows multiple " +"parties to collaboratively train a model without sharing their raw data. " +"However, federated learning also introduces new privacy challenges. The " +"model updates between parties and the central server can leak information" +" about the local data. These leaks can be exploited by attacks such as " +"membership inference and property inference attacks, or model inversion " +"attacks." msgstr "" -"通过上面的快速入门代码描述,你将对 Flower " -"中*客户端*和*服务器*的工作方式、如何运行一个简单的实验以及客户端封装器的内部结构有一个很好的了解。您可以自己尝试以下内容,以获得更多使用 " -"Flower 的经验:" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:448 -msgid "" -"Try and change :code:`PytorchMNISTClient` so it can accept different " -"architectures." -msgstr "尝试修改 :code:`PytorchMNISTClient`,使其可以接受不同的架构。" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:449 -msgid "Modify the :code:`train` function so that it accepts different optimizers" -msgstr "修改 :code:`train` 函数,使其接受不同的优化器" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:450 -msgid "" -"Modify the :code:`test` function so that it proves not only the top-1 " -"(regular accuracy) but also the top-5 accuracy?" -msgstr "修改 :code:`test` 函数,使其不仅能输出前 1 名(常规精确度),还能证明前 5 名的精确度?" 
-#: ../../source/example-walkthrough-pytorch-mnist.rst:451 -msgid "" -"Go larger! Try to adapt the code to larger images and datasets. Why not " -"try training on ImageNet with a ResNet-50?" -msgstr "让我们尝试让代码适应更大的图像和数据集。为什么不尝试使用 ResNet-50 在 ImageNet 上进行训练呢?" - -#: ../../source/example-walkthrough-pytorch-mnist.rst:453 -msgid "You are ready now. Enjoy learning in a federated way!" -msgstr "您现在已经准备就绪。尽情享受联邦学习的乐趣吧!" - -#: ../../source/explanation-differential-privacy.rst:2 -msgid "Differential privacy" -msgstr "差别隐私" - -#: ../../source/explanation-differential-privacy.rst:4 +#: ../../source/explanation-differential-privacy.rst:58 msgid "" -"Flower provides differential privacy (DP) wrapper classes for the easy " -"integration of the central DP guarantees provided by DP-FedAvg into " -"training pipelines defined in any of the various ML frameworks that " -"Flower is compatible with." +"DP can play a crucial role in federated learning to provide privacy for " +"the clients' data." msgstr "" -"Flower 提供了差分隐私 (DP) 封装类,可将 DP-FedAvg 提供的核心 DP 轻松集成到 Flower 兼容的各种 ML " -"框架中定义的训练模式中。" - -#: ../../source/explanation-differential-privacy.rst:7 -#, fuzzy -msgid "" -"Please note that these components are still experimental; the correct " -"configuration of DP for a specific task is still an unsolved problem." -msgstr "请注意,这些组件仍处于试验阶段,如何为特定任务正确配置 DP 仍是一个尚未解决的问题。" - -#: ../../source/explanation-differential-privacy.rst:10 -msgid "" -"The name DP-FedAvg is misleading since it can be applied on top of any FL" -" algorithm that conforms to the general structure prescribed by the " -"FedOpt family of algorithms." -msgstr "DP-FedAvg 这个名称容易引起误解,因为它可以应用于任何符合 FedOpt 系列算法规定的一般结构的 FL 算法之上。" - -#: ../../source/explanation-differential-privacy.rst:13 -msgid "DP-FedAvg" -msgstr "DP-FedAvg" -#: ../../source/explanation-differential-privacy.rst:15 +#: ../../source/explanation-differential-privacy.rst:60 msgid "" -"DP-FedAvg, originally proposed by McMahan et al. 
[mcmahan]_ and extended " -"by Andrew et al. [andrew]_, is essentially FedAvg with the following " -"modifications." -msgstr "DP-FedAvg 最初由McMahan等人提出,并由Andrew等人加以扩展。" +"Depending on the granularity of privacy provision or the location of " +"noise addition, different forms of DP exist in federated learning. In " +"this explainer, we focus on two approaches of DP utilization in federated" +" learning based on where the noise is added: at the server (also known as" +" the center) or at the client (also known as the local)." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:17 +#: ../../source/explanation-differential-privacy.rst:63 msgid "" -"**Clipping** : The influence of each client's update is bounded by " -"clipping it. This is achieved by enforcing a cap on the L2 norm of the " -"update, scaling it down if needed." -msgstr "**裁剪** : 裁剪会影响到每个客户端的模型参数。具体做法是对参数的 L2 准则设置上限,必要时将其缩减。" +"**Central Differential Privacy**: DP is applied by the server and the " +"goal is to prevent the aggregated model from leaking information about " +"each client's data." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:18 +#: ../../source/explanation-differential-privacy.rst:65 msgid "" -"**Noising** : Gaussian noise, calibrated to the clipping threshold, is " -"added to the average computed at the server." -msgstr "**噪声** : 在服务器计算出的平均值中加入高斯噪声,该噪声根据剪切阈值进行校准。" +"**Local Differential Privacy**: DP is applied on the client side before " +"sending any information to the server and the goal is to prevent the " +"updates that are sent to the server from leaking any information about " +"the client's data." 
+msgstr "" -#: ../../source/explanation-differential-privacy.rst:20 +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:68 +#: ../../source/how-to-use-differential-privacy.rst:11 #, fuzzy -msgid "" -"The distribution of the update norm has been shown to vary from task-to-" -"task and to evolve as training progresses. This variability is crucial in" -" understanding its impact on differential privacy guarantees, emphasizing" -" the need for an adaptive approach [andrew]_ that continuously adjusts " -"the clipping threshold to track a prespecified quantile of the update " -"norm distribution." -msgstr "事实证明,参数更新准则的分布会随着任务的不同而变化,并随着训练的进展而演变。因此,我们采用了一种自适应方法,该方法会不断调整剪切阈值,以跟踪参数更新准则分布的预设量化值。" - -#: ../../source/explanation-differential-privacy.rst:23 -msgid "Simplifying Assumptions" -msgstr "简化假设" +msgid "Central Differential Privacy" +msgstr "差分隐私" -#: ../../source/explanation-differential-privacy.rst:25 -#, fuzzy +#: ../../source/explanation-differential-privacy.rst:69 msgid "" -"We make (and attempt to enforce) a number of assumptions that must be " -"satisfied to ensure that the training process actually realizes the " -":math:`(\\epsilon, \\delta)` guarantees the user has in mind when " -"configuring the setup." +"In this approach, which is also known as user-level DP, the central " +"server is responsible for adding noise to the globally aggregated " +"parameters. It should be noted that trust in the server is required." msgstr "" -"我们提出(并试图执行)了一系列必须满足的假设,以确保训练过程真正实现用户在配置设置时所定的 :math:`(\\epsilon,\\delta)`" -" 。" - -#: ../../source/explanation-differential-privacy.rst:27 -msgid "" -"**Fixed-size subsampling** :Fixed-size subsamples of the clients must be " -"taken at each round, as opposed to variable-sized Poisson subsamples." 
-msgstr "** 固定大小的子样本** :与可变大小的泊松分布子样本相比,每轮必须抽取固定大小的客户端子样本。" - -#: ../../source/explanation-differential-privacy.rst:28 -msgid "" -"**Unweighted averaging** : The contributions from all the clients must " -"weighted equally in the aggregate to eliminate the requirement for the " -"server to know in advance the sum of the weights of all clients available" -" for selection." -msgstr "**非加权平均**: 所有客户端的贡献必须加权相等,这样服务器就不需要事先知道所有客户的权重总和。" -#: ../../source/explanation-differential-privacy.rst:29 +#: ../../source/explanation-differential-privacy.rst:76 msgid "" -"**No client failures** : The set of available clients must stay constant " -"across all rounds of training. In other words, clients cannot drop out or" -" fail." -msgstr "**没有失败的客户端** : 在各轮训练中,可用客户端的数量必须保持不变。换句话说,客户端不能退出或失败。" +"While there are various ways to implement central DP in federated " +"learning, we concentrate on the algorithms proposed by [2] and [3]. The " +"overall approach is to clip the model updates sent by the clients and add" +" some amount of noise to the aggregated model. In each iteration, a " +"random set of clients is chosen with a specific probability for training." +" Each client performs local training on its own data. The update of each " +"client is then clipped by some value `S` (sensitivity `S`). This would " +"limit the impact of any individual client which is crucial for privacy " +"and often beneficial for robustness. A common approach to achieve this is" +" by restricting the `L2` norm of the clients' model updates, ensuring " +"that larger updates are scaled down to fit within the norm `S`." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:31 -#, fuzzy -msgid "" -"The first two are useful for eliminating a multitude of complications " -"associated with calibrating the noise to the clipping threshold, while " -"the third one is required to comply with the assumptions of the privacy " -"analysis." 
-msgstr "前两种方法有助于消除将噪声校准为削波阈值所带来的诸多复杂问题,而第三种方法则需要符合隐私分析的假设。" +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "clipping" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:34 +#: ../../source/explanation-differential-privacy.rst:89 msgid "" -"These restrictions are in line with constraints imposed by Andrew et al. " -"[andrew]_." -msgstr "这些限制与 Andrew 等人所施加的限制一致。" - -#: ../../source/explanation-differential-privacy.rst:37 -msgid "Customizable Responsibility for Noise injection" -msgstr "可定制的噪声注入" +"Afterwards, the Gaussian mechanism is used to add noise in order to " +"distort the sum of all clients' updates. The amount of noise is scaled to" +" the sensitivity value to obtain a privacy guarantee. The Gaussian " +"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " +"noise_scale * S ) / (number of sampled clients)`." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:38 -msgid "" -"In contrast to other implementations where the addition of noise is " -"performed at the server, you can configure the site of noise injection to" -" better match your threat model. We provide users with the flexibility to" -" set up the training such that each client independently adds a small " -"amount of noise to the clipped update, with the result that simply " -"aggregating the noisy updates is equivalent to the explicit addition of " -"noise to the non-noisy aggregate at the server." 
-msgstr "与其他在服务器上添加噪声的实现方法不同,您可以配置噪声注入的位置,以便更好地匹配您的威胁模型。我们为用户提供了设置训练的灵活性,使每个客户端都能独立地为剪切参数更新添加少量噪声,这样,只需聚合噪声更新,就相当于在服务器上为非噪声聚合添加噪声了。" +#: ../../source/explanation-differential-privacy.rst:94 +msgid "Clipping" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:41 +#: ../../source/explanation-differential-privacy.rst:96 msgid "" -"To be precise, if we let :math:`m` be the number of clients sampled each " -"round and :math:`\\sigma_\\Delta` be the scale of the total Gaussian " -"noise that needs to be added to the sum of the model updates, we can use " -"simple maths to show that this is equivalent to each client adding noise " -"with scale :math:`\\sigma_\\Delta/\\sqrt{m}`." +"There are two forms of clipping commonly used in Central DP: Fixed " +"Clipping and Adaptive Clipping." msgstr "" -"准确地说,我们假设每轮采样的客户端数量为:math:`m`,:math:`\\sigma_\\Delta` " -"为需要添加到模型更新总和中的总高斯噪声的规模,我们就可以用简单的数学方法证明了,这相当于每个客户端都添加了规模为 " -":math:`\\sigma_\\Delta/\\sqrt{m}` 的噪声。" - -#: ../../source/explanation-differential-privacy.rst:44 -msgid "Wrapper-based approach" -msgstr "基于封装的方法" -#: ../../source/explanation-differential-privacy.rst:46 +#: ../../source/explanation-differential-privacy.rst:98 msgid "" -"Introducing DP to an existing workload can be thought of as adding an " -"extra layer of security around it. This inspired us to provide the " -"additional server and client-side logic needed to make the training " -"process differentially private as wrappers for instances of the " -":code:`Strategy` and :code:`NumPyClient` abstract classes respectively. " -"This wrapper-based approach has the advantage of being easily composable " -"with other wrappers that someone might contribute to the Flower library " -"in the future, e.g., for secure aggregation. Using Inheritance instead " -"can be tedious because that would require the creation of new sub- " -"classes every time a new class implementing :code:`Strategy` or " -":code:`NumPyClient` is defined." 
-msgstr "" -"在现有工作负载中引入 DP 可以被认为是在其周围增加了一层额外的安全性。受此启发,我们提供了额外的服务器端和客户端逻辑,分别作为 " -":code:`Strategy` 和 :code:`NumPyClient` " -"抽象类实例的封装器,使训练过程具有不同的隐私性。这种基于封装器的方法的优点是可以很容易地与将来有人贡献给 Flower " -"的其他封装器(例如用于安全聚合的封装器)进行组合。使用继承可能会比较繁琐,因为每次定义实现 :code:`Strategy` 或 " -":code:`NumPyClient` 的新类时,都需要创建新的子类。" - -#: ../../source/explanation-differential-privacy.rst:49 -msgid "Server-side logic" -msgstr "服务器端逻辑" +"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " +"of clients' updates. Any update exceeding this threshold is clipped back " +"to the threshold value." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:51 -#, fuzzy +#: ../../source/explanation-differential-privacy.rst:100 msgid "" -"The first version of our solution was to define a decorator whose " -"constructor accepted, among other things, a boolean-valued variable " -"indicating whether adaptive clipping was to be enabled or not. We quickly" -" realized that this would clutter its :code:`__init__()` function with " -"variables corresponding to hyperparameters of adaptive clipping that " -"would remain unused when it was disabled. A cleaner implementation could " -"be achieved by splitting the functionality into two decorators, " -":code:`DPFedAvgFixed` and :code:`DPFedAvgAdaptive`, with the latter sub- " -"classing the former. The constructors for both classes accept a boolean " -"parameter :code:`server_side_noising`, which, as the name suggests, " -"determines where noising is to be performed." +"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " +"on the observed update distribution [4]. It means that the clipping value" +" is tuned during the rounds with respect to the quantile of the update " +"norm distribution." 
msgstr "" -"我们的第一版解决方案是定义一个装饰器,其构造函数接受一个布尔值变量,表示是否启用自适应剪裁。我们很快意识到,这样会使其 " -":code:`__init__()` " -"函数中与自适应裁剪超参数相对应的变量变得杂乱无章,而这些变量在自适应裁剪被禁用时将保持未使用状态。要实现更简洁的功能,可以将该功能拆分为两个装饰器,即" -" :code:`DPFedAvgFixed` 和 " -":code:`DPFedAvgAdaptive`,后者是前者的子类。这两个类的构造函数都接受一个布尔参数 " -":code:`server_side_noising`,顾名思义,它决定了在哪里加噪声。" - -#: ../../source/explanation-differential-privacy.rst:54 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 -msgid "DPFedAvgFixed" -msgstr "DPFedAvgFixed" -#: ../../source/explanation-differential-privacy.rst:56 +#: ../../source/explanation-differential-privacy.rst:102 msgid "" -"The server-side capabilities required for the original version of DP-" -"FedAvg, i.e., the one which performed fixed clipping, can be completely " -"captured with the help of wrapper logic for just the following two " -"methods of the :code:`Strategy` abstract class." +"The choice between fixed and adaptive clipping depends on various factors" +" such as privacy requirements, data distribution, model complexity, and " +"others." msgstr "" -"只需对 :code:`Strategy` 抽象类的以下两个方法进行封装,就能完全捕获 DP-FedAvg " -"原始版本(即执行固定剪裁的版本)所需的服务器端功能。" -#: ../../source/explanation-differential-privacy.rst:58 -msgid "" -":code:`configure_fit()` : The config dictionary being sent by the wrapped" -" :code:`Strategy` to each client needs to be augmented with an additional" -" value equal to the clipping threshold (keyed under " -":code:`dpfedavg_clip_norm`) and, if :code:`server_side_noising=true`, " -"another one equal to the scale of the Gaussian noise that needs to be " -"added at the client (keyed under :code:`dpfedavg_noise_stddev`). This " -"entails *post*-processing of the results returned by the wrappee's " -"implementation of :code:`configure_fit()`." 
-msgstr "" -":code:`configure_fit()` :由封装的 :code:`Strategy` " -"发送到每个客户端的配置字典需要使用等于裁剪阈值的附加值(在 :code:`dpfedavg_clip_norm` 下键入)进行扩充。并且,如果 " -"server_side_noising=true,则另一个值等于需要在客户端添加的高斯噪声的大小(在 dpfedavg_noise_stddev " -"下键入)。这需要对封装后的configure_fit() 所返回的结果进行后处理。" - -#: ../../source/explanation-differential-privacy.rst:59 -#, fuzzy -msgid "" -":code:`aggregate_fit()`: We check whether any of the sampled clients " -"dropped out or failed to upload an update before the round timed out. In " -"that case, we need to abort the current round, discarding any successful " -"updates that were received, and move on to the next one. On the other " -"hand, if all clients responded successfully, we must force the averaging " -"of the updates to happen in an unweighted manner by intercepting the " -":code:`parameters` field of :code:`FitRes` for each received update and " -"setting it to 1. Furthermore, if :code:`server_side_noising=true`, each " -"update is perturbed with an amount of noise equal to what it would have " -"been subjected to had client-side noising being enabled. This entails " -"*pre*-processing of the arguments to this method before passing them on " -"to the wrappee's implementation of :code:`aggregate_fit()`." -msgstr "" -":code:`aggregate_fit()`: " -"我们会检查是否有任何客户端在本轮超时前退出或未能上传参数更新。在这种情况下,我们需要中止当前一轮,丢弃已收到的所有参数更新,然后继续下一轮。另一方面,如果所有客户端都成功响应,我们就必须通过拦截" -" :code:`FitRes` 的 :code:`parameters` 字段并将其设置为 1,强制以不加权的方式平均更新。此外,如果 " -":code:`server_side_noising=true`,每次更新都会受到一定量的噪声扰动,其扰动量相当于启用客户端噪声时的扰动量。 " -"这就需要在将本方法的参数传递给封装的 :code:`aggregate_fit()` 之前,对参数进行*预*处理。" - -#: ../../source/explanation-differential-privacy.rst:62 -msgid "" -"We can't directly change the aggregation function of the wrapped strategy" -" to force it to add noise to the aggregate, hence we simulate client-side" -" noising to implement server-side noising." 
-msgstr "我们无法直接改变封装策略的聚合函数,迫使它在聚合中添加噪声,因此我们模拟客户端噪声来实现服务器端噪声。" - -#: ../../source/explanation-differential-privacy.rst:64 -msgid "" -"These changes have been put together into a class called " -":code:`DPFedAvgFixed`, whose constructor accepts the strategy being " -"decorated, the clipping threshold and the number of clients sampled every" -" round as compulsory arguments. The user is expected to specify the " -"clipping threshold since the order of magnitude of the update norms is " -"highly dependent on the model being trained and providing a default value" -" would be misleading. The number of clients sampled at every round is " -"required to calculate the amount of noise that must be added to each " -"individual update, either by the server or the clients." -msgstr "" -"这些变化被整合到一个名为 :code:`DPFedAvgFixed` " -"的类中,其构造函数接受被装饰的策略、剪切阈值和每轮采样的客户数作为必选参数。用户需要指定剪切阈值,因为参数更新规范的数量级在很大程度上取决于正在训练的模型,提供默认值会产生误导。每轮采样的客户端数量是计算服务器或客户在每次参数更新时添加的噪音量所必需的。" - -#: ../../source/explanation-differential-privacy.rst:67 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 -msgid "DPFedAvgAdaptive" -msgstr "DPFedAvgAdaptive" +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:105 +#: ../../source/how-to-use-differential-privacy.rst:96 +#, fuzzy +msgid "Local Differential Privacy" +msgstr "差分隐私" -#: ../../source/explanation-differential-privacy.rst:69 +#: ../../source/explanation-differential-privacy.rst:107 msgid "" -"The additional functionality required to facilitate adaptive clipping has" -" been provided in :code:`DPFedAvgAdaptive`, a subclass of " -":code:`DPFedAvgFixed`. It overrides the above-mentioned methods to do the" -" following." +"In this approach, each client is responsible for performing DP. Local DP " +"avoids the need for a fully trusted aggregator, but it should be noted " +"that local DP leads to a decrease in accuracy but better privacy in " +"comparison to central DP." 
msgstr "" -"自适应剪裁所需的附加功能在 :code:`DPFedAvgAdaptive` 中提供,其是 :code:`DPFedAvgFixed` " -"的子类。它重写了上述方法,以实现以下功能。" -#: ../../source/explanation-differential-privacy.rst:71 -msgid "" -":code:`configure_fit()` : It intercepts the config dict returned by " -":code:`super.configure_fit()` to add the key-value pair " -":code:`dpfedavg_adaptive_clip_enabled:True` to it, which the client " -"interprets as an instruction to include an indicator bit (1 if update " -"norm <= clipping threshold, 0 otherwise) in the results returned by it." +#: ../../source/explanation-differential-privacy.rst:116 +msgid "In this explainer, we focus on two forms of achieving Local DP:" msgstr "" -":code:`configure_fit()`:它截取由 :code:`super.configure_fit()` 返回的 config " -"字典,并在其中添加键-值对 " -":code:`dpfedavg_adaptive_clip_enabled:True\",客户端将其解释为在返回结果中包含一个指示位(如果参数更新范式" -" <= 剪裁阈值,则为 1,否则为 0)的指令。" -#: ../../source/explanation-differential-privacy.rst:73 +#: ../../source/explanation-differential-privacy.rst:118 msgid "" -":code:`aggregate_fit()` : It follows a call to " -":code:`super.aggregate_fit()` with one to :code:`__update_clip_norm__()`," -" a procedure which adjusts the clipping threshold on the basis of the " -"indicator bits received from the sampled clients." -msgstr ":code:`aggregate_fit()`:在调用:code:`super.aggregate_fit()`后,再调用:code:`__update_clip_norm__()`,该过程根据从采样客户端接收到的指示位调整裁剪阈值。" - -#: ../../source/explanation-differential-privacy.rst:77 -msgid "Client-side logic" -msgstr "客户端逻辑" +"Each client adds noise to the local updates before sending them to the " +"server. 
To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " +"the sensitivity of the local model to be ∆, Gaussian noise is applied " +"with a noise scale of σ where:" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:79 +#: ../../source/explanation-differential-privacy.rst:120 msgid "" -"The client-side capabilities required can be completely captured through " -"wrapper logic for just the :code:`fit()` method of the " -":code:`NumPyClient` abstract class. To be precise, we need to *post-" -"process* the update computed by the wrapped client to clip it, if " -"necessary, to the threshold value supplied by the server as part of the " -"config dictionary. In addition to this, it may need to perform some extra" -" work if either (or both) of the following keys are also present in the " -"dict." +"\\small\n" +"\\frac{∆ \\times \\sqrt{2 \\times " +"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" +"\n" msgstr "" -"客户端所需的功能完全可以通过 :code:`NumPyClient` 抽象类的 :code:`fit()` " -"方法的封装逻辑来实现。准确地说,我们需要对封装客户端计算的参数更新进行处理,以便在必要时将其剪切到服务器作为配置字典的一部分提供的阈值。除此之外,如果配置字典中还存在以下任一(或两个)键,客户端可能还需要执行一些额外的工作。" -#: ../../source/explanation-differential-privacy.rst:81 +#: ../../source/explanation-differential-privacy.rst:125 msgid "" -":code:`dpfedavg_noise_stddev` : Generate and add the specified amount of " -"noise to the clipped update." -msgstr "code:`dpfedavg_noise_stddev`:生成并在剪切参数更新中添加指定数量的噪声。" +"Each client adds noise to the gradients of the model during the local " +"training (DP-SGD). More specifically, in this approach, gradients are " +"clipped and an amount of calibrated noise is injected into the gradients." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:82 +#: ../../source/explanation-differential-privacy.rst:128 msgid "" -":code:`dpfedavg_adaptive_clip_enabled` : Augment the metrics dict in the " -":code:`FitRes` object being returned to the server with an indicator bit," -" calculated as described earlier." 
+"Please note that these two approaches are providing privacy at different " +"levels." msgstr "" -":code:`dpfedavg_adaptive_clip_enabled`:在返回给服务器的 :code:`FitRes` " -"对象中的度量值字典中增加一个指标位,计算方法如前所述。" -#: ../../source/explanation-differential-privacy.rst:86 -msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" -msgstr "进行 :math:`(epsilon, \\delta)` 分析" +#: ../../source/explanation-differential-privacy.rst:131 +#, fuzzy +msgid "**References:**" +msgstr "参考资料" -#: ../../source/explanation-differential-privacy.rst:88 -msgid "" -"Assume you have trained for :math:`n` rounds with sampling fraction " -":math:`q` and noise multiplier :math:`z`. In order to calculate the " -":math:`\\epsilon` value this would result in for a particular " -":math:`\\delta`, the following script may be used." +#: ../../source/explanation-differential-privacy.rst:133 +msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." msgstr "" -"假设您已经训练了 :math:`n` 轮,采样比例为 :math:`q`,噪声乘数为 :math:`z`。为了计算特定 " -":math:`\\delta` 的 :math:`epsilon` 值,可以使用下面的脚本。" -#: ../../source/explanation-differential-privacy.rst:98 +#: ../../source/explanation-differential-privacy.rst:135 #, fuzzy msgid "" -"McMahan et al. \"Learning Differentially Private Recurrent Language " -"Models.\" International Conference on Learning Representations (ICLR), " -"2017." +"[2] McMahan et al. Learning Differentially Private Recurrent Language " +"Models." msgstr "" "McMahan, H. Brendan等. \"Learning differentially private recurrent " "language models.\" arXiv preprint arXiv:1710.06963 (2017)." -#: ../../source/explanation-differential-privacy.rst:100 -#, fuzzy +#: ../../source/explanation-differential-privacy.rst:137 msgid "" -"Andrew, Galen, et al. \"Differentially Private Learning with Adaptive " -"Clipping.\" Advances in Neural Information Processing Systems (NeurIPS), " -"2021." +"[3] Geyer et al. Differentially Private Federated Learning: A Client " +"Level Perspective." 
+msgstr "" + +#: ../../source/explanation-differential-privacy.rst:139 +#, fuzzy +msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." msgstr "" "Andrew, Galen等. \"Differentially private learning with adaptive " "clipping.\" Advances in Neural Information Processing Systems 34 (2021): " @@ -4382,6 +4051,7 @@ msgid "As a reference, this document follows the above structure." msgstr "作为参考,本文件采用上述结构。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 +#: ../../source/ref-api/flwr.common.Metadata.rst:2 msgid "Metadata" msgstr "描述数据" @@ -4715,15 +4385,15 @@ msgid "" msgstr "在某些情况下,有必要向不同的客户端发送不同的配置值。" #: ../../source/how-to-configure-clients.rst:89 +#, fuzzy msgid "" "This can be achieved by customizing an existing strategy or by " -"`implementing a custom strategy from scratch " -"`_. " -"Here's a nonsensical example that customizes :code:`FedAvg` by adding a " -"custom ``\"hello\": \"world\"`` configuration key/value pair to the " -"config dict of a *single client* (only the first client in the list, the " -"other clients in this round to not receive this \"special\" config " -"value):" +":doc:`implementing a custom strategy from scratch `. 
Here's a nonsensical example that customizes :code:`FedAvg`" +" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" +" to the config dict of a *single client* (only the first client in the " +"list, the other clients in this round to not receive this \"special\" " +"config value):" msgstr "" "这可以通过定制现有策略或 `从头开始实施一个定制策略 `_" +msgid ":doc:`How to run Flower using Docker `" msgstr "" "`TensorFlow快速入门 (教程) `_" @@ -5620,15 +5290,15 @@ msgid "Resources" msgstr "资源" #: ../../source/how-to-monitor-simulation.rst:234 +#, fuzzy msgid "" -"Ray Dashboard: ``_" +"Ray Dashboard: ``_" msgstr "Ray 仪表盘: ``_" #: ../../source/how-to-monitor-simulation.rst:236 -msgid "" -"Ray Metrics: ``_" +#, fuzzy +msgid "Ray Metrics: ``_" msgstr "" "Ray 指标: ``_" @@ -6650,7 +6320,8 @@ msgstr "除了上述必要的改动之外,还有一些潜在的改进措施: msgid "" "Remove \"placeholder\" methods from subclasses of ``Client`` or " "``NumPyClient``. If you, for example, use server-side evaluation, then " -"empty placeholder implementations of ``evaluate`` are no longer necessary." +"empty placeholder implementations of ``evaluate`` are no longer " +"necessary." msgstr "" "删除 ``Client`` 或 ``NumPyClient`` 子类中的 \"占位符 " "\"方法。例如,如果你使用服务器端评估,那么就不再需要``evaluate``的 \"空占位符 \"实现。" @@ -6798,25 +6469,175 @@ msgid "" msgstr "" #: ../../source/how-to-use-built-in-mods.rst:89 -msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" +msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" msgstr "" -#: ../../source/how-to-use-strategies.rst:2 -msgid "Use strategies" -msgstr "使用策略" +#: ../../source/how-to-use-differential-privacy.rst:2 +#, fuzzy +msgid "Use Differential Privacy" +msgstr "差分隐私" -#: ../../source/how-to-use-strategies.rst:4 +#: ../../source/how-to-use-differential-privacy.rst:3 msgid "" -"Flower allows full customization of the learning process through the " -":code:`Strategy` abstraction. A number of built-in strategies are " -"provided in the core framework." 
-msgstr "Flower 允许通过 :code:`Strategy` 抽象类对学习过程进行完全定制。核心框架中提供了许多内置策略。" +"This guide explains how you can utilize differential privacy in the " +"Flower framework. If you are not yet familiar with differential privacy, " +"you can refer to :doc:`explanation-differential-privacy`." +msgstr "" -#: ../../source/how-to-use-strategies.rst:6 +#: ../../source/how-to-use-differential-privacy.rst:7 msgid "" -"There are three ways to customize the way Flower orchestrates the " -"learning process on the server side:" -msgstr "有三种方法可以自定义 Flower 在服务器端协调学习过程的方式:" +"Differential Privacy in Flower is in a preview phase. If you plan to use " +"these features in a production environment with sensitive data, feel free" +" contact us to discuss your requirements and to receive guidance on how " +"to best use these features." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:12 +msgid "" +"This approach consists of two seprate phases: clipping of the updates and" +" adding noise to the aggregated model. For the clipping phase, Flower " +"framework has made it possible to decide whether to perform clipping on " +"the server side or the client side." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:15 +msgid "" +"**Server-side Clipping**: This approach has the advantage of the server " +"enforcing uniform clipping across all clients' updates and reducing the " +"communication overhead for clipping values. However, it also has the " +"disadvantage of increasing the computational load on the server due to " +"the need to perform the clipping operation for all clients." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:16 +msgid "" +"**Client-side Clipping**: This approach has the advantage of reducing the" +" computational overhead on the server. However, it also has the " +"disadvantage of lacking centralized control, as the server has less " +"control over the clipping process." 
+msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:21 +#, fuzzy +msgid "Server-side Clipping" +msgstr "服务器端逻辑" + +#: ../../source/how-to-use-differential-privacy.rst:22 +msgid "" +"For central DP with server-side clipping, there are two :code:`Strategy` " +"classes that act as wrappers around the actual :code:`Strategy` instance " +"(for example, :code:`FedAvg`). The two wrapper classes are " +":code:`DifferentialPrivacyServerSideFixedClipping` and " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " +"adaptive clipping." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:-1 +#, fuzzy +msgid "server side clipping" +msgstr "服务器端逻辑" + +#: ../../source/how-to-use-differential-privacy.rst:31 +msgid "" +"The code sample below enables the :code:`FedAvg` strategy to use server-" +"side fixed clipping using the " +":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " +"same approach can be used with " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " +"corresponding input parameters." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:52 +#, fuzzy +msgid "Client-side Clipping" +msgstr "客户端逻辑" + +#: ../../source/how-to-use-differential-privacy.rst:53 +msgid "" +"For central DP with client-side clipping, the server sends the clipping " +"value to selected clients on each round. Clients can use existing Flower " +":code:`Mods` to perform the clipping. Two mods are available for fixed " +"and adaptive client-side clipping: :code:`fixedclipping_mod` and " +":code:`adaptiveclipping_mod` with corresponding server-side wrappers " +":code:`DifferentialPrivacyClientSideFixedClipping` and " +":code:`DifferentialPrivacyClientSideAdaptiveClipping`." 
+msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:-1 +#, fuzzy +msgid "client side clipping" +msgstr "客户端逻辑" + +#: ../../source/how-to-use-differential-privacy.rst:63 +msgid "" +"The code sample below enables the :code:`FedAvg` strategy to use " +"differential privacy with client-side fixed clipping using both the " +":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " +"the client, :code:`fixedclipping_mod`:" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:80 +msgid "" +"In addition to the server-side strategy wrapper, the :code:`ClientApp` " +"needs to configure the matching :code:`fixedclipping_mod` to perform the " +"client-side clipping:" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:97 +msgid "" +"To utilize local differential privacy (DP) and add noise to the client " +"model parameters before transmitting them to the server in Flower, you " +"can use the `LocalDpMod`. The following hyperparameters need to be set: " +"clipping norm value, sensitivity, epsilon, and delta." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "local DP mod" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:104 +msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:122 +msgid "" +"Please note that the order of mods, especially those that modify " +"parameters, is important when using multiple modifiers. Typically, " +"differential privacy (DP) modifiers should be the last to operate on " +"parameters." +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:125 +msgid "Local Training using Privacy Engines" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:126 +msgid "" +"For ensuring data instance-level privacy during local model training on " +"the client side, consider leveraging privacy engines such as Opacus and " +"TensorFlow Privacy. 
For examples of using Flower with these engines, " +"please refer to the Flower examples directory (`Opacus " +"`_, `Tensorflow" +" Privacy `_)." +msgstr "" + +#: ../../source/how-to-use-strategies.rst:2 +msgid "Use strategies" +msgstr "使用策略" + +#: ../../source/how-to-use-strategies.rst:4 +msgid "" +"Flower allows full customization of the learning process through the " +":code:`Strategy` abstraction. A number of built-in strategies are " +"provided in the core framework." +msgstr "Flower 允许通过 :code:`Strategy` 抽象类对学习过程进行完全定制。核心框架中提供了许多内置策略。" + +#: ../../source/how-to-use-strategies.rst:6 +msgid "" +"There are three ways to customize the way Flower orchestrates the " +"learning process on the server side:" +msgstr "有三种方法可以自定义 Flower 在服务器端协调学习过程的方式:" #: ../../source/how-to-use-strategies.rst:8 msgid "Use an existing strategy, for example, :code:`FedAvg`" @@ -6925,11 +6746,11 @@ msgstr "快速入门教程" msgid "How-to guides" msgstr "操作指南" -#: ../../source/index.rst:97 +#: ../../source/index.rst:98 msgid "Legacy example guides" msgstr "旧版指南范例" -#: ../../source/index.rst:108 ../../source/index.rst:112 +#: ../../source/index.rst:109 ../../source/index.rst:113 msgid "Explanations" msgstr "说明" @@ -6937,23 +6758,23 @@ msgstr "说明" msgid "API reference" msgstr "应用程序接口参考" -#: ../../source/index.rst:137 +#: ../../source/index.rst:138 msgid "Reference docs" msgstr "参考文档" -#: ../../source/index.rst:153 +#: ../../source/index.rst:154 msgid "Contributor tutorials" msgstr "贡献者教程" -#: ../../source/index.rst:160 +#: ../../source/index.rst:161 msgid "Contributor how-to guides" msgstr "投稿指南" -#: ../../source/index.rst:173 +#: ../../source/index.rst:174 msgid "Contributor explanations" msgstr "贡献者解释" -#: ../../source/index.rst:179 +#: ../../source/index.rst:180 msgid "Contributor references" msgstr "贡献者参考资料" @@ -7048,33 +6869,33 @@ msgid "" "specific goal." 
msgstr "以问题为导向的 \"如何做 \"指南逐步展示如何实现特定目标。" -#: ../../source/index.rst:110 +#: ../../source/index.rst:111 msgid "" "Understanding-oriented concept guides explain and discuss key topics and " "underlying ideas behind Flower and collaborative AI." msgstr "以理解为导向的概念指南解释并讨论了Flower和协作式人工智能背后的关键主题和基本思想。" -#: ../../source/index.rst:120 +#: ../../source/index.rst:121 msgid "References" msgstr "参考资料" -#: ../../source/index.rst:122 +#: ../../source/index.rst:123 msgid "Information-oriented API reference and other reference material." msgstr "以信息为导向的 API 参考资料和其他参考资料。" -#: ../../source/index.rst:131::1 +#: ../../source/index.rst:132::1 msgid ":py:obj:`flwr `\\" msgstr "" -#: ../../source/index.rst:131::1 flwr:1 of +#: ../../source/index.rst:132::1 flwr:1 of msgid "Flower main package." msgstr "" -#: ../../source/index.rst:148 +#: ../../source/index.rst:149 msgid "Contributor docs" msgstr "贡献者文档" -#: ../../source/index.rst:150 +#: ../../source/index.rst:151 msgid "" "The Flower community welcomes contributions. The following docs are " "intended to help along the way." @@ -7096,12 +6917,22 @@ msgstr "flower-driver-api" msgid "flower-fleet-api" msgstr "flower-fleet-api" +#: ../../source/ref-api-cli.rst:37 +#, fuzzy +msgid "flower-client-app" +msgstr "Flower 客户端。" + +#: ../../source/ref-api-cli.rst:47 +#, fuzzy +msgid "flower-server-app" +msgstr "flower-driver-api" + #: ../../source/ref-api/flwr.rst:2 #, fuzzy msgid "flwr" msgstr "Flower" -#: ../../source/ref-api/flwr.rst:25 ../../source/ref-api/flwr.server.rst:48 +#: ../../source/ref-api/flwr.rst:25 ../../source/ref-api/flwr.server.rst:52 msgid "Modules" msgstr "" @@ -7126,7 +6957,7 @@ msgid ":py:obj:`flwr.server `\\" msgstr "" #: ../../source/ref-api/flwr.rst:35::1 -#: ../../source/ref-api/flwr.server.rst:37::1 flwr.server:1 +#: ../../source/ref-api/flwr.server.rst:41::1 flwr.server:1 #: flwr.server.server.Server:1 of msgid "Flower server." 
msgstr "Flower 服务器。" @@ -7146,7 +6977,6 @@ msgstr "客户端" #: ../../source/ref-api/flwr.client.rst:13 #: ../../source/ref-api/flwr.common.rst:13 -#: ../../source/ref-api/flwr.server.driver.rst:13 #: ../../source/ref-api/flwr.server.rst:13 #: ../../source/ref-api/flwr.simulation.rst:13 #, fuzzy @@ -7186,10 +7016,10 @@ msgid "Start a Flower NumPyClient which connects to a gRPC server." msgstr "启动 Flower NumPyClient,连接到 gRPC 服务器。" #: ../../source/ref-api/flwr.client.rst:26 -#: ../../source/ref-api/flwr.common.rst:31 -#: ../../source/ref-api/flwr.server.driver.rst:24 -#: ../../source/ref-api/flwr.server.rst:28 +#: ../../source/ref-api/flwr.common.rst:32 +#: ../../source/ref-api/flwr.server.rst:29 #: ../../source/ref-api/flwr.server.strategy.rst:17 +#: ../../source/ref-api/flwr.server.workflow.rst:17 msgid "Classes" msgstr "" @@ -7204,7 +7034,7 @@ msgstr "Flower 客户端的抽象基类。" #: ../../source/ref-api/flwr.client.rst:33::1 msgid "" -":py:obj:`ClientApp `\\ \\(client\\_fn\\[\\, " +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " "mods\\]\\)" msgstr "" @@ -7232,8 +7062,12 @@ msgstr "" #: ../../source/ref-api/flwr.client.Client.rst:15 #: ../../source/ref-api/flwr.client.ClientApp.rst:15 #: ../../source/ref-api/flwr.client.NumPyClient.rst:15 +#: ../../source/ref-api/flwr.common.Array.rst:15 #: ../../source/ref-api/flwr.common.ClientMessage.rst:15 +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Context.rst:15 #: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 +#: ../../source/ref-api/flwr.common.Error.rst:15 #: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 #: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 #: ../../source/ref-api/flwr.common.FitIns.rst:15 @@ -7242,20 +7076,32 @@ msgstr "" #: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 #: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 #: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 +#: ../../source/ref-api/flwr.common.Message.rst:15 +#: 
../../source/ref-api/flwr.common.MessageType.rst:15 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 +#: ../../source/ref-api/flwr.common.Metadata.rst:15 +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 #: ../../source/ref-api/flwr.common.Parameters.rst:15 +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 #: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 +#: ../../source/ref-api/flwr.common.RecordSet.rst:15 #: ../../source/ref-api/flwr.common.ServerMessage.rst:15 #: ../../source/ref-api/flwr.common.Status.rst:15 #: ../../source/ref-api/flwr.server.ClientManager.rst:15 +#: ../../source/ref-api/flwr.server.Driver.rst:15 #: ../../source/ref-api/flwr.server.History.rst:15 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 #: ../../source/ref-api/flwr.server.Server.rst:15 +#: ../../source/ref-api/flwr.server.ServerApp.rst:15 #: ../../source/ref-api/flwr.server.ServerConfig.rst:15 #: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 -#: ../../source/ref-api/flwr.server.driver.Driver.rst:15 -#: ../../source/ref-api/flwr.server.driver.GrpcDriver.rst:15 #: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 #: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 #: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 #: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 #: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 #: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 @@ -7273,6 +7119,9 @@ msgstr "" #: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 #: 
../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 #: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 msgid "Methods" msgstr "" @@ -7350,9 +7199,12 @@ msgstr "返回客户端(本身)。" #: ../../source/ref-api/flwr.client.Client.rst:46 #: ../../source/ref-api/flwr.client.NumPyClient.rst:46 +#: ../../source/ref-api/flwr.common.Array.rst:28 #: ../../source/ref-api/flwr.common.ClientMessage.rst:25 #: ../../source/ref-api/flwr.common.Code.rst:19 +#: ../../source/ref-api/flwr.common.Context.rst:25 #: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 +#: ../../source/ref-api/flwr.common.Error.rst:25 #: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 #: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 #: ../../source/ref-api/flwr.common.EventType.rst:19 @@ -7362,10 +7214,16 @@ msgstr "返回客户端(本身)。" #: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 #: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 #: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 +#: ../../source/ref-api/flwr.common.Message.rst:37 +#: ../../source/ref-api/flwr.common.MessageType.rst:25 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 +#: ../../source/ref-api/flwr.common.Metadata.rst:25 #: ../../source/ref-api/flwr.common.Parameters.rst:25 #: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 +#: ../../source/ref-api/flwr.common.RecordSet.rst:25 #: ../../source/ref-api/flwr.common.ServerMessage.rst:25 #: ../../source/ref-api/flwr.common.Status.rst:25 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 #: ../../source/ref-api/flwr.server.ServerConfig.rst:25 msgid "Attributes" msgstr "" @@ -7383,14 +7241,25 @@ msgstr "" #: flwr.client.numpy_client.NumPyClient.fit #: flwr.client.numpy_client.NumPyClient.get_parameters #: 
flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.server.app.start_server +#: flwr.common.context.Context flwr.common.message.Error +#: flwr.common.message.Message flwr.common.message.Message.create_error_reply +#: flwr.common.message.Message.create_reply flwr.common.message.Metadata +#: flwr.common.record.parametersrecord.Array flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.register #: flwr.server.client_manager.ClientManager.unregister #: flwr.server.client_manager.SimpleClientManager.register #: flwr.server.client_manager.SimpleClientManager.unregister #: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.app.start_driver flwr.server.driver.driver.Driver +#: flwr.server.compat.app.start_driver flwr.server.driver.driver.Driver +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive #: flwr.server.strategy.bulyan.Bulyan +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit #: flwr.server.strategy.fedadagrad.FedAdagrad @@ -7406,7 +7275,10 @@ msgstr "" #: flwr.server.strategy.strategy.Strategy.configure_fit #: flwr.server.strategy.strategy.Strategy.evaluate #: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow 
+#: flwr.simulation.app.start_simulation +#: flwr.simulation.run_simulation.run_simulation of msgid "Parameters" msgstr "参数" @@ -7424,13 +7296,17 @@ msgstr "评估指令包含从服务器接收的(全局)模型参数,以及 #: flwr.client.numpy_client.NumPyClient.fit #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.server.app.start_server +#: flwr.common.message.Message.create_reply flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.num_available #: flwr.server.client_manager.ClientManager.register #: flwr.server.client_manager.SimpleClientManager.num_available #: flwr.server.client_manager.SimpleClientManager.register #: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.app.start_driver +#: flwr.server.compat.app.start_driver +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate @@ -7454,13 +7330,17 @@ msgstr "评估结果包含本地数据集上的损失值和其他详细信息, #: flwr.client.client.Client.get_properties #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.server.app.start_server +#: flwr.common.message.Message.create_reply flwr.server.app.start_server #: flwr.server.client_manager.ClientManager.num_available #: flwr.server.client_manager.ClientManager.register #: flwr.server.client_manager.SimpleClientManager.num_available #: flwr.server.client_manager.SimpleClientManager.register #: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.app.start_driver +#: flwr.server.compat.app.start_driver +#: flwr.server.driver.driver.Driver.create_message +#: 
flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate @@ -7511,23 +7391,38 @@ msgstr "当前客户端属性。" msgid "ClientApp" msgstr "客户端" -#: flwr.client.client_app.ClientApp:1 flwr.common.typing.ClientMessage:1 +#: flwr.client.client_app.ClientApp:1 flwr.common.constant.MessageType:1 +#: flwr.common.constant.MessageTypeLegacy:1 flwr.common.context.Context:1 +#: flwr.common.message.Error:1 flwr.common.message.Message:1 +#: flwr.common.message.Metadata:1 flwr.common.record.parametersrecord.Array:1 +#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 #: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 #: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 #: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 #: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 #: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 #: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 -#: flwr.common.typing.Status:1 flwr.server.app.ServerConfig:1 -#: flwr.server.driver.driver.Driver:1 -#: flwr.server.driver.grpc_driver.GrpcDriver:1 flwr.server.history.History:1 -#: flwr.server.server.Server:1 of +#: flwr.common.typing.Status:1 flwr.server.driver.driver.Driver:1 +#: flwr.server.history.History:1 flwr.server.server.Server:1 +#: flwr.server.server_app.ServerApp:1 flwr.server.server_config.ServerConfig:1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of msgid "Bases: :py:class:`object`" msgstr "" -#: flwr.client.app.start_client:33 flwr.client.app.start_numpy_client:36 -#: 
flwr.client.client_app.ClientApp:4 flwr.server.app.start_server:41 -#: flwr.server.driver.app.start_driver:30 of +#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 +#: flwr.client.client_app.ClientApp:4 +#: flwr.client.client_app.ClientApp.evaluate:4 +#: flwr.client.client_app.ClientApp.query:4 +#: flwr.client.client_app.ClientApp.train:4 flwr.server.app.start_server:41 +#: flwr.server.compat.app.start_driver:32 flwr.server.server_app.ServerApp:4 +#: flwr.server.server_app.ServerApp.main:4 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 +#: of msgid "Examples" msgstr "实例" @@ -7550,6 +7445,34 @@ msgid "" "global attribute `app` that points to an object of type `ClientApp`." msgstr "" +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`evaluate `\\ \\(\\)" +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid "Return a decorator that registers the evaluate fn with the client app." +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`query `\\ \\(\\)" +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.query:1 of +msgid "Return a decorator that registers the query fn with the client app." +msgstr "" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +#, fuzzy +msgid ":py:obj:`train `\\ \\(\\)" +msgstr "server.strategy.Strategy" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.train:1 of +msgid "Return a decorator that registers the train fn with the client app." 
+msgstr "" + #: ../../source/ref-api/flwr.client.NumPyClient.rst:2 msgid "NumPyClient" msgstr "NumPyClient" @@ -7764,7 +7687,7 @@ msgstr "" "服务器需要以相同的值启动(请参阅 `flwr.server.start_server`),否则它将不知道增加的限制并阻止更大的消息。" #: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 -#: flwr.server.driver.app.start_driver:21 of +#: flwr.server.compat.app.start_driver:21 of msgid "" "The PEM-encoded root certificates as a byte string or a path string. If " "provided, a secure connection using the certificates will be established " @@ -7786,16 +7709,30 @@ msgstr "" "配置传输层:允许的值包括 - 'grpc-bidi': gRPC,双向流 - 'grpc-rere': gRPC,请求-响应(实验性) - " "'rest': HTTP(实验性)" -#: flwr.client.app.start_client:34 flwr.client.app.start_numpy_client:37 of +#: flwr.client.app.start_client:31 of +msgid "" +"The maximum number of times the client will try to connect to the server " +"before giving up in case of a connection error. If set to None, there is " +"no limit to the number of tries." +msgstr "" + +#: flwr.client.app.start_client:35 of +msgid "" +"The maximum duration before the client stops trying to connect to the " +"server in case of connection error. If set to None, there is no limit to " +"the total time." 
+msgstr "" + +#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of msgid "Starting a gRPC client with an insecure server connection:" msgstr "使用不安全的服务器连接启动 gRPC 客户端:" -#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:44 of +#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of #, fuzzy msgid "Starting an SSL-enabled gRPC client using system certificates:" msgstr "启动支持 SSL 的 gRPC 客户端:" -#: flwr.client.app.start_client:52 flwr.client.app.start_numpy_client:52 of +#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of #, fuzzy msgid "Starting an SSL-enabled gRPC client using provided certificates:" msgstr "启动支持 SSL 的 gRPC 客户端:" @@ -7821,73 +7758,83 @@ msgstr "抽象基类 `flwr.client.NumPyClient` 的实现。" msgid "common" msgstr "常见" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.record.conversion_utils.array_from_numpy:1 of +#, fuzzy +msgid "Create Array from NumPy ndarray." +msgstr "将参数对象转换为 NumPy ndarrays。" + +#: ../../source/ref-api/flwr.common.rst:30::1 msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.bytes_to_ndarray:1 of msgid "Deserialize NumPy ndarray from bytes." msgstr "从字节反序列化 NumPy ndarray。" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`configure `\\ \\(identifier\\[\\, " "filename\\, host\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.logger.configure:1 of msgid "Configure logging to file and/or remote log server." 
msgstr "配置将日志记录到文件和/或远程日志服务器。" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`event `\\ \\(event\\_type\\[\\, " "event\\_details\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.telemetry.event:1 of msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " "\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 logging.Logger.log:1 +#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 #: of msgid "Log 'msg % args' with the integer severity 'level'." msgstr "以整数严重性 \"级别 \"记录 \"msg % args\"。" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.ndarray_to_bytes:1 of msgid "Serialize NumPy ndarray to bytes." msgstr "将 NumPy ndarray 序列化为字节。" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid ":py:obj:`now `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.date.now:1 of msgid "Construct a datetime from time.time() with time zone set to UTC." 
msgstr "从 time.time() 生成日期时间,时区设置为 UTC。" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`ndarrays_to_parameters `\\ " "\\(ndarrays\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.ndarrays_to_parameters:1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 @@ -7895,187 +7842,369 @@ msgstr "" msgid "Convert NumPy ndarrays to parameters object." msgstr "将 NumPy ndarrays 转换为参数对象。" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" ":py:obj:`parameters_to_ndarrays `\\ " "\\(parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.parameters_to_ndarrays:1 of msgid "Convert parameters object to NumPy ndarrays." msgstr "将参数对象转换为 NumPy ndarrays。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.parametersrecord.Array:1 of +#, fuzzy +msgid "Array type." +msgstr "返回类型" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`ClientMessage `\\ " "\\(\\[get\\_properties\\_res\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.ClientMessage:1 of msgid "ClientMessage is a container used to hold one result message." 
msgstr "ClientMessage 是用于容纳一条结果信息的容器。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`Code `\\ \\(value\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.Code:1 of msgid "Client status codes." msgstr "客户端状态代码。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy +msgid "" +":py:obj:`ConfigsRecord `\\ " +"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" +msgstr "" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +#, fuzzy +msgid "Configs record." +msgstr "配置日志记录" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`Context `\\ \\(state\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.context.Context:1 of +msgid "State of your run." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.DisconnectRes:1 of msgid "DisconnectRes message from client to server." msgstr "客户端向服务器发送 DisconnectRes 信息。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`EvaluateIns `\\ \\(parameters\\, " "config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.EvaluateIns:1 of msgid "Evaluate instructions for a client." 
msgstr "评估客户端的指示。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " "num\\_examples\\, metrics\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.EvaluateRes:1 of msgid "Evaluate response from a client." msgstr "评估客户端的反应。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`EventType `\\ \\(value\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.telemetry.EventType:1 of msgid "Types of telemetry events." msgstr "遥测事件类型。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.FitIns:1 of msgid "Fit instructions for a client." msgstr "为客户提供安装说明。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`FitRes `\\ \\(status\\, parameters\\, " "num\\_examples\\, metrics\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.FitRes:1 of msgid "Fit response from a client." msgstr "来自客户端的合适回复。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.message.Error:1 of +msgid "A dataclass that stores information about an error that occurred." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetParametersIns:1 of msgid "Parameters request for a client." msgstr "客户端的参数请求。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`GetParametersRes `\\ \\(status\\, " "parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetParametersRes:1 of msgid "Response when asked to return parameters." msgstr "要求返回参数时的响应。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetPropertiesIns:1 of msgid "Properties request for a client." msgstr "客户端的属性请求。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`GetPropertiesRes `\\ \\(status\\, " "properties\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetPropertiesRes:1 of msgid "Properties response from a client." msgstr "来自客户端的属性响应。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.message.Message:1 of +msgid "State of your application from the viewpoint of the entity using it." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`MessageType `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.constant.MessageType:1 of +msgid "Message type." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.constant.MessageTypeLegacy:1 of +msgid "Legacy message type." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`Metadata `\\ \\(run\\_id\\, " +"message\\_id\\, src\\_node\\_id\\, ...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.message.Metadata:1 of +msgid "A dataclass holding metadata associated with the current message." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`MetricsRecord `\\ " +"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +msgid "Metrics record." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`NDArray `\\" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" "alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " ":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`Parameters `\\ \\(tensors\\, " "tensor\\_type\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.Parameters:1 of msgid "Model parameters." 
msgstr "模型参数。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.parametersrecord.ParametersRecord:1 of +#, fuzzy +msgid "Parameters record." +msgstr "参数" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.ReconnectIns:1 of msgid "ReconnectIns message from server to client." msgstr "服务器发送给客户端的重新连接信息。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 +msgid "" +":py:obj:`RecordSet `\\ " +"\\(\\[parameters\\_records\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.common.record.recordset.RecordSet:1 of +msgid "RecordSet stores groups of parameters, metrics and configs." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:64::1 msgid "" ":py:obj:`ServerMessage `\\ " "\\(\\[get\\_properties\\_ins\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.ServerMessage:1 of msgid "ServerMessage is a container used to hold one instruction message." msgstr "ServerMessage 是用于容纳一条指令信息的容器。" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 msgid ":py:obj:`Status `\\ \\(code\\, message\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:52::1 +#: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.Status:1 of msgid "Client status." 
msgstr "客户端状态。" +#: ../../source/ref-api/flwr.common.Array.rst:2 +msgid "Array" +msgstr "" + +#: flwr.common.record.parametersrecord.Array:3 of +msgid "" +"A dataclass containing serialized data from an array-like or tensor-like " +"object along with some metadata about it." +msgstr "" + +#: flwr.common.record.parametersrecord.Array:6 of +msgid "" +"A string representing the data type of the serialised object (e.g. " +"`np.float32`)" +msgstr "" + +#: flwr.common.record.parametersrecord.Array:8 of +msgid "" +"A list representing the shape of the unserialized array-like object. This" +" is used to deserialize the data (depending on the serialization method) " +"or simply as a metadata field." +msgstr "" + +#: flwr.common.record.parametersrecord.Array:12 of +msgid "" +"A string indicating the type of serialisation mechanism used to generate " +"the bytes in `data` from an array-like or tensor-like object." +msgstr "" + +#: flwr.common.record.parametersrecord.Array:15 of +msgid "A buffer of bytes containing the data." +msgstr "" + +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#, fuzzy +msgid ":py:obj:`numpy `\\ \\(\\)" +msgstr "server.strategy.Strategy" + +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#: flwr.common.record.parametersrecord.Array.numpy:1 of +#, fuzzy +msgid "Return the array as a NumPy array." 
+msgstr "以 NumPy ndarrays 列表形式返回模型参数" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`dtype `\\" +msgstr "" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#, fuzzy +msgid ":py:obj:`shape `\\" +msgstr "server.strategy.Strategy" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#, fuzzy +msgid ":py:obj:`stype `\\" +msgstr "server.strategy.Strategy" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`data `\\" +msgstr "" + #: ../../source/ref-api/flwr.common.ClientMessage.rst:2 #, fuzzy msgid "ClientMessage" @@ -8135,6 +8264,106 @@ msgid "" "`\\" msgstr "" +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 +#, fuzzy +msgid "ConfigsRecord" +msgstr "配置日志记录" + +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " +":py:class:`float`, :py:class:`str`, :py:class:`bytes`, :py:class:`bool`, " +":py:class:`~typing.List`\\ [:py:class:`int`], :py:class:`~typing.List`\\ " +"[:py:class:`float`], :py:class:`~typing.List`\\ [:py:class:`str`], " +":py:class:`~typing.List`\\ [:py:class:`bytes`], " +":py:class:`~typing.List`\\ [:py:class:`bool`]]]" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1 +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "Remove all items from R." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 +#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "Return number of Bytes stored in this object." 
+msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 +#: flwr.common.record.typeddict.TypedDict.get:1 of +msgid "d defaults to None." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 +#: flwr.common.record.typeddict.TypedDict.pop:1 of +msgid "If key is not found, d is returned if given, otherwise KeyError is raised." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 +#: flwr.common.record.typeddict.TypedDict.update:1 of +msgid "Update R from dict/iterable E and F." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of +msgid "This function counts booleans as occupying 1 Byte." +msgstr "" + +#: ../../source/ref-api/flwr.common.Context.rst:2 +msgid "Context" +msgstr "" + +#: flwr.common.context.Context:3 of +msgid "" +"Holds records added by the entity in a given run and that will stay " +"local. This means that the data it holds will never leave the system it's" +" running from. This can be used as an intermediate storage or scratchpad " +"when executing mods. It can also be used as a memory to access at " +"different points during the lifecycle of this entity (e.g. 
across " +"multiple rounds)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Context.rst:28::1 +#, fuzzy +msgid ":py:obj:`state `\\" +msgstr "server.strategy.Strategy" + #: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 msgid "DisconnectRes" msgstr "" @@ -8143,6 +8372,34 @@ msgstr "" msgid ":py:obj:`reason `\\" msgstr "" +#: ../../source/ref-api/flwr.common.Error.rst:2 +msgid "Error" +msgstr "" + +#: flwr.common.message.Error:3 of +msgid "An identifier for the error." +msgstr "" + +#: flwr.common.message.Error:5 of +msgid "A reason for why the error arose (e.g. an exception stack-trace)" +msgstr "" + +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`code `\\" +msgstr "" + +#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of +msgid "Error code." +msgstr "" + +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`reason `\\" +msgstr "" + +#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of +msgid "Reason reported about the error." +msgstr "" + #: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 #, fuzzy msgid "EvaluateIns" @@ -8367,11 +8624,286 @@ msgstr "" msgid ":py:obj:`properties `\\" msgstr "" -#: ../../source/ref-api/flwr.common.NDArray.rst:2 -msgid "NDArray" +#: ../../source/ref-api/flwr.common.Message.rst:2 +#, fuzzy +msgid "Message" +msgstr "服务器端" + +#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 +#: flwr.common.message.Message:3 of +msgid "A dataclass including information about the message to be executed." msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +#: flwr.common.message.Message:5 of +msgid "" +"Holds records either sent by another entity (e.g. sent by the server-side" +" logic to a client, or vice-versa) or that will be sent to it." +msgstr "" + +#: flwr.common.message.Message:8 of +msgid "" +"A dataclass that captures information about an error that took place when" +" processing another message." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid "" +":py:obj:`create_error_reply `\\ " +"\\(error\\, ttl\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_error_reply:1 of +msgid "Construct a reply message indicating an error happened." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid "" +":py:obj:`create_reply `\\ \\(content\\," +" ttl\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_reply:1 of +msgid "Create a reply to this message with specified content and TTL." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_content `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_content:1 of +msgid "Return True if message has content, else False." +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_error `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_error:1 of +msgid "Return True if message has an error, else False." +msgstr "" + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`content `\\" +msgstr "" + +#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 +#: of +#, fuzzy +msgid "The content of this message." +msgstr "评估客户端的反应。" + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`error `\\" +msgstr "" + +#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of +msgid "Error captured by this message." +msgstr "" + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`metadata `\\" +msgstr "" + +#: flwr.common.message.Message.create_error_reply:3 of +msgid "The error that was encountered." 
+msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.ttl:1 flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 flwr.common.message.Metadata:16 +#: of +msgid "Time-to-live for this message." +msgstr "" + +#: flwr.common.message.Message.create_reply:3 of +msgid "" +"The method generates a new `Message` as a reply to this message. It " +"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " +"this message and sets 'reply_to_message' to the ID of this message." +msgstr "" + +#: flwr.common.message.Message.create_reply:7 of +msgid "The content for the reply message." +msgstr "" + +#: flwr.common.message.Message.create_reply:12 of +msgid "A new `Message` instance representing the reply." +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:2 +#, fuzzy +msgid "MessageType" +msgstr "返回类型" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`EVALUATE `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`QUERY `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`TRAIN `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 +msgid "MessageTypeLegacy" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PARAMETERS `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PROPERTIES `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of +msgid "An identifier for the current run." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of +msgid "An identifier for the current message." 
+msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of +msgid "An identifier for the node sending this message." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1 +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.message.Metadata:9 of +msgid "An identifier for the node receiving this message." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of +msgid "An identifier for the message this message replies to." +msgstr "" + +#: flwr.common.message.Metadata:13 of +msgid "" +"An identifier for grouping messages. In some settings, this is used as " +"the FL round." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of +msgid "A string that encodes the action to be executed on the receiving end." +msgstr "" + +#: flwr.common.message.Metadata:21 of +msgid "" +"An identifier that can be used when loading a particular data partition " +"for a ClientApp. Making use of this identifier is more relevant when " +"conducting simulations." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`dst_node_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`group_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.group_id:1 of +msgid "An identifier for grouping messages." +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`message_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`message_type `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`partition_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 +#: flwr.common.Metadata.partition_id:1 of +msgid "An identifier telling which data partition a ClientApp should use." 
+msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`reply_to_message `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`run_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`src_node_id `\\" +msgstr "" + +#: flwr.common.Metadata.dst_node_id:1::1 of +msgid ":py:obj:`ttl `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 +msgid "MetricsRecord" +msgstr "" + +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " +":py:class:`float`, :py:class:`~typing.List`\\ [:py:class:`int`], " +":py:class:`~typing.List`\\ [:py:class:`float`]]]" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.NDArray.rst:2 +msgid "NDArray" +msgstr "" + +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 msgid ":py:obj:`tensors `\\" msgstr "" @@ -8379,6 +8911,66 @@ msgstr "" msgid ":py:obj:`tensor_type `\\" msgstr "" +#: 
../../source/ref-api/flwr.common.ParametersRecord.rst:2 +#, fuzzy +msgid "ParametersRecord" +msgstr "参数" + +#: flwr.common.record.parametersrecord.ParametersRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord:3 of +msgid "" +"A dataclass storing named Arrays in order. This means that it holds " +"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " +"viewed as an equivalent to PyTorch's state_dict, but holding serialised " +"tensors instead." +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" + +#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of +msgid "" +"Note that a small amount of Bytes might also be included in this counting" +" that correspond to metadata of the serialized object (e.g. of NumPy " +"array) needed for deseralization." 
+msgstr "" + #: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 #, fuzzy msgid "ReconnectIns" @@ -8388,6 +8980,37 @@ msgstr "启用 SSL 连接" msgid ":py:obj:`seconds `\\" msgstr "" +#: ../../source/ref-api/flwr.common.RecordSet.rst:2 +msgid "RecordSet" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`configs_records `\\" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1 +#: flwr.common.RecordSet.configs_records:1::1 of +msgid "Dictionary holding ConfigsRecord instances." +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`metrics_records `\\" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.metrics_records:1 of +msgid "Dictionary holding MetricsRecord instances." +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`parameters_records `\\" +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.parameters_records:1 of +msgid "Dictionary holding ParametersRecord instances." +msgstr "" + #: ../../source/ref-api/flwr.common.ServerMessage.rst:2 #, fuzzy msgid "ServerMessage" @@ -8426,6 +9049,10 @@ msgstr "" msgid ":py:obj:`message `\\" msgstr "" +#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 +msgid "array\\_from\\_numpy" +msgstr "" + #: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 msgid "bytes\\_to\\_ndarray" msgstr "" @@ -8474,83 +9101,134 @@ msgstr "" msgid "server" msgstr "服务器" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_driver_api `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.run_driver_api:1 of #, fuzzy msgid "Run Flower server (Driver API)." 
msgstr "flower-driver-api" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_fleet_api `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.run_fleet_api:1 of #, fuzzy msgid "Run Flower server (Fleet API)." msgstr "Flower 服务器。" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_server_app `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.app.run_server_app:1 of +#: ../../source/ref-api/flwr.server.rst:27::1 +#: flwr.server.run_serverapp.run_server_app:1 of #, fuzzy msgid "Run Flower server app." msgstr "Flower 服务器。" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 msgid ":py:obj:`run_superlink `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.run_superlink:1 of msgid "Run Flower server (Driver API and Fleet API)." msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 +msgid "" +":py:obj:`start_driver `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:27::1 +#: flwr.server.compat.app.start_driver:1 of +#, fuzzy +msgid "Start a Flower Driver API server." +msgstr "启动基于 Ray 的Flower模拟服务器。" + +#: ../../source/ref-api/flwr.server.rst:27::1 msgid "" ":py:obj:`start_server `\\ \\(\\*\\[\\, " "server\\_address\\, server\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.start_server:1 of msgid "Start a Flower server using the gRPC transport layer." 
msgstr "使用 gRPC 传输层启动 Flower 服务器。" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 msgid ":py:obj:`ClientManager `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.client_manager.ClientManager:1 of #, fuzzy msgid "Abstract base class for managing Flower clients." msgstr "Flower 客户端的抽象基类。" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 +#, fuzzy +msgid "" +":py:obj:`Driver `\\ " +"\\(\\[driver\\_service\\_address\\, ...\\]\\)" +msgstr "" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" + +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.driver.driver.Driver:1 of +msgid "`Driver` class provides an interface to the Driver API." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 msgid ":py:obj:`History `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.history.History:1 of #, fuzzy msgid "History class for training and/or evaluation metrics collection." msgstr "**hist** -- 包含训练和评估指标的对象。" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 +msgid "" +":py:obj:`LegacyContext `\\ \\(state\\[\\, " +"config\\, strategy\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Legacy Context." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.rst:41::1 msgid "" ":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " "strategy\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 +#, fuzzy +msgid "" +":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " +"strategy\\, ...\\]\\)" +msgstr "server.strategy.Strategy" + +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.server_app.ServerApp:1 of +#, fuzzy +msgid "Flower ServerApp." +msgstr "Flower 服务器。" + +#: ../../source/ref-api/flwr.server.rst:41::1 #, fuzzy msgid "" ":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," @@ -8560,42 +9238,43 @@ msgstr "" "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " "...)``" -#: ../../source/ref-api/flwr.server.rst:37::1 -#: flwr.server.app.ServerConfig:1 of +#: ../../source/ref-api/flwr.server.rst:41::1 +#: flwr.server.server_config.ServerConfig:1 of #, fuzzy msgid "Flower server config." msgstr "Flower 服务器。" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:37::1 +#: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.client_manager.SimpleClientManager:1 of #, fuzzy msgid "Provides a pool of available clients." msgstr "使用部分可用客户进行评估。" -#: ../../source/ref-api/flwr.server.rst:56::1 -msgid ":py:obj:`flwr.server.driver `\\" -msgstr "" - -#: ../../source/ref-api/flwr.server.rst:56::1 flwr.server.driver:1 -#: of -#, fuzzy -msgid "Flower driver SDK." -msgstr "Flower 服务器。" - -#: ../../source/ref-api/flwr.server.rst:56::1 +#: ../../source/ref-api/flwr.server.rst:60::1 #, fuzzy msgid ":py:obj:`flwr.server.strategy `\\" msgstr "server.strategy.Strategy" -#: ../../source/ref-api/flwr.server.rst:56::1 +#: ../../source/ref-api/flwr.server.rst:60::1 #: flwr.server.strategy:1 of msgid "Contains the strategy abstraction and different implementations." 
msgstr "包含策略抽象和不同的实现方法。" +#: ../../source/ref-api/flwr.server.rst:60::1 +#, fuzzy +msgid ":py:obj:`flwr.server.workflow `\\" +msgstr "server.strategy.Strategy" + +#: ../../source/ref-api/flwr.server.rst:60::1 +#: flwr.server.workflow:1 of +#, fuzzy +msgid "Workflows." +msgstr "工作流程" + #: ../../source/ref-api/flwr.server.ClientManager.rst:2 #, fuzzy msgid "ClientManager" @@ -8690,34 +9369,248 @@ msgstr "" msgid "This method is idempotent." msgstr "" -#: ../../source/ref-api/flwr.server.History.rst:2 -msgid "History" -msgstr "" - -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_loss_centralized " -"`\\ \\(server\\_round\\, " -"loss\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.Driver.rst:2 +#, fuzzy +msgid "Driver" +msgstr "服务器" -#: flwr.server.history.History.add_loss_centralized:1 -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.driver.driver.Driver:3 of #, fuzzy -msgid "Add one loss entry (from centralized evaluation)." -msgstr "集中评估" +msgid "" +"The IPv4 or IPv6 address of the Driver API server. Defaults to " +"`\"[::]:9091\"`." +msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of msgid "" -":py:obj:`add_loss_distributed " -"`\\ \\(server\\_round\\, " -"loss\\)" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order: * CA certificate. * " +"server certificate. * server private key." msgstr "" +"包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素: * CA 证书,* 服务器证书, * " +"服务器私钥。" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_loss_distributed:1 of -msgid "Add one loss entry (from distributed evaluation)." 
-msgstr "" +#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +msgid "" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order:" +msgstr "包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素:" + +#: flwr.server.app.start_server:32 flwr.server.driver.driver.Driver:10 of +msgid "CA certificate." +msgstr "CA 证书。" + +#: flwr.server.app.start_server:33 flwr.server.driver.driver.Driver:11 of +msgid "server certificate." +msgstr "服务器证书。" + +#: flwr.server.app.start_server:34 flwr.server.driver.driver.Driver:12 of +msgid "server private key." +msgstr "服务器私人密钥。" + +#: flwr.server.driver.driver.Driver.close:1::1 of +#, fuzzy +msgid ":py:obj:`close `\\ \\(\\)" +msgstr "server.strategy.Strategy" + +#: flwr.server.driver.driver.Driver.close:1 +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "Disconnect from the SuperLink if connected." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "" +":py:obj:`create_message `\\ " +"\\(content\\, message\\_type\\, ...\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.create_message:1 of +msgid "Create a new message with specified parameters." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid ":py:obj:`get_node_ids `\\ \\(\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.get_node_ids:1 of +msgid "Get node IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "" +":py:obj:`pull_messages `\\ " +"\\(message\\_ids\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.pull_messages:1 of +msgid "Pull messages based on message IDs." 
+msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +msgid "" +":py:obj:`push_messages `\\ " +"\\(messages\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.push_messages:1 of +msgid "Push messages to specified node IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.close:1::1 of +#, fuzzy +msgid "" +":py:obj:`send_and_receive `\\ " +"\\(messages\\, \\*\\[\\, timeout\\]\\)" +msgstr "" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" + +#: flwr.server.driver.driver.Driver.close:1::1 +#: flwr.server.driver.driver.Driver.send_and_receive:1 of +msgid "Push messages to specified node IDs and pull the reply messages." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:3 of +msgid "" +"This method constructs a new `Message` with given content and metadata. " +"The `run_id` and `src_node_id` will be set automatically." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:6 of +msgid "" +"The content for the new message. This holds records that are to be sent " +"to the destination node." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:9 of +msgid "" +"The type of the message, defining the action to be executed on the " +"receiving end." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:12 of +msgid "The ID of the destination node to which the message is being sent." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:14 of +msgid "" +"The ID of the group to which this message is associated. In some " +"settings, this is used as the FL round." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:17 of +msgid "" +"Time-to-live for the round trip of this message, i.e., the time from " +"sending this message to receiving a reply. It specifies the duration for " +"which the message and its potential reply are considered valid." 
+msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:22 of +msgid "" +"**message** -- A new `Message` instance with the specified content and " +"metadata." +msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:3 of +msgid "" +"This method is used to collect messages from the SuperLink that " +"correspond to a set of given message IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:6 of +msgid "An iterable of message IDs for which reply messages are to be retrieved." +msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:9 of +msgid "**messages** -- An iterable of messages received." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:3 of +msgid "" +"This method takes an iterable of messages and sends each message to the " +"node specified in `dst_node_id`." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:6 +#: flwr.server.driver.driver.Driver.send_and_receive:7 of +msgid "An iterable of messages to be sent." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:9 of +msgid "" +"**message_ids** -- An iterable of IDs for the messages that were sent, " +"which can be used to pull replies." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:3 of +msgid "" +"This method sends a list of messages to their destination node IDs and " +"then waits for the replies. It continues to pull replies until either all" +" replies are received or the specified timeout duration is exceeded." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:9 of +msgid "" +"The timeout duration in seconds. If specified, the method will wait for " +"replies for this duration. If `None`, there is no time limit and the " +"method will wait until replies for all messages are received." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:14 of +msgid "**replies** -- An iterable of reply messages received from the SuperLink." 
+msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:18 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:53 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:60 +#: of +#, fuzzy +msgid "Notes" +msgstr "无" + +#: flwr.server.driver.driver.Driver.send_and_receive:19 of +msgid "" +"This method uses `push_messages` to send the messages and `pull_messages`" +" to collect the replies. If `timeout` is set, the method may not return " +"replies for all sent messages. A message remains valid until its TTL, " +"which is not affected by `timeout`." +msgstr "" + +#: ../../source/ref-api/flwr.server.History.rst:2 +msgid "History" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1 +#: flwr.server.history.History.add_loss_centralized:1::1 of +#, fuzzy +msgid "Add one loss entry (from centralized evaluation)." +msgstr "集中评估" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_loss_distributed:1 of +msgid "Add one loss entry (from distributed evaluation)." +msgstr "" #: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" @@ -8757,6 +9650,38 @@ msgstr "" msgid "Add metrics entries (from distributed fit)." 
msgstr "" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 +msgid "LegacyContext" +msgstr "" + +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Bases: :py:class:`~flwr.common.context.Context`" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#, fuzzy +msgid ":py:obj:`config `\\" +msgstr "server.strategy.Strategy" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#, fuzzy +msgid ":py:obj:`strategy `\\" +msgstr "server.strategy.Strategy" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +msgid ":py:obj:`client_manager `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#, fuzzy +msgid ":py:obj:`history `\\" +msgstr "server.strategy.Strategy" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#, fuzzy +msgid ":py:obj:`state `\\" +msgstr "server.strategy.Strategy" + #: flwr.server.server.Server.client_manager:1::1 of msgid ":py:obj:`client_manager `\\ \\(\\)" msgstr "" @@ -8833,12 +9758,36 @@ msgstr "" msgid "Replace server strategy." msgstr "server.strategy" +#: ../../source/ref-api/flwr.server.ServerApp.rst:2 +#, fuzzy +msgid "ServerApp" +msgstr "服务器" + +#: flwr.server.server_app.ServerApp:5 of +#, fuzzy +msgid "Use the `ServerApp` with an existing `Strategy`:" +msgstr "使用现有策略" + +#: flwr.server.server_app.ServerApp:15 of +msgid "Use the `ServerApp` with a custom main function:" +msgstr "" + +#: flwr.server.server_app.ServerApp.main:1::1 of +#, fuzzy +msgid ":py:obj:`main `\\ \\(\\)" +msgstr "server.strategy.Strategy" + +#: flwr.server.server_app.ServerApp.main:1 +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid "Return a decorator that registers the main fn with the server app." 
+msgstr "" + #: ../../source/ref-api/flwr.server.ServerConfig.rst:2 #, fuzzy msgid "ServerConfig" msgstr "服务器" -#: flwr.server.app.ServerConfig:3 of +#: flwr.server.server_config.ServerConfig:3 of msgid "" "All attributes have default values which allows users to configure just " "the ones they care about." @@ -8912,255 +9861,87 @@ msgstr "" msgid "**success**" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:2 -#, fuzzy -msgid "driver" -msgstr "服务器" - -#: ../../source/ref-api/flwr.server.driver.rst:22::1 -msgid "" -":py:obj:`start_driver `\\ \\(\\*\\[\\, " -"server\\_address\\, server\\, ...\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.driver.rst:22::1 -#: flwr.server.driver.app.start_driver:1 of +#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 #, fuzzy -msgid "Start a Flower Driver API server." -msgstr "启动基于 Ray 的Flower模拟服务器。" - -#: ../../source/ref-api/flwr.server.driver.rst:30::1 -msgid "" -":py:obj:`Driver `\\ " -"\\(\\[driver\\_service\\_address\\, ...\\]\\)" -msgstr "" +msgid "run\\_driver\\_api" +msgstr "flower-driver-api" -#: ../../source/ref-api/flwr.server.driver.rst:30::1 -#: flwr.server.driver.driver.Driver:1 of -msgid "`Driver` class provides an interface to the Driver API." +#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 +msgid "run\\_fleet\\_api" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:30::1 -msgid "" -":py:obj:`GrpcDriver `\\ " -"\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#: ../../source/ref-api/flwr.server.run_server_app.rst:2 +msgid "run\\_server\\_app" msgstr "" -#: ../../source/ref-api/flwr.server.driver.rst:30::1 -#: flwr.server.driver.grpc_driver.GrpcDriver:1 of -msgid "`GrpcDriver` provides access to the gRPC Driver API/service." 
-msgstr "" +#: ../../source/ref-api/flwr.server.run_superlink.rst:2 +#, fuzzy +msgid "run\\_superlink" +msgstr "flower-superlink" -#: ../../source/ref-api/flwr.server.driver.Driver.rst:2 +#: ../../source/ref-api/flwr.server.start_driver.rst:2 #, fuzzy -msgid "Driver" -msgstr "服务器" +msgid "start\\_driver" +msgstr "启动客户端" -#: flwr.server.driver.driver.Driver:3 of +#: flwr.server.compat.app.start_driver:3 of #, fuzzy msgid "" "The IPv4 or IPv6 address of the Driver API server. Defaults to " -"`\"[::]:9091\"`." +"`\"[::]:8080\"`." msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" -#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +#: flwr.server.compat.app.start_driver:6 of +#, fuzzy msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order: * CA certificate. * " -"server certificate. * server private key." -msgstr "" -"包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素: * CA 证书,* 服务器证书, * " -"服务器私钥。" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_driver` will create one." +msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" -#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +#: flwr.server.app.start_server:9 flwr.server.compat.app.start_driver:10 +#: flwr.simulation.app.start_simulation:28 of msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order:" -msgstr "包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素:" - -#: flwr.server.app.start_server:32 flwr.server.driver.driver.Driver:10 of -msgid "CA certificate." -msgstr "CA 证书。" - -#: flwr.server.app.start_server:33 flwr.server.driver.driver.Driver:11 of -msgid "server certificate." 
-msgstr "服务器证书。" - -#: flwr.server.app.start_server:34 flwr.server.driver.driver.Driver:12 of -msgid "server private key." -msgstr "服务器私人密钥。" - -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of -msgid ":py:obj:`get_nodes `\\ \\(\\)" -msgstr "" - -#: flwr.server.driver.driver.Driver.get_nodes:1 -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of -msgid "Get node IDs." -msgstr "" +"Currently supported values are `num_rounds` (int, default: 1) and " +"`round_timeout` in seconds (float, default: None)." +msgstr "目前支持的值有:`num_rounds`(int,默认值:1)和以秒为单位的`round_timeout`(float,默认值:无)。" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of +#: flwr.server.app.start_server:12 flwr.server.compat.app.start_driver:13 of msgid "" -":py:obj:`pull_task_res `\\ " -"\\(task\\_ids\\)" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`start_server` will use `flwr.server.strategy.FedAvg`." msgstr "" +"抽象基类 `flwr.server.strategy.Strategy` 的实现。如果没有提供策略,`start_server` 将使用 " +"`flwr.server.strategy.FedAvg`。" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 -#: flwr.server.driver.driver.Driver.pull_task_res:1 -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.pull_task_res:1 of +#: flwr.server.compat.app.start_driver:17 of #, fuzzy -msgid "Get task results." -msgstr "汇总训练结果。" - -#: flwr.server.driver.driver.Driver.get_nodes:1::1 of msgid "" -":py:obj:`push_task_ins `\\ " -"\\(task\\_ins\\_list\\)" +"An implementation of the class `flwr.server.ClientManager`. If no " +"implementation is provided, then `start_driver` will use " +"`flwr.server.SimpleClientManager`." 
msgstr "" +"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_server` 将使用 " +"`flwr.server.client_manager.SimpleClientManager`。" -#: flwr.server.driver.driver.Driver.get_nodes:1::1 -#: flwr.server.driver.driver.Driver.push_task_ins:1 -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.push_task_ins:1 of -msgid "Schedule tasks." +#: flwr.server.compat.app.start_driver:25 of +msgid "The Driver object to use." msgstr "" -#: ../../source/ref-api/flwr.server.driver.GrpcDriver.rst:2 -msgid "GrpcDriver" -msgstr "" +#: flwr.server.app.start_server:37 flwr.server.compat.app.start_driver:28 of +msgid "**hist** -- Object containing training and evaluation metrics." +msgstr "**hist** -- 包含训练和评估指标的对象。" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid ":py:obj:`connect `\\ \\(\\)" -msgstr "" +#: flwr.server.compat.app.start_driver:33 of +#, fuzzy +msgid "Starting a driver that connects to an insecure server:" +msgstr "启动不安全的服务器:" -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1 -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid "Connect to the Driver API." -msgstr "" - -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid "" -":py:obj:`create_run `\\ " -"\\(req\\)" -msgstr "" - -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.create_run:1 of -#, fuzzy -msgid "Request for run ID." -msgstr "Flower 基线申请" - -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid ":py:obj:`disconnect `\\ \\(\\)" -msgstr "" - -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.disconnect:1 of -msgid "Disconnect from the Driver API." 
-msgstr "" - -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid ":py:obj:`get_nodes `\\ \\(req\\)" -msgstr "" - -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 -#: flwr.server.driver.grpc_driver.GrpcDriver.get_nodes:1 of -#, fuzzy -msgid "Get client IDs." -msgstr "返回客户端(本身)。" - -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid "" -":py:obj:`pull_task_res `\\ " -"\\(req\\)" -msgstr "" - -#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of -msgid "" -":py:obj:`push_task_ins `\\ " -"\\(req\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.driver.start_driver.rst:2 -#, fuzzy -msgid "start\\_driver" -msgstr "启动客户端" - -#: flwr.server.driver.app.start_driver:3 of -#, fuzzy -msgid "" -"The IPv4 or IPv6 address of the Driver API server. Defaults to " -"`\"[::]:8080\"`." -msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" - -#: flwr.server.driver.app.start_driver:6 of -#, fuzzy -msgid "" -"A server implementation, either `flwr.server.Server` or a subclass " -"thereof. If no instance is provided, then `start_driver` will create one." -msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" - -#: flwr.server.app.start_server:9 flwr.server.driver.app.start_driver:10 -#: flwr.simulation.app.start_simulation:28 of -msgid "" -"Currently supported values are `num_rounds` (int, default: 1) and " -"`round_timeout` in seconds (float, default: None)." -msgstr "目前支持的值有:`num_rounds`(int,默认值:1)和以秒为单位的`round_timeout`(float,默认值:无)。" - -#: flwr.server.app.start_server:12 flwr.server.driver.app.start_driver:13 of -msgid "" -"An implementation of the abstract base class " -"`flwr.server.strategy.Strategy`. If no strategy is provided, then " -"`start_server` will use `flwr.server.strategy.FedAvg`." 
-msgstr "" -"抽象基类 `flwr.server.strategy.Strategy` 的实现。如果没有提供策略,`start_server` 将使用 " -"`flwr.server.strategy.FedAvg`。" - -#: flwr.server.driver.app.start_driver:17 of -#, fuzzy -msgid "" -"An implementation of the class `flwr.server.ClientManager`. If no " -"implementation is provided, then `start_driver` will use " -"`flwr.server.SimpleClientManager`." -msgstr "" -"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_server` 将使用 " -"`flwr.server.client_manager.SimpleClientManager`。" - -#: flwr.server.app.start_server:37 flwr.server.driver.app.start_driver:26 of -msgid "**hist** -- Object containing training and evaluation metrics." -msgstr "**hist** -- 包含训练和评估指标的对象。" - -#: flwr.server.driver.app.start_driver:31 of -#, fuzzy -msgid "Starting a driver that connects to an insecure server:" -msgstr "启动不安全的服务器:" - -#: flwr.server.driver.app.start_driver:35 of +#: flwr.server.compat.app.start_driver:37 of #, fuzzy msgid "Starting a driver that connects to an SSL-enabled server:" msgstr "启动支持 SSL 的服务器:" -#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 -#, fuzzy -msgid "run\\_driver\\_api" -msgstr "flower-driver-api" - -#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 -msgid "run\\_fleet\\_api" -msgstr "" - -#: ../../source/ref-api/flwr.server.run_server_app.rst:2 -msgid "run\\_server\\_app" -msgstr "" - -#: ../../source/ref-api/flwr.server.run_superlink.rst:2 -#, fuzzy -msgid "run\\_superlink" -msgstr "flower-superlink" - #: ../../source/ref-api/flwr.server.start_server.rst:2 #, fuzzy msgid "start\\_server" @@ -9210,223 +9991,279 @@ msgstr "启动支持 SSL 的服务器:" msgid "strategy" msgstr "Krum 策略。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FaultTolerantFedAvg " -"`\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: 
flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of -msgid "Configurable fault-tolerant FedAvg strategy implementation." -msgstr "可配置的容错 FedAvg 策略实施。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.bulyan.Bulyan:1 of +msgid "Bulyan strategy." +msgstr "Bulyan 策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of +msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." +msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " +"`\\ " +"\\(...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: of +#, fuzzy +msgid "Strategy wrapper for central DP with client-side adaptive clipping." +msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " +"`\\ " +"\\(...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: of +#, fuzzy +msgid "Strategy wrapper for central DP with server-side adaptive clipping." 
+msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DifferentialPrivacyClientSideFixedClipping " +"`\\ " +"\\(...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: of +#, fuzzy +msgid "Strategy wrapper for central DP with client-side fixed clipping." +msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DifferentialPrivacyServerSideFixedClipping " +"`\\ " +"\\(...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: of +#, fuzzy +msgid "Strategy wrapper for central DP with server-side fixed clipping." +msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedadagrad.FedAdagrad:1 of msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." msgstr "FedAdagrad 策略 - 使用 Adagrad 进行自适应联合优化。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAdam `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedadam.FedAdam:1 of msgid "FedAdam - Adaptive Federated Optimization using Adam." 
msgstr "FedAdam - 使用 Adam 进行自适应联合优化。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAvg `\\ \\(\\*\\[\\, " "fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedavg.FedAvg:1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of msgid "Federated Averaging strategy." msgstr "联邦平均策略。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -msgid "" -":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of -msgid "Configurable FedXgbNnAvg strategy implementation." -msgstr "可配置的 FedXgbNAvg 策略实施。" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -msgid "" -":py:obj:`FedXgbBagging `\\ " -"\\(\\[evaluate\\_function\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of -#, fuzzy -msgid "Configurable FedXgbBagging strategy implementation." -msgstr "可配置的 FedXgbNAvg 策略实施。" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -msgid "" -":py:obj:`FedXgbCyclic `\\ " -"\\(\\*\\*kwargs\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of -#, fuzzy -msgid "Configurable FedXgbCyclic strategy implementation." 
-msgstr "可配置的 FedAvg 策略实施。" - -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAvgAndroid `\\ " "\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedavgm.FedAvgM:1 of msgid "Federated Averaging with Momentum strategy." msgstr "联邦平均动量策略。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedmedian.FedMedian:1 of +#, fuzzy +msgid "Configurable FedMedian strategy implementation." +msgstr "可配置的 FedAvg 策略实施。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedOpt `\\ \\(\\*\\[\\, " "fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedopt.FedOpt:1 of #, fuzzy msgid "Federated Optim strategy." msgstr "联邦优化策略。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" ":py:obj:`FedProx `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedprox.FedProx:1 of msgid "Federated Optimization strategy." 
msgstr "联邦优化策略。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FedYogi `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "FedYogi [Reddi et al., 2020] strategy." -msgstr "FedYogi [Reddi 等人,2020] 策略。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of +msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." +msgstr "带修剪均值的联邦平均法[Dong Yin 等,2021]。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " -"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Configurable QFedAvg strategy implementation." -msgstr "可配置的 QFedAvg 策略实施。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of +#, fuzzy +msgid "Configurable FedXgbBagging strategy implementation." +msgstr "可配置的 FedXgbNAvg 策略实施。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FedMedian `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedmedian.FedMedian:1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of #, fuzzy -msgid "Configurable FedMedian strategy implementation." +msgid "Configurable FedXgbCyclic strategy implementation." 
msgstr "可配置的 FedAvg 策略实施。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`FedTrimmedAvg `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of -msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." -msgstr "带修剪均值的联邦平均法[Dong Yin 等,2021]。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of +msgid "Configurable FedXgbNnAvg strategy implementation." +msgstr "可配置的 FedXgbNAvg 策略实施。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`Krum `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.krum.Krum:1 of -#, fuzzy -msgid "Krum [Blanchard et al., 2017] strategy." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "FedYogi [Reddi et al., 2020] strategy." msgstr "FedYogi [Reddi 等人,2020] 策略。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`Bulyan `\\ \\(\\*\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.bulyan.Bulyan:1 of -msgid "Bulyan strategy." 
-msgstr "Bulyan 策略。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of +msgid "Configurable fault-tolerant FedAvg strategy implementation." +msgstr "可配置的容错 FedAvg 策略实施。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`DPFedAvgAdaptive `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\)" +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." -msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.krum.Krum:1 of +#, fuzzy +msgid "Krum [Blanchard et al., 2017] strategy." +msgstr "FedYogi [Reddi 等人,2020] 策略。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`DPFedAvgFixed `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of -msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." -msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Configurable QFedAvg strategy implementation." 
+msgstr "可配置的 QFedAvg 策略实施。" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid ":py:obj:`Strategy `\\ \\(\\)" msgstr "server.strategy.Strategy" -#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.strategy.Strategy:1 of msgid "Abstract base class for server strategy implementations." msgstr "服务器策略实现的抽象基类。" @@ -9627,6 +10464,14 @@ msgid "" "parameters\\, ...\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 @@ -9648,6 +10493,14 @@ msgid "" "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 +#: 
flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 #: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 @@ -9741,6 +10594,10 @@ msgstr "" msgid "Return the sample size and the required number of available clients." msgstr "返回样本大小和所需的可用客户数量。" +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 +msgid "DPFedAvgAdaptive" +msgstr "DPFedAvgAdaptive" + #: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" msgstr "" @@ -9758,6 +10615,14 @@ msgid "" "\\(server\\_round\\, results\\, ...\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 +#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of @@ -9806,6 +10671,14 @@ msgid "" "\\(server\\_round\\, parameters\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of msgid "Evaluate model parameters using an evaluation function from the strategy." 
@@ -9819,6 +10692,14 @@ msgid "" "\\(client\\_manager\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of msgid "Initialize global model parameters using given strategy." 
@@ -9855,6 +10736,14 @@ msgstr "" "一个元组列表。列表中的每个元组都标识了一个`ClientProxy`和该特定`ClientProxy`的`EvaluateIns`。如果某个特定的" " `ClientProxy` 未包含在此列表中,则表示该 `ClientProxy` 将不参与下一轮联合评估。" +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 +msgid "DPFedAvgFixed" +msgstr "DPFedAvgFixed" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 #: flwr.server.strategy.fedavg.FedAvg:1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of @@ -9938,6 +10827,392 @@ msgstr "" "**fit_configuration** -- " "一个元组列表。列表中的每个元组都标识了一个`ClientProxy`和该特定`ClientProxy`的`FitIns'。如果某个特定的`ClientProxy`不在此列表中,则表示该`ClientProxy`将不参加下一轮联合学习。" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyClientSideAdaptiveClipping" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 +#: of +msgid "Use `adaptiveclipping_mod` modifier at the client side." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 +#: of +msgid "" +"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " +"happen on the client-side, usually by using the built-in " +"`adaptiveclipping_mod`." 
+msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 +#: of +msgid "The strategy to which DP functionalities will be added by this wrapper." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 +#: of +msgid "The noise multiplier for the Gaussian mechanism for model updates." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 +#: of +msgid "The number of clients that are sampled on each round." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 +#: of +msgid "" +"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " +"recommends to set to 0.1." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 +#: of +msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." 
+msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 +#: of +msgid "" +"The learning rate for the clipping norm adaptation. Defaults to 0.2. " +"Andrew et al. recommends to set to 0.2." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 +#: of +msgid "" +"The stddev of the noise added to the count of updates currently below the" +" estimate. Andrew et al. recommends to set to `expected_num_records/20`" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 +#: of +#, fuzzy +msgid "Create a strategy:" +msgstr "server.strategy" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 +#: of +msgid "" +"Wrap the strategy with the " +"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 +#: of +msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: 
flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 +#: of +#, fuzzy +msgid "Aggregate training results and update clip norms." +msgstr "汇总 DPFedAvgFixed 中的训练结果并更新片段标准。" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 +#, fuzzy +msgid "DifferentialPrivacyClientSideFixedClipping" +msgstr "差分隐私" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 +#: of +msgid "Use `fixedclipping_mod` modifier at the client side." 
+msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 +#: of +msgid "" +"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " +"on the client-side, usually by using the built-in `fixedclipping_mod`." +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 +#: of +msgid "" +"The noise multiplier for the Gaussian mechanism for model updates. A " +"value of 1.0 or higher is recommended for strong privacy." +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 +#: of +msgid "The value of the clipping norm." +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 +#: of +msgid "" +"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " +"wrapper:" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 +#: of +msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 +#: of +#, fuzzy +msgid "Add noise to the aggregated parameters." +msgstr "然后将汇总结果序列化:" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyServerSideAdaptiveClipping" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 +#: of +msgid "" +"The standard deviation of the noise added to the count of updates below " +"the estimate. Andrew et al. 
recommends to set to " +"`expected_num_records/20`" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 +#: of +msgid "" +"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " +"wrapper" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 +#, fuzzy +msgid "DifferentialPrivacyServerSideFixedClipping" +msgstr "差分隐私" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 +#: of +msgid "" +"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " +"wrapper" +msgstr "" + 
+#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 +#: of +msgid "Compute the updates, clip, and pass them for aggregation." +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 +#: of +msgid "Afterward, add noise to the aggregated parameters." 
+msgstr "" + #: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 #, fuzzy msgid "FaultTolerantFedAvg" @@ -10222,6 +11497,10 @@ msgstr "" "验证过程中使用的客户端的比例。如果 `min_evaluate_clients` 大于 `fraction_evaluate * " "available_clients`,则仍会对 `min_evaluate_clients` 进行采样。默认为 1.0。" +#: flwr.server.strategy.fedavg.FedAvg:33 of +msgid "Enable (True) or disable (False) in-place aggregation of model updates." +msgstr "" + #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`aggregate_evaluate " @@ -11333,26 +12612,472 @@ msgid "" "these as the initial global model parameters." msgstr "**parameters** -- 如果返回参数,服务器将把这些参数视为初始全局模型参数。" -#: ../../source/ref-api/flwr.simulation.rst:2 +#: ../../source/ref-api/flwr.server.workflow.rst:2 #, fuzzy -msgid "simulation" -msgstr "运行模拟" +msgid "workflow" +msgstr "工作流程" -#: ../../source/ref-api/flwr.simulation.rst:17::1 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -":py:obj:`start_simulation `\\ \\(\\*\\," -" client\\_fn\\[\\, ...\\]\\)" +":py:obj:`DefaultWorkflow `\\ " +"\\(\\[fit\\_workflow\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:17::1 -#: flwr.simulation.app.start_simulation:1 of -msgid "Start a Ray-based Flower simulation server." -msgstr "启动基于 Ray 的Flower模拟服务器。" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of +msgid "Default workflow in Flower." +msgstr "" -#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 -#, fuzzy -msgid "start\\_simulation" -msgstr "start_simulation" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +msgid "" +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "The workflow for the SecAgg+ protocol." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +msgid "" +":py:obj:`SecAggWorkflow `\\ " +"\\(reconstruction\\_threshold\\, \\*\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "The workflow for the SecAgg protocol." +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 +#, fuzzy +msgid "DefaultWorkflow" +msgstr "工作流程" + +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 +#, fuzzy +msgid "SecAggPlusWorkflow" +msgstr "工作流程" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 +#: of +msgid "" +"The SecAgg+ protocol ensures the secure summation of integer vectors " +"owned by multiple parties, without accessing any individual integer " +"vector. This workflow allows the server to compute the weighted average " +"of model parameters across all clients, ensuring individual contributions" +" remain private. This is achieved by clients sending both, a weighting " +"factor and a weighted version of the locally updated parameters, both of " +"which are masked for privacy. Specifically, each client uploads \"[w, w *" +" params]\" with masks, where weighting factor 'w' is the number of " +"examples ('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 +#: of +msgid "" +"The protocol involves four main stages: - 'setup': Send SecAgg+ " +"configuration to clients and collect their public keys. 
- 'share keys': " +"Broadcast public keys among clients and collect encrypted secret" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 +#: of +msgid "key shares." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:18 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:18 +#: of +msgid "" +"'collect masked vectors': Forward encrypted secret key shares to target " +"clients and collect masked model parameters." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:20 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:20 +#: of +msgid "" +"'unmask': Collect secret key shares to decrypt and aggregate the model " +"parameters." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:22 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:22 +#: of +msgid "" +"Only the aggregated model parameters are exposed and passed to " +"`Strategy.aggregate_fit`, ensuring individual data privacy." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:25 +#: of +msgid "" +"The number of shares into which each client's private key is split under " +"the SecAgg+ protocol. If specified as a float, it represents the " +"proportion of all selected clients, and the number of shares will be set " +"dynamically in the run time. A private key can be reconstructed from " +"these shares, allowing for the secure aggregation of model updates. Each " +"client sends one share to each of its neighbors while retaining one." 
+msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:25 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:32 +#: of +msgid "" +"The minimum number of shares required to reconstruct a client's private " +"key, or, if specified as a float, it represents the proportion of the " +"total number of shares needed for reconstruction. This threshold ensures " +"privacy by allowing for the recovery of contributions from dropped " +"clients during aggregation, without compromising individual client data." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:31 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:38 +#: of +msgid "" +"The maximum value of the weight that can be assigned to any single " +"client's update during the weighted average calculation on the server " +"side, e.g., in the FedAvg algorithm." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:35 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:42 +#: of +msgid "" +"The range within which model parameters are clipped before quantization. " +"This parameter ensures each model parameter is bounded within " +"[-clipping_range, clipping_range], facilitating quantization." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:39 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:46 +#: of +msgid "" +"The size of the range into which floating-point model parameters are " +"quantized, mapping each parameter to an integer in [0, " +"quantization_range-1]. This facilitates cryptographic operations on the " +"model updates." 
+msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:43 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:50 +#: of +msgid "" +"The range of values from which random mask entries are uniformly sampled " +"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " +"Please use 2**n values for `modulus_range` to prevent overflow issues." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:47 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:54 +#: of +msgid "" +"The timeout duration in seconds. If specified, the workflow will wait for" +" replies for this duration each time. If `None`, there is no time limit " +"and the workflow will wait until replies for all messages are received." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 +#: of +msgid "" +"Generally, higher `num_shares` means more robust to dropouts while " +"increasing the computational costs; higher `reconstruction_threshold` " +"means better privacy guarantees but less tolerance to dropouts." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:58 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:64 +#: of +msgid "Too large `max_weight` may compromise the precision of the quantization." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 +#: of +msgid "`modulus_range` must be 2**n and larger than `quantization_range`." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 +#: of +msgid "" +"When `num_shares` is a float, it is interpreted as the proportion of all " +"selected clients, and hence the number of shares will be determined in " +"the runtime. 
This allows for dynamic adjustment based on the total number" +" of participating clients." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:69 +#: of +msgid "" +"Similarly, when `reconstruction_threshold` is a float, it is interpreted " +"as the proportion of the number of shares needed for the reconstruction " +"of a private key. This feature enables flexibility in setting the " +"security threshold relative to the number of distributed shares." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:73 +#: of +msgid "" +"`num_shares`, `reconstruction_threshold`, and the quantization parameters" +" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg+" +" protocol." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`collect_masked_vectors_stage " +"`\\" +" \\(driver\\, ...\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "Execute the 'collect masked vectors' stage." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`setup_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 +#: of +msgid "Execute the 'setup' stage." 
+msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`share_keys_stage " +"`\\ " +"\\(driver\\, context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 +#: of +msgid "Execute the 'share keys' stage." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 +#: of +msgid "Execute the 'unmask' stage." +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 +#, fuzzy +msgid "SecAggWorkflow" +msgstr "工作流程" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "" +"Bases: " +":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of +msgid "" +"The SecAgg protocol ensures the secure summation of integer vectors owned" +" by multiple parties, without accessing any individual integer vector. " +"This workflow allows the server to compute the weighted average of model " +"parameters across all clients, ensuring individual contributions remain " +"private. This is achieved by clients sending both, a weighting factor and" +" a weighted version of the locally updated parameters, both of which are " +"masked for privacy. 
Specifically, each client uploads \"[w, w * params]\"" +" with masks, where weighting factor 'w' is the number of examples " +"('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 of +msgid "" +"The protocol involves four main stages: - 'setup': Send SecAgg " +"configuration to clients and collect their public keys. - 'share keys': " +"Broadcast public keys among clients and collect encrypted secret" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 of +msgid "" +"Each client's private key is split into N shares under the SecAgg " +"protocol, where N is the number of selected clients." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:56 of +msgid "" +"Generally, higher `reconstruction_threshold` means better privacy " +"guarantees but less tolerance to dropouts." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 of +msgid "" +"When `reconstruction_threshold` is a float, it is interpreted as the " +"proportion of the number of all selected clients needed for the " +"reconstruction of a private key. This feature enables flexibility in " +"setting the security threshold relative to the number of selected " +"clients." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:64 of +msgid "" +"`reconstruction_threshold`, and the quantization parameters " +"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg " +"protocol." 
+msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`collect_masked_vectors_stage " +"`\\ " +"\\(driver\\, ...\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`setup_stage `\\" +" \\(driver\\, context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`share_keys_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:2 +#, fuzzy +msgid "simulation" +msgstr "运行模拟" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 +msgid "" +":py:obj:`start_simulation `\\ \\(\\*\\," +" client\\_fn\\[\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 +#: flwr.simulation.app.start_simulation:1 of +msgid "Start a Ray-based Flower simulation server." +msgstr "启动基于 Ray 的Flower模拟服务器。" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 +msgid "" +":py:obj:`run_simulation_from_cli " +"`\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 +#: flwr.simulation.run_simulation.run_simulation_from_cli:1 of +msgid "Run Simulation Engine from the CLI." +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 +msgid "" +":py:obj:`run_simulation `\\ " +"\\(server\\_app\\, client\\_app\\, ...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:19::1 +#: flwr.simulation.run_simulation.run_simulation:1 of +msgid "Run a Flower App using the Simulation Engine." 
+msgstr "" + +#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 +#, fuzzy +msgid "run\\_simulation" +msgstr "运行模拟" + +#: flwr.simulation.run_simulation.run_simulation:3 of +msgid "" +"The `ServerApp` to be executed. It will send messages to different " +"`ClientApp` instances running on different (virtual) SuperNodes." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:6 of +msgid "" +"The `ClientApp` to be executed by each of the SuperNodes. It will receive" +" messages sent by the `ServerApp`." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:9 of +msgid "" +"Number of nodes that run a ClientApp. They can be sampled by a Driver in " +"the ServerApp and receive a Message describing what the ClientApp should " +"perform." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:13 of +msgid "A simulation backend that runs `ClientApp`s." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:15 of +msgid "" +"'A dictionary, e.g {\"\": , \"\": } to " +"configure a backend. Values supported in are those included by " +"`flwr.common.typing.ConfigsRecordValues`." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:19 of +msgid "" +"A boolean to indicate whether to enable GPU growth on the main thread. " +"This is desirable if you make use of a TensorFlow model on your " +"`ServerApp` while having your `ClientApp` running on the same GPU. " +"Without enabling this, you might encounter an out-of-memory error because" +" TensorFlow, by default, allocates all GPU memory. Read more about how " +"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " +"documentation: https://www.tensorflow.org/api/stable." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:26 of +msgid "" +"When diabled, only INFO, WARNING and ERROR log messages will be shown. If" +" enabled, DEBUG-level logs will be displayed." 
+msgstr "" + +#: ../../source/ref-api/flwr.simulation.run_simulation_from_cli.rst:2 +#, fuzzy +msgid "run\\_simulation\\_from\\_cli" +msgstr "运行模拟" + +#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +#, fuzzy +msgid "start\\_simulation" +msgstr "start_simulation" #: flwr.simulation.app.start_simulation:3 of msgid "" @@ -11456,10 +13181,11 @@ msgid "" msgstr "设为 True 可在 `ray.is_initialized()=True` 情况下阻止 `ray.shutdown()` 。" #: flwr.simulation.app.start_simulation:50 of +#, fuzzy msgid "" "Optionally specify the type of actor to use. The actor object, which " "persists throughout the simulation, will be the process in charge of " -"running the clients' jobs (i.e. their `fit()` method)." +"executing a ClientApp wrapping input argument `client_fn`." msgstr "可选择指定要使用的actor类型。actor对象将在整个模拟过程中持续存在,它将是负责运行客户端作业(即其 `fit()`方法)的进程。" #: flwr.simulation.app.start_simulation:54 of @@ -12484,8 +14210,8 @@ msgid "" "tensorflow.html) notebooks, and a new [YouTube tutorial " "series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." msgstr "" -"综合文档包括新的[how-to run simulations](https://flower.ai/docs/framework/how-" -"to-run-simulations.html) guide, new [simulation-" +"综合文档包括新的[how-to run simulations](https://flower.ai/docs/framework/how-to-" +"run-simulations.html) guide, new [simulation-" "pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " "[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" "tensorflow.html) notebooks, and a new [YouTube tutorial " @@ -12551,15 +14277,15 @@ msgstr "" "[#2227](https://github.com/adap/flower/pull/2227))" #: ../../source/ref-changelog.md:220 +#, fuzzy msgid "" "Much effort went into a completely restructured Flower docs experience. " -"The documentation on [flower.ai/docs](flower.ai/docs) is now divided " -"into Flower Framework, Flower Baselines, Flower Android SDK, Flower iOS " -"SDK, and code example projects." 
+"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " +"divided into Flower Framework, Flower Baselines, Flower Android SDK, " +"Flower iOS SDK, and code example projects." msgstr "" -"Flower 文档体验的全面重构耗费了大量精力。现在,[flower.ai/docs](flower.ai/docs)上的文档分为 " -"Flower Framework、Flower Baselines、Flower Android SDK、Flower iOS SDK " -"和代码示例项目。" +"Flower 文档体验的全面重构耗费了大量精力。现在,[flower.ai/docs](flower.ai/docs)上的文档分为 Flower " +"Framework、Flower Baselines、Flower Android SDK、Flower iOS SDK 和代码示例项目。" #: ../../source/ref-changelog.md:222 msgid "" @@ -12904,13 +14630,13 @@ msgid "" "gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" " " "[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" and a [code " -"example](https://github.com/adap/flower/tree/main/examples/xgboost-quickstart)" -" that demonstrates the usage of this new strategy in an XGBoost project." +" and a [code example](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart) that demonstrates the usage of this new strategy in " +"an XGBoost project." msgstr "" "XGBoost 是一种基于树的集合机器学习算法,它使用梯度提升来提高模型的准确性。我们添加了一个新的 " -"\"FedXgbNnAvg\"[策略](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)和一个[代码示例](https://github.com/adap/flower/tree/main/examples/xgboost-quickstart),演示如何在" -" XGBoost 项目中使用这个新策略。" +"\"FedXgbNnAvg\"[策略](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)和一个[代码示例](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart),演示如何在 XGBoost 项目中使用这个新策略。" #: ../../source/ref-changelog.md:300 msgid "" @@ -13094,10 +14820,12 @@ msgstr "" msgid "" "TabNet is a powerful and flexible framework for training machine learning" " models on tabular data. We now have a federated example using Flower: " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-tabnet)." 
+"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" +"/quickstart-tabnet)." msgstr "" -"TabNet 是一个强大而灵活的框架,用于在表格数据上训练机器学习模型。我们现在有一个使用 Flower " -"的联邦示例:[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-tabnet)。" +"TabNet 是一个强大而灵活的框架,用于在表格数据上训练机器学习模型。我们现在有一个使用 Flower 的联邦示例:[quickstart-" +"tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-" +"tabnet)。" #: ../../source/ref-changelog.md:334 msgid "" @@ -13305,11 +15033,13 @@ msgstr "" msgid "" "A new code example (`quickstart-fastai`) demonstrates federated learning " "with [fastai](https://www.fast.ai/) and Flower. You can find it here: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples/quickstart-fastai)." +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" +"/quickstart-fastai)." msgstr "" "一个新的代码示例(`quickstart-fastai`)演示了使用 [fastai](https://www.fast.ai/) 和 " -"Flower 的联邦学习。您可以在这里找到它: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples/quickstart-fastai)。" +"Flower 的联邦学习。您可以在这里找到它: [quickstart-" +"fastai](https://github.com/adap/flower/tree/main/examples/quickstart-" +"fastai)。" #: ../../source/ref-changelog.md:376 msgid "" @@ -13527,8 +15257,8 @@ msgid "" msgstr "" "在未来几周内,我们将发布一些新的参考,特别是对 FL " "新手有用的方法。它们通常会重温文献中的知名论文,适合集成到您自己的应用程序中或用于实验,以加深您对 FL " -"的总体了解。今天发布的是该系列中的第一篇。[阅读全文](https://flower.ai/blog/2023-01-12-fl-" -"starter-pack-fedavg-mnist-cnn/)" +"的总体了解。今天发布的是该系列中的第一篇。[阅读全文](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" #: ../../source/ref-changelog.md:422 msgid "" @@ -13697,11 +15427,13 @@ msgstr "" #: ../../source/ref-changelog.md:453 msgid "" "A new code example (`quickstart-pandas`) demonstrates federated analytics" -" with Pandas and Flower. You can find it here: " -"[quickstart-pandas](https://github.com/adap/flower/tree/main/examples/quickstart-pandas)." +" with Pandas and Flower. 
You can find it here: [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." msgstr "" "新代码示例(`quickstart-pandas`)演示了使用 Pandas 和 Flower 进行联邦分析。您可以在此处找到它: " -"[quickstart-pandas](https://github.com/adap/flower/tree/main/examples/quickstart-pandas)。" +"[quickstart-pandas](https://github.com/adap/flower/tree/main/examples" +"/quickstart-pandas)。" #: ../../source/ref-changelog.md:455 msgid "" @@ -14590,14 +16322,15 @@ msgstr "" "[#914](https://github.com/adap/flower/pull/914))" #: ../../source/ref-changelog.md:660 +#, fuzzy msgid "" "The first preview release of Flower Baselines has arrived! We're " "kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " "FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " "to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " "With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.ai/docs/contributing-" -"baselines.html)." +"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" +"contribute-baselines.html)." 
msgstr "" "Flower Baselines 的第一个预览版已经发布!我们通过实现 " "FedOpt(FedYogi、FedAdam、FedAdagrad)、FedBN 和 FedAvgM 来启动 Flower " @@ -14787,8 +16520,8 @@ msgid "" "strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " "[#1175](https://github.com/adap/flower/pull/1175))" msgstr "" -"[实施战略](https://flower.ai/docs/framework/how-to-implement-" -"strategies.html) 的新文件([#1097](https://github.com/adap/flower/pull/1097), " +"[实施战略](https://flower.ai/docs/framework/how-to-implement-strategies.html)" +" 的新文件([#1097](https://github.com/adap/flower/pull/1097), " "[#1175](https://github.com/adap/flower/pull/1175)" #: ../../source/ref-changelog.md:707 @@ -15775,32 +17508,20 @@ msgstr "" "`PyTorch `_ 或 `TensorFlow " "`_。" -#: ../../source/ref-example-projects.rst:11 +#: ../../source/ref-example-projects.rst:10 +#, fuzzy msgid "" -"Flower usage examples used to be bundled with Flower in a package called " -"``flwr_example``. We are migrating those examples to standalone projects " -"to make them easier to use. All new examples are based in the directory " -"`examples `_." -msgstr "" -"Flower 的使用示例曾与 Flower 捆绑在一个名为 ``flwr_example`` " -"的软件包中。我们正在将这些示例迁移到独立项目中,以使它们更易于使用。所有新示例都位于目录 `examples " -"`_。" - -#: ../../source/ref-example-projects.rst:16 -msgid "The following examples are available as standalone projects." +"The following examples are available as standalone projects. 
Quickstart " +"TensorFlow/Keras ---------------------------" msgstr "以下示例可作为独立项目使用。" -#: ../../source/ref-example-projects.rst:20 -msgid "Quickstart TensorFlow/Keras" -msgstr "快速入门 TensorFlow/Keras" - -#: ../../source/ref-example-projects.rst:22 +#: ../../source/ref-example-projects.rst:14 msgid "" "The TensorFlow/Keras quickstart example shows CIFAR-10 image " "classification with MobileNetV2:" msgstr "TensorFlow/Keras 快速入门示例展示了使用 MobileNetV2 进行的 CIFAR-10 图像分类:" -#: ../../source/ref-example-projects.rst:25 +#: ../../source/ref-example-projects.rst:17 msgid "" "`Quickstart TensorFlow (Code) " "`_" -#: ../../source/ref-example-projects.rst:26 -msgid "" -"`Quickstart TensorFlow (Tutorial) `_" +#: ../../source/ref-example-projects.rst:18 +#, fuzzy +msgid ":doc:`Quickstart TensorFlow (Tutorial) `" msgstr "" "`TensorFlow快速入门 (教程) `_" -#: ../../source/ref-example-projects.rst:27 +#: ../../source/ref-example-projects.rst:19 msgid "" "`Quickstart TensorFlow (Blog Post) `_" @@ -15825,18 +17545,18 @@ msgstr "" "`TensorFlow快速入门 (博客) `_" -#: ../../source/ref-example-projects.rst:31 +#: ../../source/ref-example-projects.rst:23 #: ../../source/tutorial-quickstart-pytorch.rst:5 msgid "Quickstart PyTorch" msgstr "PyTorch快速入门" -#: ../../source/ref-example-projects.rst:33 +#: ../../source/ref-example-projects.rst:25 msgid "" "The PyTorch quickstart example shows CIFAR-10 image classification with a" " simple Convolutional Neural Network:" msgstr "PyTorch 快速入门范例展示了使用简单卷积神经网络进行 CIFAR-10 图像分类的情况:" -#: ../../source/ref-example-projects.rst:36 +#: ../../source/ref-example-projects.rst:28 msgid "" "`Quickstart PyTorch (Code) " "`_" @@ -15844,25 +17564,24 @@ msgstr "" "`PyTorch快速入门 (代码) `_" -#: ../../source/ref-example-projects.rst:37 -msgid "" -"`Quickstart PyTorch (Tutorial) `_" +#: ../../source/ref-example-projects.rst:29 +#, fuzzy +msgid ":doc:`Quickstart PyTorch (Tutorial) `" msgstr "" "`PyTorch快速入门 (教程) `_" -#: ../../source/ref-example-projects.rst:41 +#: 
../../source/ref-example-projects.rst:33 msgid "PyTorch: From Centralized To Federated" msgstr "PyTorch: 从集中式到联邦式" -#: ../../source/ref-example-projects.rst:43 +#: ../../source/ref-example-projects.rst:35 msgid "" "This example shows how a regular PyTorch project can be federated using " "Flower:" msgstr "本例展示了如何使用 Flower 联邦化一个普通的 PyTorch 项目:" -#: ../../source/ref-example-projects.rst:45 +#: ../../source/ref-example-projects.rst:37 msgid "" "`PyTorch: From Centralized To Federated (Code) " "`_" -#: ../../source/ref-example-projects.rst:46 +#: ../../source/ref-example-projects.rst:38 +#, fuzzy msgid "" -"`PyTorch: From Centralized To Federated (Tutorial) " -"`_" +":doc:`PyTorch: From Centralized To Federated (Tutorial) `" msgstr "" "PyTorch: 从集中式到联邦式(教程) `_" -#: ../../source/ref-example-projects.rst:50 +#: ../../source/ref-example-projects.rst:42 msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" msgstr "树莓派和 Nvidia Jetson 上的联邦学习" -#: ../../source/ref-example-projects.rst:52 +#: ../../source/ref-example-projects.rst:44 msgid "" "This example shows how Flower can be used to build a federated learning " "system that run across Raspberry Pi and Nvidia Jetson:" msgstr "本示例展示了如何利用 Flower 建立一个跨 Raspberry Pi 和 Nvidia Jetson 运行的联邦学习系统:" -#: ../../source/ref-example-projects.rst:54 +#: ../../source/ref-example-projects.rst:46 msgid "" "`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " "`_" @@ -15898,7 +17617,7 @@ msgstr "" "Raspberry Pi 和 Nvidia Jetson 上的联邦学习(代码) " "`_" -#: ../../source/ref-example-projects.rst:55 +#: ../../source/ref-example-projects.rst:47 msgid "" "`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " "`_" @@ -15906,186 +17625,28 @@ msgstr "" "Raspberry Pi和 Nvidia Jetson 上的联邦学习(博客) " "`_" -#: ../../source/ref-example-projects.rst:60 -msgid "Legacy Examples (`flwr_example`)" -msgstr "传统示例 (`flwr_example`)" - -#: ../../source/ref-example-projects.rst:63 +#: ../../source/ref-faq.rst:4 msgid "" -"The useage examples in 
`flwr_example` are deprecated and will be removed " -"in the future. New examples are provided as standalone projects in " -"`examples `_." -msgstr "" -"在 `flwr_example` 中的使用示例已被弃用,今后将被移除。新示例将作为独立项目在 `examples " -"`_ 中提供。" +"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." +msgstr "本页收集了有关 \"Flower 联邦学习 \"常见问题的答案。" -#: ../../source/ref-example-projects.rst:69 -msgid "Extra Dependencies" -msgstr "额外依赖" +#: ../../source/ref-faq.rst +#, fuzzy +msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +msgstr ":fa:`eye,mr-1` Flower 可以在 Juptyter Notebooks / Google Colab 上运行吗?" -#: ../../source/ref-example-projects.rst:71 +#: ../../source/ref-faq.rst:8 msgid "" -"The core Flower framework keeps a minimal set of dependencies. The " -"examples demonstrate Flower in the context of different machine learning " -"frameworks, so additional dependencies need to be installed before an " -"example can be run." -msgstr "Flower 核心框架只保留了最低限度的依赖项。这些示例在不同机器学习框架的背景下演示了 Flower,因此在运行示例之前需要安装额外的依赖项。" - -#: ../../source/ref-example-projects.rst:75 -msgid "For PyTorch examples::" -msgstr "PyTorch 示例::" - -#: ../../source/ref-example-projects.rst:79 -msgid "For TensorFlow examples::" -msgstr "TensorFlow 示例::" - -#: ../../source/ref-example-projects.rst:83 -msgid "For both PyTorch and TensorFlow examples::" -msgstr "PyTorch 和 TensorFlow 示例::" +"Yes, it can! Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. Here's a quickstart example:" +msgstr "是的,它可以!Flower 甚至还进行了一些底层优化,使其在 Colab 上运行得更好。下面是一个快速启动示例:" -#: ../../source/ref-example-projects.rst:87 +#: ../../source/ref-faq.rst:10 msgid "" -"Please consult :code:`pyproject.toml` for a full list of possible extras " -"(section :code:`[tool.poetry.extras]`)." 
-msgstr "" -"请参阅 :code:`pyproject.toml`,了解可能的 extras 的完整列表(章节 " -":code:`[tool.poems.extras]`)。" - -#: ../../source/ref-example-projects.rst:92 -msgid "PyTorch Examples" -msgstr "PyTorch 示例" - -#: ../../source/ref-example-projects.rst:94 -msgid "" -"Our PyTorch examples are based on PyTorch 1.7. They should work with " -"other releases as well. So far, we provide the following examples." -msgstr "我们的 PyTorch 示例基于 PyTorch 1.7。它们应该也能在其他版本中使用。到目前为止,我们提供了以下示例。" - -#: ../../source/ref-example-projects.rst:98 -msgid "CIFAR-10 Image Classification" -msgstr "CIFAR-10 图像分类" - -#: ../../source/ref-example-projects.rst:100 -msgid "" -"`CIFAR-10 and CIFAR-100 `_ " -"are popular RGB image datasets. The Flower CIFAR-10 example uses PyTorch " -"to train a simple CNN classifier in a federated learning setup with two " -"clients." -msgstr "" -"CIFAR-10 和 CIFAR-100 ``_ " -"是流行的 RGB 图像数据集。Flower CIFAR-10 示例使用 PyTorch 在有两个客户端的联邦学习设置中训练一个简单的 CNN " -"分类器。" - -#: ../../source/ref-example-projects.rst:104 -#: ../../source/ref-example-projects.rst:121 -#: ../../source/ref-example-projects.rst:146 -msgid "First, start a Flower server:" -msgstr "首先,启动 Flower 服务器:" - -#: ../../source/ref-example-projects.rst:106 -msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" -msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" - -#: ../../source/ref-example-projects.rst:108 -#: ../../source/ref-example-projects.rst:125 -#: ../../source/ref-example-projects.rst:150 -msgid "Then, start the two clients in a new terminal window:" -msgstr "然后,在新的终端窗口中启动两个客户端:" - -#: ../../source/ref-example-projects.rst:110 -msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" -msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" - -#: ../../source/ref-example-projects.rst:112 -msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." 
-msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_cifar`。" - -#: ../../source/ref-example-projects.rst:115 -msgid "ImageNet-2012 Image Classification" -msgstr "ImageNet-2012 图像分类" - -#: ../../source/ref-example-projects.rst:117 -msgid "" -"`ImageNet-2012 `_ is one of the major computer" -" vision datasets. The Flower ImageNet example uses PyTorch to train a " -"ResNet-18 classifier in a federated learning setup with ten clients." -msgstr "" -"ImageNet-2012 `_ 是主要的计算机视觉数据集之一。Flower " -"ImageNet 示例使用 PyTorch 在有十个客户端的联邦学习设置中训练 ResNet-18 分类器。" - -#: ../../source/ref-example-projects.rst:123 -msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" -msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" - -#: ../../source/ref-example-projects.rst:127 -msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" -msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" - -#: ../../source/ref-example-projects.rst:129 -msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." -msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_imagenet`。" - -#: ../../source/ref-example-projects.rst:133 -msgid "TensorFlow Examples" -msgstr "TensorFlow 示例" - -#: ../../source/ref-example-projects.rst:135 -msgid "" -"Our TensorFlow examples are based on TensorFlow 2.0 or newer. So far, we " -"provide the following examples." -msgstr "我们的 TensorFlow 示例基于 TensorFlow 2.0 或更新版本。到目前为止,我们提供了以下示例。" - -#: ../../source/ref-example-projects.rst:139 -msgid "Fashion-MNIST Image Classification" -msgstr "Fashion-MNIST 图像分类" - -#: ../../source/ref-example-projects.rst:141 -msgid "" -"`Fashion-MNIST `_ is " -"often used as the \"Hello, world!\" of machine learning. We follow this " -"tradition and provide an example which samples random local datasets from" -" Fashion-MNIST and trains a simple image classification model over those " -"partitions." 
-msgstr "" -"`Fashion-MNIST `_ " -"经常被用作机器学习的 \"你好,世界!\"。我们遵循这一传统,提供了一个从Fashion-MNIST " -"中随机抽样本地数据集的示例,并在这些分区上训练一个简单的图像分类模型。" - -#: ../../source/ref-example-projects.rst:148 -msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" -msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" - -#: ../../source/ref-example-projects.rst:152 -msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" -msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" - -#: ../../source/ref-example-projects.rst:154 -msgid "" -"For more details, see " -":code:`src/py/flwr_example/tensorflow_fashion_mnist`." -msgstr "更多详情,请参阅 :code:`src/py/flwr_example/tensorflow_fashion_mnist`。" - -#: ../../source/ref-faq.rst:4 -msgid "" -"This page collects answers to commonly asked questions about Federated " -"Learning with Flower." -msgstr "本页收集了有关 \"Flower 联邦学习 \"常见问题的答案。" - -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can Flower run on Juptyter Notebooks / Google Colab?" -msgstr ":fa:`eye,mr-1` Flower 可以在 Juptyter Notebooks / Google Colab 上运行吗?" - -#: ../../source/ref-faq.rst:8 -msgid "" -"Yes, it can! Flower even comes with a few under-the-hood optimizations to" -" make it work even better on Colab. Here's a quickstart example:" -msgstr "是的,它可以!Flower 甚至还进行了一些底层优化,使其在 Colab 上运行得更好。下面是一个快速启动示例:" - -#: ../../source/ref-faq.rst:10 -msgid "" -"`Flower simulation PyTorch " -"`_" +"`Flower simulation PyTorch " +"`_" msgstr "" "`Flower 模拟 PyTorch " "`_ or check out the code examples:" +"`_ or check out the code examples:" msgstr "" "是的,确实如此。请查看我们的 \"博客文章 `_\" 或查看代码示例:" @@ -16134,9 +17695,7 @@ msgstr "" msgid "" "`Android Kotlin example `_" -msgstr "" -"`Android Kotlin 示例 `_" +msgstr "`Android Kotlin 示例 `_" #: ../../source/ref-faq.rst:22 msgid "`Android Java example `_" @@ -16169,8 +17728,9 @@ msgstr "" "`_." #: ../../source/ref-faq.rst:30 +#, fuzzy msgid "" -"`Flower meets KOSMoS `_." msgstr "" "`Flower meets KOSMoS `_ ." 
msgstr "" "如果您想查看所有内容,请查看完整的代码示例: [https://github.com/adap/flower/tree/main/examples" "/quickstart-" "huggingface](https://github.com/adap/flower/tree/main/examples" "/quickstart-huggingface)." -#: ../../source/tutorial-quickstart-huggingface.rst:227 +#: ../../source/tutorial-quickstart-huggingface.rst:226 msgid "" "Of course, this is a very basic example, and a lot can be added or " "modified, it was just to showcase how simply we could federate a Hugging " "Face workflow using Flower." msgstr "当然,这只是一个非常基本的示例,还可以添加或修改很多内容,只是为了展示我们可以如何简单地使用 Flower 联合Hugging Face的工作流程。" -#: ../../source/tutorial-quickstart-huggingface.rst:230 +#: ../../source/tutorial-quickstart-huggingface.rst:229 msgid "" "Note that in this example we used :code:`PyTorch`, but we could have very" " well used :code:`TensorFlow`." @@ -16655,11 +18214,12 @@ msgid "" msgstr "在本教程中,我们将学习如何在 iOS 设备上使用 Flower 和 CoreML 在 MNIST 上训练神经网络。" #: ../../source/tutorial-quickstart-ios.rst:12 +#, fuzzy msgid "" "First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a `virtualenv " -"`_. For the Flower " -"client implementation in iOS, it is recommended to use Xcode as our IDE." +"create a virtual environment and run everything within a :doc:`virtualenv" +" `. For the Flower client " +"implementation in iOS, it is recommended to use Xcode as our IDE." 
msgstr "" "首先,为了运行 Flower Python 服务器,建议创建一个虚拟环境,并在 `virtualenv " "`_ 中运行一切。对于在 iOS 中实现 " @@ -16691,6 +18251,15 @@ msgstr "现在我们已经有了一个大致的概念,让我们开始设置 Fl msgid "Or Poetry:" msgstr "或者Poetry:" +#: ../../source/tutorial-quickstart-ios.rst:34 +#: ../../source/tutorial-quickstart-mxnet.rst:36 +#: ../../source/tutorial-quickstart-pytorch.rst:37 +#: ../../source/tutorial-quickstart-scikitlearn.rst:40 +#: ../../source/tutorial-quickstart-tensorflow.rst:29 +#: ../../source/tutorial-quickstart-xgboost.rst:55 +msgid "Flower Client" +msgstr "Flower 客户端" + #: ../../source/tutorial-quickstart-ios.rst:36 msgid "" "Now that we have all our dependencies installed, let's run a simple " @@ -16738,20 +18307,22 @@ msgstr "" "中完成。" #: ../../source/tutorial-quickstart-ios.rst:99 +#, fuzzy msgid "" "Since CoreML does not allow the model parameters to be seen before " "training, and accessing the model parameters during or after the training" " can only be done by specifying the layer name, we need to know this " -"informations beforehand, through looking at the model specification, " -"which are written as proto files. The implementation can be seen in " +"information beforehand, through looking at the model specification, which" +" are written as proto files. The implementation can be seen in " ":code:`MLModelInspect`." msgstr "" "由于 CoreML 不允许在训练前查看模型参数,而在训练过程中或训练后访问模型参数只能通过指定层名来完成,因此我们需要事先通过查看模型规范(写成 " "proto 文件)来了解这些信息。具体实现可参见 :code:`MLModelInspect`。" #: ../../source/tutorial-quickstart-ios.rst:102 +#, fuzzy msgid "" -"After we have all of the necessary informations, let's create our Flower " +"After we have all of the necessary information, let's create our Flower " "client." 
msgstr "获得所有必要信息后,让我们创建 Flower 客户端。" @@ -16774,6 +18345,15 @@ msgstr "" ":code:`startFlwrGRPC()`。属性 :code:`hostname` 和 :code:`port` " "会告诉客户端要连接到哪个服务器。这可以通过在应用程序中输入主机名和端口来实现,然后再点击开始按钮启动联邦学习进程。" +#: ../../source/tutorial-quickstart-ios.rst:129 +#: ../../source/tutorial-quickstart-mxnet.rst:226 +#: ../../source/tutorial-quickstart-pytorch.rst:203 +#: ../../source/tutorial-quickstart-scikitlearn.rst:157 +#: ../../source/tutorial-quickstart-tensorflow.rst:98 +#: ../../source/tutorial-quickstart-xgboost.rst:309 +msgid "Flower Server" +msgstr "Flower 服务器" + #: ../../source/tutorial-quickstart-ios.rst:131 #: ../../source/tutorial-quickstart-mxnet.rst:228 #: ../../source/tutorial-quickstart-pytorch.rst:205 @@ -16853,7 +18433,7 @@ msgstr "快速入门 MXNet" msgid "" "MXNet is no longer maintained and has been moved into `Attic " "`_. As a result, we would " -"encourage you to use other ML frameworks alongise Flower, for example, " +"encourage you to use other ML frameworks alongside Flower, for example, " "PyTorch. This tutorial might be removed in future versions of Flower." msgstr "" @@ -16865,14 +18445,22 @@ msgstr "在本教程中,我们将学习如何使用 Flower 和 MXNet 在 MNIST #: ../../source/tutorial-quickstart-mxnet.rst:14 #: ../../source/tutorial-quickstart-scikitlearn.rst:12 +#, fuzzy msgid "" "It is recommended to create a virtual environment and run everything " -"within this `virtualenv `_." +"within this :doc:`virtualenv `." msgstr "" "建议创建一个虚拟环境,并在此 `virtualenv `_ 中运行所有内容。" +#: ../../source/tutorial-quickstart-mxnet.rst:16 +#: ../../source/tutorial-quickstart-pytorch.rst:17 +#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +msgid "" +"Our example consists of one *server* and two *clients* all having the " +"same model." 
+msgstr "我们的例子包括一个*服务器*和两个*客户端*,它们都有相同的模型。" + #: ../../source/tutorial-quickstart-mxnet.rst:18 #: ../../source/tutorial-quickstart-scikitlearn.rst:16 msgid "" @@ -17139,14 +18727,30 @@ msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 CIF #: ../../source/tutorial-quickstart-pytorch.rst:15 #: ../../source/tutorial-quickstart-xgboost.rst:39 +#, fuzzy msgid "" "First of all, it is recommended to create a virtual environment and run " -"everything within a `virtualenv `_." +"everything within a :doc:`virtualenv `." msgstr "" "首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" +#: ../../source/tutorial-quickstart-pytorch.rst:19 +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." +msgstr "*客户端*负责在其本地数据集上更新模型参数。然后,这些参数会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的模型参数更新周期称为一*轮*。" + +#: ../../source/tutorial-quickstart-pytorch.rst:23 +msgid "" +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. 
You can do this by running :" +msgstr "现在,我们已经有了一个大致的概念了,那就让我们开始吧。首先,我们需要安装 Flower。可以通过运行 :" + #: ../../source/tutorial-quickstart-pytorch.rst:29 msgid "" "Since we want to use PyTorch to solve a computer vision task, let's go " @@ -17315,7 +18919,8 @@ msgstr "" "Regression` 模型。" #: ../../source/tutorial-quickstart-scikitlearn.rst:26 -msgid "Since we want to use scikt-learn, let's go ahead and install it:" +#, fuzzy +msgid "Since we want to use scikit-learn, let's go ahead and install it:" msgstr "既然我们要使用 scikt-learn,那就继续安装吧:" #: ../../source/tutorial-quickstart-scikitlearn.rst:32 @@ -17396,12 +19001,14 @@ msgstr "" "还需要导入几个软件包,如 Flower 和 scikit-learn:" #: ../../source/tutorial-quickstart-scikitlearn.rst:73 +#, fuzzy msgid "" -"We load the MNIST dataset from `OpenML `_, " -"a popular image classification dataset of handwritten digits for machine " -"learning. The utility :code:`utils.load_mnist()` downloads the training " -"and test data. The training set is split afterwards into 10 partitions " -"with :code:`utils.partition()`." +"We load the MNIST dataset from `OpenML " +"`_, a popular " +"image classification dataset of handwritten digits for machine learning. " +"The utility :code:`utils.load_mnist()` downloads the training and test " +"data. The training set is split afterwards into 10 partitions with " +":code:`utils.partition()`." msgstr "" "我们从 `OpenML `_ 中加载 MNIST " "数据集,这是一个用于机器学习的流行手写数字图像分类数据集。实用程序 :code:`utils.load_mnist()` " @@ -17969,10 +19576,9 @@ msgid "" "`_), we provide more options to define various experimental" " setups, including aggregation strategies, data partitioning and " -"centralised/distributed evaluation. We also support `Flower simulation " -"`_ making " -"it easy to simulate large client cohorts in a resource-aware manner. " -"Let's take a look!" +"centralised/distributed evaluation. We also support :doc:`Flower " +"simulation ` making it easy to simulate large " +"client cohorts in a resource-aware manner. Let's take a look!" 
msgstr "" "既然您已经知道联合 XGBoost 如何与 Flower 协同工作,那么现在就该通过自定义实验设置来运行一些更综合的实验了。在 xgboost-" "comprehensive 示例 (`完整代码 " @@ -18413,9 +20019,8 @@ msgid "" "pytorch.html>`__ introduces ``Client``, the flexible API underlying " "``NumPyClient``." msgstr "" -"Flower联邦学习教程 - 第4部分 `__ " -"介绍了``Client``,它是``NumPyClient``底层的灵活应用程序接口。" +"Flower联邦学习教程 - 第4部分 `__ 介绍了``Client``,它是``NumPyClient``底层的灵活应用程序接口。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 msgid "Customize the client" @@ -18438,8 +20043,8 @@ msgstr "" "pytorch.html>`__),了解了如何使用策略来定制服务器和客户端的执行(`part 2 " "`__),并从头开始构建了我们自己的定制策略(`part 3 " -"`__)。" +"`__)。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 msgid "" @@ -18678,8 +20283,8 @@ msgstr "客户端" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 msgid "" -"To be able to serialize our ``ndarray``\\ s into sparse " -"parameters, we will just have to call our custom functions in our " +"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " +"will just have to call our custom functions in our " "``flwr.client.Client``." msgstr "为了能够将我们的 ``ndarray`` 序列化为稀疏参数,我们只需在 ``flwr.client.Client`` 中调用我们的自定义函数。" @@ -19569,9 +21174,9 @@ msgstr "" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 msgid "" "The `Flower Federated Learning Tutorial - Part 3 " -"`__ shows how to build a fully custom ``Strategy`` " -"from scratch." +"`__ shows how to build a fully custom ``Strategy`` from " +"scratch." msgstr "" "`Flower 联邦学习教程 - 第 3 部分 `__ 展示了如何从头开始构建完全自定义的 \"策略\"。" @@ -19606,8 +21211,8 @@ msgid "" "unclear, head over to the ``#questions`` channel." 
msgstr "" "`Star Flower on GitHub `__ ⭐️ 并加入 Slack " -"上的开源 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ " -"🌼 我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" +"上的开源 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ 🌼" +" 我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 msgid "Let's get started!" @@ -19631,7 +21236,7 @@ msgid "" msgstr "在机器学习中,我们有一个模型和数据。模型可以是一个神经网络(如图所示),也可以是其他东西,比如经典的线性回归。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|31e4b1afa87c4b968327bbeafbf184d4|" +msgid "|2b5c62c529f6416f840c594cce062fbb|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 @@ -19646,7 +21251,7 @@ msgid "" msgstr "我们使用数据来训练模型,以完成一项有用的任务。任务可以是检测图像中的物体、转录音频或玩围棋等游戏。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|c9d935b4284e4c389a33d86b33e07c0a|" +msgid "|90b334680cb7467d9a04d39b8e8dca9f|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 @@ -19667,7 +21272,7 @@ msgid "" msgstr "它源于智能手机上用户与应用程序的交互、汽车上传感器数据的收集、笔记本电脑上键盘输入的接收,或者智能扬声器上某人试着唱的歌。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|00727b5faffb468f84dd1b03ded88638|" +msgid "|65764ceee89f4335bfd93fd0b115e831|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 @@ -19685,7 +21290,7 @@ msgstr "" "\"通常不只是一个地方,而是很多地方。它可能是多个运行同一应用程序的设备。但也可能是多个组织,都在为同一任务生成数据。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|daf0cf0ff4c24fd29439af78416cf47b|" +msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 @@ -19701,7 +21306,7 @@ msgid "" msgstr "因此,要使用机器学习或任何类型的数据分析,过去使用的方法是在中央服务器上收集所有数据。这个服务器可以在数据中心的某个地方,也可以在云端的某个地方。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|9f093007080d471d94ca90d3e9fde9b6|" +msgid "|11e95ac83a8548d8b3505b4663187d07|" msgstr "" 
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 @@ -19716,7 +21321,7 @@ msgid "" msgstr "一旦所有数据都收集到一处,我们最终就可以使用机器学习算法在数据上训练我们的模型。这就是我们基本上一直依赖的机器学习方法。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|46a26e6150e0479fbd3dfd655f36eb13|" +msgid "|1dab2f3a23674abc8a6731f20fa10730|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 @@ -19736,7 +21341,7 @@ msgid "" msgstr "我们刚刚看到的经典机器学习方法可以在某些情况下使用。很好的例子包括对假日照片进行分类或分析网络流量。在这些案例中,所有数据自然都可以在中央服务器上获得。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|3daba297595c4c7fb845d90404a6179a|" +msgid "|7f0ee162da38450788493a21627306f7|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 @@ -19751,7 +21356,7 @@ msgid "" msgstr "但这种方法并不适用于许多其他情况。例如,集中服务器上没有数据,或者一台服务器上的数据不足以训练出一个好的模型。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|5769874fa9c4455b80b2efda850d39d7|" +msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 @@ -19835,10 +21440,10 @@ msgid "" "The popularity of privacy-enhancing systems like the `Brave " "`__ browser or the `Signal `__ " "messenger shows that users care about privacy. In fact, they choose the " -"privacy-enhancing version over other alternatives, if such an alternative " -"exists. But what can we do to apply machine learning and data science to " -"these cases to utilize private data? After all, these are all areas that " -"would benefit significantly from recent advances in AI." +"privacy-enhancing version over other alternatives, if such an alternative" +" exists. But what can we do to apply machine learning and data science to" +" these cases to utilize private data? After all, these are all areas that" +" would benefit significantly from recent advances in AI." 
msgstr "" "像 `Brave `__浏览器或 `Signal " "`__信息管理器这样的隐私增强系统的流行表明,用户关心隐私。事实上,他们会选择隐私性更好的产品。但是,我们能做些什么来将机器学习和数据科学应用到这些情况中,以利用隐私数据呢?毕竟,这些领域都将从人工智能的最新进展中受益匪浅。" @@ -19899,7 +21504,7 @@ msgid "" msgstr "我们首先在服务器上初始化模型。这与经典的集中式学习完全相同:我们随机或从先前保存的检查点初始化模型参数。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|ba47ffb421814b0f8f9fa5719093d839|" +msgid "|5b1408eec0d746cdb91162a9107b6089|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 @@ -19923,7 +21528,7 @@ msgid "" msgstr "接下来,我们会将全局模型的参数发送到连接的客户端节点(如智能手机等边缘设备或企业的服务器)。这是为了确保每个参与节点都使用相同的模型参数开始本地训练。我们通常只使用几个连接节点,而不是所有节点。这样做的原因是,选择越来越多的客户端节点会导致收益递减。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|aeac5bf79cbf497082e979834717e01b|" +msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 @@ -19949,7 +21554,7 @@ msgstr "" "(mini-batches)。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|ce27ed4bbe95459dba016afc42486ba2|" +msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 @@ -19972,7 +21577,7 @@ msgid "" msgstr "经过本地训练后,每个客户节点最初收到的模型参数都会略有不同。参数之所以不同,是因为每个客户端节点的本地数据集中都有不同的数据。然后,客户端节点将这些模型更新发回服务器。它们发送的模型更新既可以是完整的模型参数,也可以只是本地训练过程中积累的梯度。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|ae94a7f71dda443cbec2385751427d41|" +msgid "|ec1fe880237247e0975f52766775ab84|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 @@ -20018,7 +21623,7 @@ msgstr "" " 100 个示例的 10 倍。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" +msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 @@ -20093,10 +21698,6 @@ msgid "" "individual client nodes." 
msgstr "在很多情况下,机器学习并不是从数据中获取价值的必要条件。数据分析可以产生有价值的见解,但同样,往往没有足够的数据来获得明确的答案。人们患某种健康疾病的平均年龄是多少?联邦分析可以通过多个客户端节点进行此类查询。它通常与安全聚合等其他隐私增强技术结合使用,以防止服务器看到单个客户端节点提交的结果。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 -msgid "Differential Privacy" -msgstr "差分隐私" - #: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 msgid "" "Differential privacy (DP) is often mentioned in the context of Federated " @@ -20129,7 +21730,7 @@ msgstr "" "为联邦学习、分析和评估提供了一种统一的方法。它允许用户联邦化任何工作负载、任何 ML 框架和任何编程语言。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|08cb60859b07461588fe44e55810b050|" +msgid "|ff726bc5505e432388ee2fdd6ef420b9|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 @@ -20493,3 +22094,1021 @@ msgstr "" #~ "`_\" " #~ "的类来配置,因此行为方式也完全相同。除此之外,由 :code:`VirtualClientEngine` " #~ "管理的客户端还包括:" + +#~ msgid "" +#~ "Please follow the first section on " +#~ "`Run Flower using Docker " +#~ "`_ which covers this" +#~ " step in more detail." +#~ msgstr "" + +#~ msgid "" +#~ "If the section is completely empty " +#~ "(without any token) or non-existant, " +#~ "the changelog will just contain the " +#~ "title of the PR for the changelog" +#~ " entry, without any description." +#~ msgstr "" + +#~ msgid "Example: Walk-Through PyTorch & MNIST" +#~ msgstr "实例: PyTorch 和 MNIST 的演练" + +#~ msgid "" +#~ "In this tutorial we will learn, " +#~ "how to train a Convolutional Neural " +#~ "Network on MNIST using Flower and " +#~ "PyTorch." +#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 MNIST 上训练卷积神经网络。" + +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead an install PyTorch and " +#~ "the **torchvision** library:" +#~ msgstr "我们想用 PyTorch 来做计算机视觉任务,需要先安装 PyTorch 和 **torchvision** 库:" + +#~ msgid "Ready... Set... Train!" +#~ msgstr "准备...设置...训练!" 
+ +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Basic MNIST Example" +#~ " `_. " +#~ "This will allow you see how easy" +#~ " it is to wrap your code with" +#~ " Flower and begin training in a " +#~ "federated way. We provide you with " +#~ "two helper scripts, namely *run-" +#~ "server.sh*, and *run-clients.sh*. Don't " +#~ "be afraid to look inside, they are" +#~ " simple enough =)." +#~ msgstr "" +#~ "现在我们已经安装了所有的依赖包,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 " +#~ "PyTorch 的 `Basic MNIST Example " +#~ "`_。您会发现用 " +#~ "Flower 来封装您的代码并进行联邦学习训练是多么容易。我们为您提供了两个辅助脚本,即 *run-" +#~ "server.sh* 和 *run-clients.sh*。别害怕,它们很简单 =)。" + +#~ msgid "" +#~ "Go ahead and launch on a terminal" +#~ " the *run-server.sh* script first as" +#~ " follows:" +#~ msgstr "首先在终端上启动 *run-server.sh* 脚本,如下所示:" + +#~ msgid "Now that the server is up and running, go ahead and launch the clients." +#~ msgstr "现在服务器已经启动并运行,请继续启动客户端。" + +#~ msgid "" +#~ "Et voilà! You should be seeing the" +#~ " training procedure and, after a few" +#~ " iterations, the test accuracy for " +#~ "each client." +#~ msgstr "然后就可以了!您应该能看到训练过程,以及经过几次反复后,每个客户端的测试准确率。" + +#~ msgid "Now, let's see what is really happening inside." +#~ msgstr "现在,让我们看看里面到底发生了什么。" + +#~ msgid "" +#~ "Inside the server helper script *run-" +#~ "server.sh* you will find the following" +#~ " code that basically runs the " +#~ ":code:`server.py`" +#~ msgstr "在服务器辅助脚本 *run-server.sh* 中,你可以找到以下代码,这些代码基本上都是运行 :code:`server.py` 的代码" + +#~ msgid "" +#~ "We can go a bit deeper and " +#~ "see that :code:`server.py` simply launches " +#~ "a server that will coordinate three " +#~ "rounds of training. 
Flower Servers are" +#~ " very customizable, but for simple " +#~ "workloads, we can start a server " +#~ "using the :ref:`start_server ` function and leave " +#~ "all the configuration possibilities at " +#~ "their default values, as seen below." +#~ msgstr "" +#~ "我们可以再深入一点,:code:`server.py` 只是启动了一个服务器,该服务器将协调三轮训练。Flower " +#~ "服务器是非常容易修改的,但对于简单的工作,我们可以使用 :ref:`start_server `函数启动服务器,并将所有可能的配置保留为默认值,如下所示。" + +#~ msgid "" +#~ "Next, let's take a look at the " +#~ "*run-clients.sh* file. You will see " +#~ "that it contains the main loop " +#~ "that starts a set of *clients*." +#~ msgstr "接下来,让我们看看 *run-clients.sh* 文件。您会看到它包含了用来启动多个 *客户端* 的代码。" + +#~ msgid "" +#~ "**cid**: is the client ID. It is" +#~ " an integer that uniquely identifies " +#~ "client identifier." +#~ msgstr "**cid**:是客户 ID。它是一个整数,可唯一标识客户标识符。" + +#~ msgid "**sever_address**: String that identifies IP and port of the server." +#~ msgstr "**sever_address**: 标识服务器 IP 和端口的字符串。" + +#~ msgid "" +#~ "**nb_clients**: This defines the number " +#~ "of clients being created. This piece " +#~ "of information is not required by " +#~ "the client, but it helps us " +#~ "partition the original MNIST dataset to" +#~ " make sure that every client is " +#~ "working on unique subsets of both " +#~ "*training* and *test* sets." +#~ msgstr "" +#~ "**nb_clients**: 这定义了正在创建的客户端数量。客户端并不需要这一信息,但它有助于我们对原始 " +#~ "MNIST 数据集进行划分,以确保每个客户端都在 *training* 和 *test*" +#~ " 数据集上有独立的数据。" + +#~ msgid "" +#~ "Again, we can go deeper and look" +#~ " inside :code:`flwr_example/quickstart-" +#~ "pytorch/client.py`. After going through the" +#~ " argument parsing code at the " +#~ "beginning of our :code:`main` function, " +#~ "you will find a call to " +#~ ":code:`mnist.load_data`. This function is " +#~ "responsible for partitioning the original " +#~ "MNIST datasets (*training* and *test*) " +#~ "and returning a :code:`torch.utils.data.DataLoader`" +#~ " s for each of them. 
We then" +#~ " instantiate a :code:`PytorchMNISTClient` object" +#~ " with our client ID, our DataLoaders," +#~ " the number of epochs in each " +#~ "round, and which device we want to" +#~ " use for training (CPU or GPU)." +#~ msgstr "" +#~ "我们可以深入看一下 :code:`flwr_example/quickstart-" +#~ "pytorch/client.py`。查看 :code:`main` 函数开头的参数解析代码后,你会发现一个对" +#~ " :code:`mnist.load_data` 的调用。该函数负责分割原始 MNIST " +#~ "数据集(*training* 和 *test*),并为每个数据集返回一个 " +#~ ":code:`torch.utils.data.DataLoader` 。然后,我们实例化一个 " +#~ ":code:`PytorchMNISTClient` 对象,其中包含我们的客户端 ID、 " +#~ "DataLoader、每一轮中的遍历数,以及我们希望用于训练的设备(CPU 或 GPU)。" + +#~ msgid "" +#~ "The :code:`PytorchMNISTClient` object when " +#~ "finally passed to :code:`fl.client.start_client` " +#~ "along with the server's address as " +#~ "the training process begins." +#~ msgstr "" +#~ "当训练过程开始时,:code:`PytorchMNISTClient` 对象会连同服务器地址一起传递给 " +#~ ":code:`fl.client.start_client`。" + +#~ msgid "A Closer Look" +#~ msgstr "仔细看一下" + +#~ msgid "" +#~ "Now, let's look closely into the " +#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" +#~ ".quickstart-pytorch.mnist` and see what it" +#~ " is doing:" +#~ msgstr "" +#~ "现在,让我们仔细研究一下 :code:`flwr_example.quickstart-pytorch.mnist`" +#~ " 中的 :code:`PytorchMNISTClient`,看看它在做什么:" + +#~ msgid "" +#~ "The first thing to notice is that" +#~ " :code:`PytorchMNISTClient` instantiates a CNN" +#~ " model inside its constructor" +#~ msgstr "首先要注意的是 :code:`PytorchMNISTClient` 在其构造函数中实例化了一个 CNN 模型" + +#~ msgid "" +#~ "The code for the CNN is available" +#~ " under :code:`quickstart-pytorch.mnist` and " +#~ "it is reproduced below. It is the" +#~ " same network found in `Basic MNIST" +#~ " Example " +#~ "`_." 
+#~ msgstr "" +#~ "CNN 的代码可在 :code:`quickstart-pytorch.mnist` " +#~ "下找到,现复制如下。它与 `Basic MNIST Example " +#~ "`_中的网络相同。" + +#~ msgid "" +#~ "The second thing to notice is that" +#~ " :code:`PytorchMNISTClient` class inherits from" +#~ " the :code:`fl.client.Client`, and hence it" +#~ " must implement the following methods:" +#~ msgstr "" +#~ "第二件要注意的事是 :code:`PytorchMNISTClient` 类继承自 " +#~ ":code:`fl.client.Client`,因此它必须实现以下方法:" + +#~ msgid "" +#~ "When comparing the abstract class to " +#~ "its derived class :code:`PytorchMNISTClient` " +#~ "you will notice that :code:`fit` calls" +#~ " a :code:`train` function and that " +#~ ":code:`evaluate` calls a :code:`test`: " +#~ "function." +#~ msgstr "" +#~ "将抽象类与其派生类 :code:`PytorchMNISTClient` 进行比较时,您会发现 " +#~ ":code:`fit` 调用了一个 :code:`train` 函数,而 " +#~ ":code:`evaluate` 则调用了一个 :code:`test`: 函数。" + +#~ msgid "" +#~ "These functions can both be found " +#~ "inside the same :code:`quickstart-" +#~ "pytorch.mnist` module:" +#~ msgstr "这些函数都可以在同一个 :code:`quickstart-pytorch.mnist` 模块中找到:" + +#~ msgid "" +#~ "Observe that these functions encapsulate " +#~ "regular training and test loops and " +#~ "provide :code:`fit` and :code:`evaluate` with" +#~ " final statistics for each round. You" +#~ " could substitute them with your " +#~ "custom train and test loops and " +#~ "change the network architecture, and the" +#~ " entire example would still work " +#~ "flawlessly. As a matter of fact, " +#~ "why not try and modify the code" +#~ " to an example of your liking?" +#~ msgstr "" +#~ "请注意,这些函数封装了常规的训练和测试循环,并为 :code:`fit` 和 " +#~ ":code:`evaluate` " +#~ "提供了每轮的最终统计数据。您可以用自定义的训练和测试循环来替代它们,并改变网络结构,整个示例仍然可以完美运行。事实上,为什么不按照自己的喜好修改代码呢?" 
+ +#~ msgid "Give It a Try" +#~ msgstr "试试看" + +#~ msgid "" +#~ "Looking through the quickstart code " +#~ "description above will have given a " +#~ "good understanding of how *clients* and" +#~ " *servers* work in Flower, how to " +#~ "run a simple experiment, and the " +#~ "internals of a client wrapper. Here " +#~ "are a few things you could try " +#~ "on your own and get more " +#~ "experience with Flower:" +#~ msgstr "" +#~ "通过上面的快速入门代码描述,你将对 Flower " +#~ "中*客户端*和*服务器*的工作方式、如何运行一个简单的实验以及客户端封装器的内部结构有一个很好的了解。您可以自己尝试以下内容,以获得更多使用" +#~ " Flower 的经验:" + +#~ msgid "" +#~ "Try and change :code:`PytorchMNISTClient` so" +#~ " it can accept different architectures." +#~ msgstr "尝试修改 :code:`PytorchMNISTClient`,使其可以接受不同的架构。" + +#~ msgid "" +#~ "Modify the :code:`train` function so " +#~ "that it accepts different optimizers" +#~ msgstr "修改 :code:`train` 函数,使其接受不同的优化器" + +#~ msgid "" +#~ "Modify the :code:`test` function so that" +#~ " it proves not only the top-1 " +#~ "(regular accuracy) but also the top-5" +#~ " accuracy?" +#~ msgstr "修改 :code:`test` 函数,使其不仅能输出前 1 名(常规精确度),还能证明前 5 名的精确度?" + +#~ msgid "" +#~ "Go larger! Try to adapt the code" +#~ " to larger images and datasets. Why" +#~ " not try training on ImageNet with" +#~ " a ResNet-50?" +#~ msgstr "让我们尝试让代码适应更大的图像和数据集。为什么不尝试使用 ResNet-50 在 ImageNet 上进行训练呢?" + +#~ msgid "You are ready now. Enjoy learning in a federated way!" +#~ msgstr "您现在已经准备就绪。尽情享受联邦学习的乐趣吧!" + +#~ msgid "Differential privacy" +#~ msgstr "差别隐私" + +#~ msgid "" +#~ "Flower provides differential privacy (DP) " +#~ "wrapper classes for the easy integration" +#~ " of the central DP guarantees " +#~ "provided by DP-FedAvg into training " +#~ "pipelines defined in any of the " +#~ "various ML frameworks that Flower is " +#~ "compatible with." 
+#~ msgstr "" +#~ "Flower 提供了差分隐私 (DP) 封装类,可将 DP-FedAvg " +#~ "提供的核心 DP 轻松集成到 Flower 兼容的各种 ML " +#~ "框架中定义的训练模式中。" + +#~ msgid "" +#~ "Please note that these components are" +#~ " still experimental; the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." +#~ msgstr "请注意,这些组件仍处于试验阶段,如何为特定任务正确配置 DP 仍是一个尚未解决的问题。" + +#~ msgid "" +#~ "The name DP-FedAvg is misleading " +#~ "since it can be applied on top " +#~ "of any FL algorithm that conforms " +#~ "to the general structure prescribed by" +#~ " the FedOpt family of algorithms." +#~ msgstr "DP-FedAvg 这个名称容易引起误解,因为它可以应用于任何符合 FedOpt 系列算法规定的一般结构的 FL 算法之上。" + +#~ msgid "DP-FedAvg" +#~ msgstr "DP-FedAvg" + +#~ msgid "" +#~ "DP-FedAvg, originally proposed by " +#~ "McMahan et al. [mcmahan]_ and extended" +#~ " by Andrew et al. [andrew]_, is " +#~ "essentially FedAvg with the following " +#~ "modifications." +#~ msgstr "DP-FedAvg 最初由McMahan等人提出,并由Andrew等人加以扩展。" + +#~ msgid "" +#~ "**Clipping** : The influence of each " +#~ "client's update is bounded by clipping" +#~ " it. This is achieved by enforcing" +#~ " a cap on the L2 norm of " +#~ "the update, scaling it down if " +#~ "needed." +#~ msgstr "**裁剪** : 裁剪会影响到每个客户端的模型参数。具体做法是对参数的 L2 准则设置上限,必要时将其缩减。" + +#~ msgid "" +#~ "**Noising** : Gaussian noise, calibrated " +#~ "to the clipping threshold, is added " +#~ "to the average computed at the " +#~ "server." +#~ msgstr "**噪声** : 在服务器计算出的平均值中加入高斯噪声,该噪声根据剪切阈值进行校准。" + +#~ msgid "" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. This variability is " +#~ "crucial in understanding its impact on" +#~ " differential privacy guarantees, emphasizing " +#~ "the need for an adaptive approach " +#~ "[andrew]_ that continuously adjusts the " +#~ "clipping threshold to track a " +#~ "prespecified quantile of the update norm" +#~ " distribution." 
+#~ msgstr "事实证明,参数更新准则的分布会随着任务的不同而变化,并随着训练的进展而演变。因此,我们采用了一种自适应方法,该方法会不断调整剪切阈值,以跟踪参数更新准则分布的预设量化值。" + +#~ msgid "Simplifying Assumptions" +#~ msgstr "简化假设" + +#~ msgid "" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realizes the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." +#~ msgstr "" +#~ "我们提出(并试图执行)了一系列必须满足的假设,以确保训练过程真正实现用户在配置设置时所定的 " +#~ ":math:`(\\epsilon,\\delta)` 。" + +#~ msgid "" +#~ "**Fixed-size subsampling** :Fixed-size " +#~ "subsamples of the clients must be " +#~ "taken at each round, as opposed to" +#~ " variable-sized Poisson subsamples." +#~ msgstr "** 固定大小的子样本** :与可变大小的泊松分布子样本相比,每轮必须抽取固定大小的客户端子样本。" + +#~ msgid "" +#~ "**Unweighted averaging** : The contributions" +#~ " from all the clients must weighted" +#~ " equally in the aggregate to " +#~ "eliminate the requirement for the server" +#~ " to know in advance the sum of" +#~ " the weights of all clients available" +#~ " for selection." +#~ msgstr "**非加权平均**: 所有客户端的贡献必须加权相等,这样服务器就不需要事先知道所有客户的权重总和。" + +#~ msgid "" +#~ "**No client failures** : The set " +#~ "of available clients must stay constant" +#~ " across all rounds of training. In" +#~ " other words, clients cannot drop out" +#~ " or fail." +#~ msgstr "**没有失败的客户端** : 在各轮训练中,可用客户端的数量必须保持不变。换句话说,客户端不能退出或失败。" + +#~ msgid "" +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold, while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." +#~ msgstr "前两种方法有助于消除将噪声校准为削波阈值所带来的诸多复杂问题,而第三种方法则需要符合隐私分析的假设。" + +#~ msgid "" +#~ "These restrictions are in line with " +#~ "constraints imposed by Andrew et al. " +#~ "[andrew]_." 
+#~ msgstr "这些限制与 Andrew 等人所施加的限制一致。" + +#~ msgid "Customizable Responsibility for Noise injection" +#~ msgstr "可定制的噪声注入" + +#~ msgid "" +#~ "In contrast to other implementations " +#~ "where the addition of noise is " +#~ "performed at the server, you can " +#~ "configure the site of noise injection" +#~ " to better match your threat model." +#~ " We provide users with the " +#~ "flexibility to set up the training " +#~ "such that each client independently adds" +#~ " a small amount of noise to the" +#~ " clipped update, with the result that" +#~ " simply aggregating the noisy updates " +#~ "is equivalent to the explicit addition" +#~ " of noise to the non-noisy " +#~ "aggregate at the server." +#~ msgstr "与其他在服务器上添加噪声的实现方法不同,您可以配置噪声注入的位置,以便更好地匹配您的威胁模型。我们为用户提供了设置训练的灵活性,使每个客户端都能独立地为剪切参数更新添加少量噪声,这样,只需聚合噪声更新,就相当于在服务器上为非噪声聚合添加噪声了。" + +#~ msgid "" +#~ "To be precise, if we let :math:`m`" +#~ " be the number of clients sampled " +#~ "each round and :math:`\\sigma_\\Delta` be " +#~ "the scale of the total Gaussian " +#~ "noise that needs to be added to" +#~ " the sum of the model updates, " +#~ "we can use simple maths to show" +#~ " that this is equivalent to each " +#~ "client adding noise with scale " +#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." +#~ msgstr "" +#~ "准确地说,我们假设每轮采样的客户端数量为:math:`m`,:math:`\\sigma_\\Delta` " +#~ "为需要添加到模型更新总和中的总高斯噪声的规模,我们就可以用简单的数学方法证明了,这相当于每个客户端都添加了规模为 " +#~ ":math:`\\sigma_\\Delta/\\sqrt{m}` 的噪声。" + +#~ msgid "Wrapper-based approach" +#~ msgstr "基于封装的方法" + +#~ msgid "" +#~ "Introducing DP to an existing workload" +#~ " can be thought of as adding an" +#~ " extra layer of security around it." +#~ " This inspired us to provide the " +#~ "additional server and client-side logic" +#~ " needed to make the training process" +#~ " differentially private as wrappers for " +#~ "instances of the :code:`Strategy` and " +#~ ":code:`NumPyClient` abstract classes respectively." 
+#~ " This wrapper-based approach has the" +#~ " advantage of being easily composable " +#~ "with other wrappers that someone might" +#~ " contribute to the Flower library in" +#~ " the future, e.g., for secure " +#~ "aggregation. Using Inheritance instead can " +#~ "be tedious because that would require" +#~ " the creation of new sub- classes " +#~ "every time a new class implementing " +#~ ":code:`Strategy` or :code:`NumPyClient` is " +#~ "defined." +#~ msgstr "" +#~ "在现有工作负载中引入 DP " +#~ "可以被认为是在其周围增加了一层额外的安全性。受此启发,我们提供了额外的服务器端和客户端逻辑,分别作为 " +#~ ":code:`Strategy` 和 :code:`NumPyClient` " +#~ "抽象类实例的封装器,使训练过程具有不同的隐私性。这种基于封装器的方法的优点是可以很容易地与将来有人贡献给 Flower " +#~ "的其他封装器(例如用于安全聚合的封装器)进行组合。使用继承可能会比较繁琐,因为每次定义实现 :code:`Strategy`" +#~ " 或 :code:`NumPyClient` 的新类时,都需要创建新的子类。" + +#~ msgid "" +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean-valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." 
+#~ msgstr "" +#~ "我们的第一版解决方案是定义一个装饰器,其构造函数接受一个布尔值变量,表示是否启用自适应剪裁。我们很快意识到,这样会使其 " +#~ ":code:`__init__()` " +#~ "函数中与自适应裁剪超参数相对应的变量变得杂乱无章,而这些变量在自适应裁剪被禁用时将保持未使用状态。要实现更简洁的功能,可以将该功能拆分为两个装饰器,即" +#~ " :code:`DPFedAvgFixed` 和 " +#~ ":code:`DPFedAvgAdaptive`,后者是前者的子类。这两个类的构造函数都接受一个布尔参数 " +#~ ":code:`server_side_noising`,顾名思义,它决定了在哪里加噪声。" + +#~ msgid "" +#~ "The server-side capabilities required " +#~ "for the original version of DP-" +#~ "FedAvg, i.e., the one which performed" +#~ " fixed clipping, can be completely " +#~ "captured with the help of wrapper " +#~ "logic for just the following two " +#~ "methods of the :code:`Strategy` abstract " +#~ "class." +#~ msgstr "" +#~ "只需对 :code:`Strategy` 抽象类的以下两个方法进行封装,就能完全捕获 DP-" +#~ "FedAvg 原始版本(即执行固定剪裁的版本)所需的服务器端功能。" + +#~ msgid "" +#~ ":code:`configure_fit()` : The config " +#~ "dictionary being sent by the wrapped " +#~ ":code:`Strategy` to each client needs to" +#~ " be augmented with an additional " +#~ "value equal to the clipping threshold" +#~ " (keyed under :code:`dpfedavg_clip_norm`) and," +#~ " if :code:`server_side_noising=true`, another one" +#~ " equal to the scale of the " +#~ "Gaussian noise that needs to be " +#~ "added at the client (keyed under " +#~ ":code:`dpfedavg_noise_stddev`). This entails " +#~ "*post*-processing of the results returned " +#~ "by the wrappee's implementation of " +#~ ":code:`configure_fit()`." +#~ msgstr "" +#~ ":code:`configure_fit()` :由封装的 :code:`Strategy` " +#~ "发送到每个客户端的配置字典需要使用等于裁剪阈值的附加值(在 :code:`dpfedavg_clip_norm` " +#~ "下键入)进行扩充。并且,如果 " +#~ "server_side_noising=true,则另一个值等于需要在客户端添加的高斯噪声的大小(在 " +#~ "dpfedavg_noise_stddev 下键入)。这需要对封装后的configure_fit() " +#~ "所返回的结果进行后处理。" + +#~ msgid "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. 
In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." +#~ msgstr "" +#~ ":code:`aggregate_fit()`: " +#~ "我们会检查是否有任何客户端在本轮超时前退出或未能上传参数更新。在这种情况下,我们需要中止当前一轮,丢弃已收到的所有参数更新,然后继续下一轮。另一方面,如果所有客户端都成功响应,我们就必须通过拦截" +#~ " :code:`FitRes` 的 :code:`parameters` 字段并将其设置为 " +#~ "1,强制以不加权的方式平均更新。此外,如果 " +#~ ":code:`server_side_noising=true`,每次更新都会受到一定量的噪声扰动,其扰动量相当于启用客户端噪声时的扰动量。" +#~ " 这就需要在将本方法的参数传递给封装的 :code:`aggregate_fit()` " +#~ "之前,对参数进行*预*处理。" + +#~ msgid "" +#~ "We can't directly change the aggregation" +#~ " function of the wrapped strategy to" +#~ " force it to add noise to the" +#~ " aggregate, hence we simulate client-" +#~ "side noising to implement server-side" +#~ " noising." +#~ msgstr "我们无法直接改变封装策略的聚合函数,迫使它在聚合中添加噪声,因此我们模拟客户端噪声来实现服务器端噪声。" + +#~ msgid "" +#~ "These changes have been put together " +#~ "into a class called :code:`DPFedAvgFixed`, " +#~ "whose constructor accepts the strategy " +#~ "being decorated, the clipping threshold " +#~ "and the number of clients sampled " +#~ "every round as compulsory arguments. 
The" +#~ " user is expected to specify the " +#~ "clipping threshold since the order of" +#~ " magnitude of the update norms is " +#~ "highly dependent on the model being " +#~ "trained and providing a default value" +#~ " would be misleading. The number of" +#~ " clients sampled at every round is" +#~ " required to calculate the amount of" +#~ " noise that must be added to " +#~ "each individual update, either by the" +#~ " server or the clients." +#~ msgstr "" +#~ "这些变化被整合到一个名为 :code:`DPFedAvgFixed` " +#~ "的类中,其构造函数接受被装饰的策略、剪切阈值和每轮采样的客户数作为必选参数。用户需要指定剪切阈值,因为参数更新规范的数量级在很大程度上取决于正在训练的模型,提供默认值会产生误导。每轮采样的客户端数量是计算服务器或客户在每次参数更新时添加的噪音量所必需的。" + +#~ msgid "" +#~ "The additional functionality required to " +#~ "facilitate adaptive clipping has been " +#~ "provided in :code:`DPFedAvgAdaptive`, a " +#~ "subclass of :code:`DPFedAvgFixed`. It " +#~ "overrides the above-mentioned methods to" +#~ " do the following." +#~ msgstr "" +#~ "自适应剪裁所需的附加功能在 :code:`DPFedAvgAdaptive` 中提供,其是 " +#~ ":code:`DPFedAvgFixed` 的子类。它重写了上述方法,以实现以下功能。" + +#~ msgid "" +#~ ":code:`configure_fit()` : It intercepts the" +#~ " config dict returned by " +#~ ":code:`super.configure_fit()` to add the " +#~ "key-value pair " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " +#~ "which the client interprets as an " +#~ "instruction to include an indicator bit" +#~ " (1 if update norm <= clipping " +#~ "threshold, 0 otherwise) in the results" +#~ " returned by it." +#~ msgstr "" +#~ ":code:`configure_fit()`:它截取由 :code:`super.configure_fit()` " +#~ "返回的 config 字典,并在其中添加键-值对 " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True\",客户端将其解释为在返回结果中包含一个指示位(如果参数更新范式" +#~ " <= 剪裁阈值,则为 1,否则为 0)的指令。" + +#~ msgid "" +#~ ":code:`aggregate_fit()` : It follows a " +#~ "call to :code:`super.aggregate_fit()` with one" +#~ " to :code:`__update_clip_norm__()`, a procedure" +#~ " which adjusts the clipping threshold " +#~ "on the basis of the indicator bits" +#~ " received from the sampled clients." 
+#~ msgstr ":code:`aggregate_fit()`:在调用:code:`super.aggregate_fit()`后,再调用:code:`__update_clip_norm__()`,该过程根据从采样客户端接收到的指示位调整裁剪阈值。" + +#~ msgid "" +#~ "The client-side capabilities required " +#~ "can be completely captured through " +#~ "wrapper logic for just the :code:`fit()`" +#~ " method of the :code:`NumPyClient` abstract" +#~ " class. To be precise, we need " +#~ "to *post-process* the update computed" +#~ " by the wrapped client to clip " +#~ "it, if necessary, to the threshold " +#~ "value supplied by the server as " +#~ "part of the config dictionary. In " +#~ "addition to this, it may need to" +#~ " perform some extra work if either" +#~ " (or both) of the following keys " +#~ "are also present in the dict." +#~ msgstr "" +#~ "客户端所需的功能完全可以通过 :code:`NumPyClient` 抽象类的 " +#~ ":code:`fit()` " +#~ "方法的封装逻辑来实现。准确地说,我们需要对封装客户端计算的参数更新进行处理,以便在必要时将其剪切到服务器作为配置字典的一部分提供的阈值。除此之外,如果配置字典中还存在以下任一(或两个)键,客户端可能还需要执行一些额外的工作。" + +#~ msgid "" +#~ ":code:`dpfedavg_noise_stddev` : Generate and " +#~ "add the specified amount of noise " +#~ "to the clipped update." +#~ msgstr "code:`dpfedavg_noise_stddev`:生成并在剪切参数更新中添加指定数量的噪声。" + +#~ msgid "" +#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" +#~ " metrics dict in the :code:`FitRes` " +#~ "object being returned to the server " +#~ "with an indicator bit, calculated as " +#~ "described earlier." +#~ msgstr "" +#~ ":code:`dpfedavg_adaptive_clip_enabled`:在返回给服务器的 :code:`FitRes`" +#~ " 对象中的度量值字典中增加一个指标位,计算方法如前所述。" + +#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" +#~ msgstr "进行 :math:`(epsilon, \\delta)` 分析" + +#~ msgid "" +#~ "Assume you have trained for :math:`n`" +#~ " rounds with sampling fraction :math:`q`" +#~ " and noise multiplier :math:`z`. In " +#~ "order to calculate the :math:`\\epsilon` " +#~ "value this would result in for a" +#~ " particular :math:`\\delta`, the following " +#~ "script may be used." 
+#~ msgstr "" +#~ "假设您已经训练了 :math:`n` 轮,采样比例为 :math:`q`,噪声乘数为 " +#~ ":math:`z`。为了计算特定 :math:`\\delta` 的 :math:`epsilon`" +#~ " 值,可以使用下面的脚本。" + +#~ msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`ClientApp `\\ " +#~ "\\(client\\_fn\\[\\, mods\\]\\)" +#~ msgstr "" + +#~ msgid ":py:obj:`flwr.server.driver `\\" +#~ msgstr "" + +#~ msgid "Flower driver SDK." +#~ msgstr "Flower 服务器。" + +#~ msgid "driver" +#~ msgstr "服务器" + +#~ msgid "" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`GrpcDriver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "`GrpcDriver` provides access to the gRPC Driver API/service." +#~ msgstr "" + +#~ msgid ":py:obj:`get_nodes `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(task\\_ids\\)" +#~ msgstr "" + +#~ msgid "Get task results." +#~ msgstr "汇总训练结果。" + +#~ msgid "" +#~ ":py:obj:`push_task_ins " +#~ "`\\ " +#~ "\\(task\\_ins\\_list\\)" +#~ msgstr "" + +#~ msgid "Schedule tasks." +#~ msgstr "" + +#~ msgid "GrpcDriver" +#~ msgstr "" + +#~ msgid ":py:obj:`connect `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Connect to the Driver API." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`create_run " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "Request for run ID." +#~ msgstr "Flower 基线申请" + +#~ msgid "" +#~ ":py:obj:`disconnect " +#~ "`\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Disconnect from the Driver API." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`get_nodes `\\" +#~ " \\(req\\)" +#~ msgstr "" + +#~ msgid "Get client IDs." 
+#~ msgstr "返回客户端(本身)。" + +#~ msgid "" +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`push_task_ins " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "" +#~ "Flower usage examples used to be " +#~ "bundled with Flower in a package " +#~ "called ``flwr_example``. We are migrating " +#~ "those examples to standalone projects to" +#~ " make them easier to use. All " +#~ "new examples are based in the " +#~ "directory `examples " +#~ "`_." +#~ msgstr "" +#~ "Flower 的使用示例曾与 Flower 捆绑在一个名为 ``flwr_example``" +#~ " 的软件包中。我们正在将这些示例迁移到独立项目中,以使它们更易于使用。所有新示例都位于目录 `examples " +#~ "`_。" + +#~ msgid "Quickstart TensorFlow/Keras" +#~ msgstr "快速入门 TensorFlow/Keras" + +#~ msgid "Legacy Examples (`flwr_example`)" +#~ msgstr "传统示例 (`flwr_example`)" + +#~ msgid "" +#~ "The useage examples in `flwr_example` " +#~ "are deprecated and will be removed " +#~ "in the future. New examples are " +#~ "provided as standalone projects in " +#~ "`examples `_." +#~ msgstr "" +#~ "在 `flwr_example` 中的使用示例已被弃用,今后将被移除。新示例将作为独立项目在 " +#~ "`examples `_" +#~ " 中提供。" + +#~ msgid "Extra Dependencies" +#~ msgstr "额外依赖" + +#~ msgid "" +#~ "The core Flower framework keeps a " +#~ "minimal set of dependencies. The " +#~ "examples demonstrate Flower in the " +#~ "context of different machine learning " +#~ "frameworks, so additional dependencies need" +#~ " to be installed before an example" +#~ " can be run." +#~ msgstr "" +#~ "Flower 核心框架只保留了最低限度的依赖项。这些示例在不同机器学习框架的背景下演示了 " +#~ "Flower,因此在运行示例之前需要安装额外的依赖项。" + +#~ msgid "For PyTorch examples::" +#~ msgstr "PyTorch 示例::" + +#~ msgid "For TensorFlow examples::" +#~ msgstr "TensorFlow 示例::" + +#~ msgid "For both PyTorch and TensorFlow examples::" +#~ msgstr "PyTorch 和 TensorFlow 示例::" + +#~ msgid "" +#~ "Please consult :code:`pyproject.toml` for a" +#~ " full list of possible extras " +#~ "(section :code:`[tool.poetry.extras]`)." 
+#~ msgstr "" +#~ "请参阅 :code:`pyproject.toml`,了解可能的 extras 的完整列表(章节 " +#~ ":code:`[tool.poems.extras]`)。" + +#~ msgid "PyTorch Examples" +#~ msgstr "PyTorch 示例" + +#~ msgid "" +#~ "Our PyTorch examples are based on " +#~ "PyTorch 1.7. They should work with " +#~ "other releases as well. So far, we" +#~ " provide the following examples." +#~ msgstr "我们的 PyTorch 示例基于 PyTorch 1.7。它们应该也能在其他版本中使用。到目前为止,我们提供了以下示例。" + +#~ msgid "CIFAR-10 Image Classification" +#~ msgstr "CIFAR-10 图像分类" + +#~ msgid "" +#~ "`CIFAR-10 and CIFAR-100 " +#~ "`_ are " +#~ "popular RGB image datasets. The Flower" +#~ " CIFAR-10 example uses PyTorch to " +#~ "train a simple CNN classifier in a" +#~ " federated learning setup with two " +#~ "clients." +#~ msgstr "" +#~ "CIFAR-10 和 CIFAR-100 " +#~ "``_ 是流行的 RGB" +#~ " 图像数据集。Flower CIFAR-10 示例使用 PyTorch " +#~ "在有两个客户端的联邦学习设置中训练一个简单的 CNN 分类器。" + +#~ msgid "First, start a Flower server:" +#~ msgstr "首先,启动 Flower 服务器:" + +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" + +#~ msgid "Then, start the two clients in a new terminal window:" +#~ msgstr "然后,在新的终端窗口中启动两个客户端:" + +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." +#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_cifar`。" + +#~ msgid "ImageNet-2012 Image Classification" +#~ msgstr "ImageNet-2012 图像分类" + +#~ msgid "" +#~ "`ImageNet-2012 `_ is " +#~ "one of the major computer vision " +#~ "datasets. The Flower ImageNet example " +#~ "uses PyTorch to train a ResNet-18 " +#~ "classifier in a federated learning setup" +#~ " with ten clients." 
+#~ msgstr "" +#~ "ImageNet-2012 `_ " +#~ "是主要的计算机视觉数据集之一。Flower ImageNet 示例使用 PyTorch " +#~ "在有十个客户端的联邦学习设置中训练 ResNet-18 分类器。" + +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" + +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." +#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_imagenet`。" + +#~ msgid "TensorFlow Examples" +#~ msgstr "TensorFlow 示例" + +#~ msgid "" +#~ "Our TensorFlow examples are based on " +#~ "TensorFlow 2.0 or newer. So far, " +#~ "we provide the following examples." +#~ msgstr "我们的 TensorFlow 示例基于 TensorFlow 2.0 或更新版本。到目前为止,我们提供了以下示例。" + +#~ msgid "Fashion-MNIST Image Classification" +#~ msgstr "Fashion-MNIST 图像分类" + +#~ msgid "" +#~ "`Fashion-MNIST `_ is often used as " +#~ "the \"Hello, world!\" of machine " +#~ "learning. We follow this tradition and" +#~ " provide an example which samples " +#~ "random local datasets from Fashion-MNIST" +#~ " and trains a simple image " +#~ "classification model over those partitions." +#~ msgstr "" +#~ "`Fashion-MNIST `_ 经常被用作机器学习的 \"你好,世界!\"。我们遵循这一传统" +#~ ",提供了一个从Fashion-MNIST 中随机抽样本地数据集的示例,并在这些分区上训练一个简单的图像分类模型。" + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" + +#~ msgid "" +#~ "For more details, see " +#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/tensorflow_fashion_mnist`。" + +#~ msgid "" +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. 
As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongise Flower," +#~ " for example, PyTorch. This tutorial " +#~ "might be removed in future versions " +#~ "of Flower." +#~ msgstr "" + +#~ msgid "|31e4b1afa87c4b968327bbeafbf184d4|" +#~ msgstr "" + +#~ msgid "|c9d935b4284e4c389a33d86b33e07c0a|" +#~ msgstr "" + +#~ msgid "|00727b5faffb468f84dd1b03ded88638|" +#~ msgstr "" + +#~ msgid "|daf0cf0ff4c24fd29439af78416cf47b|" +#~ msgstr "" + +#~ msgid "|9f093007080d471d94ca90d3e9fde9b6|" +#~ msgstr "" + +#~ msgid "|46a26e6150e0479fbd3dfd655f36eb13|" +#~ msgstr "" + +#~ msgid "|3daba297595c4c7fb845d90404a6179a|" +#~ msgstr "" + +#~ msgid "|5769874fa9c4455b80b2efda850d39d7|" +#~ msgstr "" + +#~ msgid "|ba47ffb421814b0f8f9fa5719093d839|" +#~ msgstr "" + +#~ msgid "|aeac5bf79cbf497082e979834717e01b|" +#~ msgstr "" + +#~ msgid "|ce27ed4bbe95459dba016afc42486ba2|" +#~ msgstr "" + +#~ msgid "|ae94a7f71dda443cbec2385751427d41|" +#~ msgstr "" + +#~ msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" +#~ msgstr "" + +#~ msgid "|08cb60859b07461588fe44e55810b050|" +#~ msgstr "" + diff --git a/doc/source/conf.py b/doc/source/conf.py index 88cb5c05b1d8..ba901f0dd82e 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -86,7 +86,7 @@ author = "The Flower Authors" # The full version, including alpha/beta/rc tags -release = "1.8.0" +release = "1.9.0" # -- General configuration --------------------------------------------------- @@ -162,7 +162,6 @@ def find_test_modules(package_path): # Renamed pages "installation": "how-to-install-flower.html", "configuring-clients.html": "how-to-configure-clients.html", - "quickstart_mxnet": "tutorial-quickstart-mxnet.html", "quickstart_pytorch_lightning": "tutorial-quickstart-pytorch-lightning.html", "quickstart_huggingface": "tutorial-quickstart-huggingface.html", "quickstart_pytorch": "tutorial-quickstart-pytorch.html", @@ -194,7 +193,6 @@ def find_test_modules(package_path): "quickstart-pandas": 
"tutorial-quickstart-pandas.html", "quickstart-fastai": "tutorial-quickstart-fastai.html", "quickstart-pytorch-lightning": "tutorial-quickstart-pytorch-lightning.html", - "quickstart-mxnet": "tutorial-quickstart-mxnet.html", "quickstart-scikitlearn": "tutorial-quickstart-scikitlearn.html", "quickstart-xgboost": "tutorial-quickstart-xgboost.html", "quickstart-android": "tutorial-quickstart-android.html", @@ -240,6 +238,10 @@ def find_test_modules(package_path): "people": "index.html", "organizations": "index.html", "publications": "index.html", + "quickstart_mxnet": "index.html", + "quickstart-mxnet": "index.html", + "tutorial-quickstart-mxnet": "index.html", + "example-mxnet-walk-through": "index.html", } # -- Options for HTML output ------------------------------------------------- diff --git a/doc/source/contributor-how-to-build-docker-images.rst b/doc/source/contributor-how-to-build-docker-images.rst index 5dead265bee2..bac201f6a7b9 100644 --- a/doc/source/contributor-how-to-build-docker-images.rst +++ b/doc/source/contributor-how-to-build-docker-images.rst @@ -1,8 +1,8 @@ How to build Docker Flower images locally ========================================= -Flower provides pre-made docker images on `Docker Hub `_ -that include all necessary dependencies for running the server. You can also build your own custom +Flower provides pre-made docker images on `Docker Hub `_ +that include all necessary dependencies for running the SuperLink. You can also build your own custom docker images from scratch with a different version of Python or Ubuntu if that is what you need. In this guide, we will explain what images exist and how to build them locally. @@ -20,15 +20,15 @@ Before we can start, we need to meet a few prerequisites in our local developmen :doc:`Run Flower using Docker ` which covers this step in more detail. -Currently, Flower provides two images, a base image and a server image. There will also be a client -image soon. 
The base image, as the name suggests, contains basic dependencies that both the server -and the client need. This includes system dependencies, Python and Python tools. The server image is -based on the base image, but it additionally installs the Flower server using ``pip``. +Currently, Flower provides two images, a ``base`` image and a ``superlink`` image. The base image, +as the name suggests, contains basic dependencies that the SuperLink needs. +This includes system dependencies, Python and Python tools. The SuperLink image is +based on the base image, but it additionally installs the SuperLink using ``pip``. The build instructions that assemble the images are located in the respective Dockerfiles. You can find them in the subdirectories of ``src/docker``. -Both, base and server image are configured via build arguments. Through build arguments, we can make +Both, base and SuperLink image are configured via build arguments. Through build arguments, we can make our build more flexible. For example, in the base image, we can specify the version of Python to install using the ``PYTHON_VERSION`` build argument. Some of the build arguments have default values, others must be specified when building the image. All available build arguments for each @@ -76,8 +76,8 @@ The following example creates a base image with Python 3.11.0, pip 23.0.1 and se The name of image is ``flwr_base`` and the tag ``0.1.0``. Remember that the build arguments as well as the name and tag can be adapted to your needs. These values serve as examples only. -Building the server image -------------------------- +Building the SuperLink image +---------------------------- .. list-table:: :widths: 25 45 15 15 @@ -89,47 +89,53 @@ Building the server image - Example * - ``BASE_REPOSITORY`` - The repository name of the base image. - - Defaults to ``flwr/server``. + - Defaults to ``flwr/base``. - - * - ``BASE_IMAGE_TAG`` - - The image tag of the base image. - - Defaults to ``py3.11-ubuntu22.04``. 
+ * - ``PYTHON_VERSION`` + - The Python version of the base image. + - Defaults to ``py3.11``. + - + * - ``UBUNTU_VERSION`` + - The Ubuntu version of the base image. + - Defaults to ``ubuntu22.04``. + - + * - ``FLWR_PACKAGE`` + - The PyPI package to install. + - Defaults to ``flwr``. - * - ``FLWR_VERSION`` - Version of Flower to be installed. - Yes - - ``1.7.0`` + - ``1.8.0`` -The following example creates a server image with the official Flower base image py3.11-ubuntu22.04 -and Flower 1.7.0: + +The following example creates a SuperLink image with the official Flower base image +py3.11-ubuntu22.04 and Flower 1.8.0: .. code-block:: bash - $ cd src/docker/server/ + $ cd src/docker/superlink/ $ docker build \ - --build-arg BASE_IMAGE_TAG=py3.11-ubuntu22.04 \ - --build-arg FLWR_VERSION=1.7.0 \ - -t flwr_server:0.1.0 . + --build-arg FLWR_VERSION=1.8.0 \ + -t flwr_superlink:0.1.0 . -The name of image is ``flwr_server`` and the tag ``0.1.0``. Remember that the build arguments as well -as the name and tag can be adapted to your needs. These values serve as examples only. +The name of image is ``flwr_superlink`` and the tag ``0.1.0``. Remember that the build arguments as +well as the name and tag can be adapted to your needs. These values serve as examples only. If you want to use your own base image instead of the official Flower base image, all you need to do -is set the ``BASE_REPOSITORY`` and ``BASE_IMAGE_TAG`` build arguments. The value of -``BASE_REPOSITORY`` must match the name of your image and the value of ``BASE_IMAGE_TAG`` must match -the tag of your image. - +is set the ``BASE_REPOSITORY``, ``PYTHON_VERSION`` and ``UBUNTU_VERSION`` build arguments. .. code-block:: bash - $ cd src/docker/server/ + $ cd src/docker/superlink/ $ docker build \ --build-arg BASE_REPOSITORY=flwr_base \ - --build-arg BASE_IMAGE_TAG=0.1.0 \ - --build-arg FLWR_VERSION=1.7.0 \ - -t flwr_server:0.1.0 . 
+ --build-arg PYTHON_VERSION=3.11 \ + --build-arg UBUNTU_VERSION=ubuntu22.04 \ + --build-arg FLWR_VERSION=1.8.0 \ + -t flwr_superlink:0.1.0 . After creating the image, we can test whether the image is working: .. code-block:: bash - $ docker run --rm flwr_server:0.1.0 --help + $ docker run --rm flwr_superlink:0.1.0 --help diff --git a/doc/source/contributor-how-to-install-development-versions.rst b/doc/source/contributor-how-to-install-development-versions.rst index 558ec7f8ec46..15e2939ef138 100644 --- a/doc/source/contributor-how-to-install-development-versions.rst +++ b/doc/source/contributor-how-to-install-development-versions.rst @@ -48,13 +48,13 @@ Install ``flwr`` from a specific GitHub branch (``branch-name``): Open Jupyter Notebooks on Google Colab -------------------------------------- -Open the notebook ``doc/source/tutorial-get-started-with-flower-pytorch.ipynb``: +Open the notebook ``doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb``: -- https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial-get-started-with-flower-pytorch.ipynb +- https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb Open a development version of the same notebook from branch `branch-name` by changing ``main`` to ``branch-name`` (right after ``blob``): -- https://colab.research.google.com/github/adap/flower/blob/branch-name/doc/source/tutorial-get-started-with-flower-pytorch.ipynb +- https://colab.research.google.com/github/adap/flower/blob/branch-name/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb Install a `whl` on Google Colab: diff --git a/doc/source/contributor-how-to-set-up-a-virtual-env.rst b/doc/source/contributor-how-to-set-up-a-virtual-env.rst index a3b1e7f9ad12..8b684e24c658 100644 --- a/doc/source/contributor-how-to-set-up-a-virtual-env.rst +++ b/doc/source/contributor-how-to-set-up-a-virtual-env.rst @@ -10,8 +10,13 @@ Python Version 
Flower requires at least `Python 3.8 `_, but `Python 3.10 `_ or above is recommended. -Virutualenv with Pyenv/Virtualenv ---------------------------------- +.. note:: + Due to a known incompatibility with `ray `_, + we currently recommend utilizing at most `Python 3.11 `_ for + running Flower simulations. + +Virtualenv with Pyenv/Virtualenv +-------------------------------- One of the recommended virtual environment is `pyenv `_/`virtualenv `_. Please see `Flower examples `_ for details. diff --git a/doc/source/contributor-tutorial-get-started-as-a-contributor.rst b/doc/source/contributor-tutorial-get-started-as-a-contributor.rst index 9136fea96bf6..43f9739987ac 100644 --- a/doc/source/contributor-tutorial-get-started-as-a-contributor.rst +++ b/doc/source/contributor-tutorial-get-started-as-a-contributor.rst @@ -102,6 +102,33 @@ Run Linters and Tests $ ./dev/test.sh +Add a pre-commit hook +~~~~~~~~~~~~~~~~~~~~~ + +Developers may integrate a pre-commit hook into their workflow utilizing the `pre-commit `_ library. The pre-commit hook is configured to execute two primary operations: ``./dev/format.sh`` and ``./dev/test.sh`` scripts. + +There are multiple ways developers can use this: + +1. Install the pre-commit hook to your local git directory by simply running: + + :: + + $ pre-commit install + + - Each ``git commit`` will trigger the execution of formatting and linting/test scripts. + - If in a hurry, bypass the hook using ``--no-verify`` with the ``git commit`` command. + :: + + $ git commit --no-verify -m "Add new feature" + +2. For developers who prefer not to install the hook permanently, it is possible to execute a one-time check prior to committing changes by using the following command: + + :: + + $ pre-commit run --all-files + + This executes the formatting and linting checks/tests on all the files without modifying the default behavior of ``git commit``. 
+ Run Github Actions (CI) locally ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/example-mxnet-walk-through.rst b/doc/source/example-mxnet-walk-through.rst deleted file mode 100644 index c215f709ffb2..000000000000 --- a/doc/source/example-mxnet-walk-through.rst +++ /dev/null @@ -1,360 +0,0 @@ -Example: MXNet - Run MXNet Federated -==================================== - -This tutorial will show you how to use Flower to build a federated version of an existing MXNet workload. -We are using MXNet to train a Sequential model on the MNIST dataset. -We will structure the example similar to our `PyTorch - From Centralized To Federated `_ walkthrough. MXNet and PyTorch are very similar and a very good comparison between MXNet and PyTorch is given `here `_. -First, we build a centralized training approach based on the `Handwritten Digit Recognition `_ tutorial. -Then, we build upon the centralized training code to run the training in a federated fashion. - -Before we start setting up our MXNet example, we install the :code:`mxnet` and :code:`flwr` packages: - -.. code-block:: shell - - $ pip install mxnet - $ pip install flwr - - -MNIST Training with MXNet -------------------------- - -We begin with a brief description of the centralized training code based on a :code:`Sequential` model. -If you want a more in-depth explanation of what's going on then have a look at the official `MXNet tutorial `_. - -Let's create a new file called:code:`mxnet_mnist.py` with all the components required for a traditional (centralized) MNIST training. -First, the MXNet package :code:`mxnet` needs to be imported. -You can see that we do not yet import the :code:`flwr` package for federated learning. This will be done later. - -.. 
code-block:: python - - from __future__ import print_function - from typing import Tuple - import mxnet as mx - from mxnet import gluon - from mxnet.gluon import nn - from mxnet import autograd as ag - import mxnet.ndarray as F - from mxnet import nd - - # Fixing the random seed - mx.random.seed(42) - -The :code:`load_data()` function loads the MNIST training and test sets. - -.. code-block:: python - - def load_data() -> Tuple[mx.io.NDArrayIter, mx.io.NDArrayIter]: - print("Download Dataset") - # Download MNIST data - mnist = mx.test_utils.get_mnist() - batch_size = 100 - train_data = mx.io.NDArrayIter( - mnist["train_data"], mnist["train_label"], batch_size, shuffle=True - ) - val_data = mx.io.NDArrayIter(mnist["test_data"], mnist["test_label"], batch_size) - return train_data, val_data - -As already mentioned, we will use the MNIST dataset for this machine learning workload. The model architecture (a very simple :code:`Sequential` model) is defined in :code:`model()`. - -.. code-block:: python - - def model(): - # Define simple Sequential model - net = nn.Sequential() - net.add(nn.Dense(256, activation="relu")) - net.add(nn.Dense(64, activation="relu")) - net.add(nn.Dense(10)) - net.collect_params().initialize() - return net - -We now need to define the training (function :code:`train()`) which loops over the training set and measures the loss for each batch of training examples. - -.. code-block:: python - - def train( - net: mx.gluon.nn, train_data: mx.io.NDArrayIter, epoch: int, device: mx.context - ) -> None: - trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.03}) - # Use Accuracy and Cross Entropy Loss as the evaluation metric. 
- accuracy_metric = mx.metric.Accuracy() - loss_metric = mx.metric.CrossEntropy() - metrics = mx.metric.CompositeEvalMetric() - for child_metric in [accuracy_metric, loss_metric]: - metrics.add(child_metric) - softmax_cross_entropy_loss = gluon.loss.SoftmaxCrossEntropyLoss() - for i in range(epoch): - # Reset the train data iterator. - train_data.reset() - # Calculate number of samples - num_examples = 0 - # Loop over the train data iterator. - for batch in train_data: - # Splits train data into multiple slices along batch_axis - # and copy each slice into a context. - data = gluon.utils.split_and_load( - batch.data[0], ctx_list=device, batch_axis=0 - ) - # Splits train labels into multiple slices along batch_axis - # and copy each slice into a context. - label = gluon.utils.split_and_load( - batch.label[0], ctx_list=device, batch_axis=0 - ) - outputs = [] - # Inside training scope - with ag.record(): - for x, y in zip(data, label): - z = net(x) - # Computes softmax cross entropy loss. - loss = softmax_cross_entropy_loss(z, y) - # Backpropogate the error for one iteration. - loss.backward() - outputs.append(z.softmax()) - num_examples += len(x) - # Updates internal evaluation - metric.update(label, outputs) - # Make one step of parameter update. Trainer needs to know the - # batch size of data to normalize the gradient by 1/batch_size. - trainer.step(batch.data[0].shape[0]) - # Gets the evaluation result. - trainings_metric = metrics.get_name_value() - print("Accuracy & loss at epoch %d: %s" % (i, trainings_metric)) - return trainings_metric, num_examples - -The evaluation of the model is defined in function :code:`test()`. The function loops over all test samples and measures the loss and accuracy of the model based on the test dataset. - -.. code-block:: python - - def test( - net: mx.gluon.nn, val_data: mx.io.NDArrayIter, device: mx.context - ) -> Tuple[float, float]: - # Use Accuracy and Cross Entropy Loss as the evaluation metric. 
- accuracy_metric = mx.metric.Accuracy() - loss_metric = mx.metric.CrossEntropy() - metrics = mx.metric.CompositeEvalMetric() - for child_metric in [accuracy_metric, loss_metric]: - metrics.add(child_metric) - # Reset the validation data iterator. - val_data.reset() - # Get number of samples for val_dat - num_examples = 0 - # Loop over the validation data iterator. - for batch in val_data: - # Splits validation data into multiple slices along batch_axis - # and copy each slice into a context. - data = gluon.utils.split_and_load(batch.data[0], ctx_list=device, batch_axis=0) - # Splits validation label into multiple slices along batch_axis - # and copy each slice into a context. - label = gluon.utils.split_and_load( - batch.label[0], ctx_list=device, batch_axis=0 - ) - outputs = [] - for x in data: - outputs.append(net(x).softmax()) - num_examples += len(x) - # Updates internal evaluation - metrics.update(label, outputs) - return metrics.get_name_value(), num_examples - -Having defined the data loading, model architecture, training, and evaluation we can put everything together and train our model on MNIST. Note that the GPU/CPU device for the training and testing is defined within the :code:`ctx` (context). - -.. 
code-block:: python - - def main(): - # Setup context to GPU and if not available to CPU - DEVICE = [mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()] - # Load train and validation data - train_data, val_data = load_data() - # Define sequential model - net = model() - # Start forward propagation to initialize model parameters (optional) - init = nd.random.uniform(shape=(2, 784)) - net(init) - # Start model training based on training set - train(net=net, train_data=train_data, epoch=5, device=DEVICE) - # Evaluate model using loss and accuracy - eval_metric, _ = test(net=net, val_data=val_data, device=DEVICE) - acc = eval_metric[0] - loss = eval_metric[1] - print("Evaluation Loss: ", loss) - print("Evaluation Accuracy: ", acc) - - if __name__ == "__main__": - main() - -You can now run your (centralized) MXNet machine learning workload: - -.. code-block:: python - - python3 mxnet_mnist.py - -So far this should all look fairly familiar if you've used MXNet (or even PyTorch) before. -Let's take the next step and use what we've built to create a simple federated learning system consisting of one server and two clients. - -MXNet meets Flower ------------------- - -So far, it was not easily possible to use MXNet workloads for federated learning because federated learning is not supported in MXNet. Since Flower is fully agnostic towards the underlying machine learning framework, it can be used to federated arbitrary machine learning workloads. This section will show you how Flower can be used to federate our centralized MXNet workload. - -The concept to federate an existing workload is always the same and easy to understand. -We have to start a *server* and then use the code in :code:`mxnet_mnist.py` for the *clients* that are connected to the *server*. -The *server* sends model parameters to the clients. The *clients* run the training and update the parameters. -The updated parameters are sent back to the *server* which averages all received parameter updates. 
-This describes one round of the federated learning process and we repeat this for multiple rounds. - -Our example consists of one *server* and two *clients*. Let's set up :code:`server.py` first. The *server* needs to import the Flower package :code:`flwr`. -Next, we use the :code:`start_server` function to start a server and tell it to perform three rounds of federated learning. - -.. code-block:: python - - import flwr as fl - - if __name__ == "__main__": - fl.server.start_server(server_address="0.0.0.0:8080", config=fl.server.ServerConfig(num_rounds=3)) - -We can already start the *server*: - -.. code-block:: python - - python3 server.py - -Finally, we will define our *client* logic in :code:`client.py` and build upon the previously defined MXNet training in :code:`mxnet_mnist.py`. -Our *client* needs to import :code:`flwr`, but also :code:`mxnet` to update the parameters on our MXNet model: - -.. code-block:: python - - from typing import Dict, List, Tuple - - import flwr as fl - import numpy as np - import mxnet as mx - from mxnet import nd - - import mxnet_mnist - - -Implementing a Flower *client* basically means implementing a subclass of either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. -Our implementation will be based on :code:`flwr.client.NumPyClient` and we'll call it :code:`MNISTClient`. -:code:`NumPyClient` is slightly easier to implement than :code:`Client` if you use a framework with good NumPy interoperability (like PyTorch or MXNet) because it avoids some of the boilerplate that would otherwise be necessary. -:code:`MNISTClient` needs to implement four methods, two methods for getting/setting model parameters, one method for training the model, and one method for testing the model: - -#. 
:code:`set_parameters (optional)` - * set the model parameters on the local model that are received from the server - * transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s - * loop over the list of model parameters received as NumPy :code:`ndarray`'s (think list of neural network layers) -#. :code:`get_parameters` - * get the model parameters and return them as a list of NumPy :code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects) -#. :code:`fit` - * update the parameters of the local model with the parameters received from the server - * train the model on the local training set - * get the updated local model weights and return them to the server -#. :code:`evaluate` - * update the parameters of the local model with the parameters received from the server - * evaluate the updated model on the local test set - * return the local loss and accuracy to the server - -The challenging part is to transform the MXNet parameters from :code:`NDArray` to :code:`NumPy Arrays` to make it readable for Flower. - -The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make use of the functions :code:`train()` and :code:`test()` previously defined in :code:`mxnet_mnist.py`. -So what we really do here is we tell Flower through our :code:`NumPyClient` subclass which of our already defined functions to call for training and evaluation. -We included type annotations to give you a better understanding of the data types that get passed around. - -.. 
code-block:: python - - class MNISTClient(fl.client.NumPyClient): - """Flower client implementing MNIST classification using MXNet.""" - - def __init__( - self, - model: mxnet_mnist.model(), - train_data: mx.io.NDArrayIter, - val_data: mx.io.NDArrayIter, - device: mx.context, - ) -> None: - self.model = model - self.train_data = train_data - self.val_data = val_data - self.device = device - - def get_parameters(self, config) -> List[np.ndarray]: - # Return model parameters as a list of NumPy Arrays - param = [] - for val in self.model.collect_params(".*weight").values(): - p = val.data() - # convert parameters from MXNet NDArray to Numpy Array required by Flower Numpy Client - param.append(p.asnumpy()) - return param - - def set_parameters(self, parameters: List[np.ndarray]) -> None: - # Collect model parameters and set new weight values - params = zip(self.model.collect_params(".*weight").keys(), parameters) - for key, value in params: - self.model.collect_params().setattr(key, value) - - def fit( - self, parameters: List[np.ndarray], config: Dict[str, str] - ) -> Tuple[List[np.ndarray], int]: - # Set model parameters, train model, return updated model parameters - self.set_parameters(parameters) - [accuracy, loss], num_examples = mxnet_mnist.train( - self.model, self.train_data, epoch=2, device=self.device - ) - results = {"accuracy": accuracy[1], "loss": loss[1]} - return self.get_parameters(config={}), num_examples, results - - def evaluate( - self, parameters: List[np.ndarray], config: Dict[str, str] - ) -> Tuple[int, float, float]: - # Set model parameters, evaluate model on local test dataset, return result - self.set_parameters(parameters) - [accuracy, loss], num_examples = mxnet_mnist.test( - self.model, self.val_data, device=self.device - ) - print("Evaluation accuracy & loss", accuracy, loss) - return ( - float(loss[1]), - num_examples, - {"accuracy": float(accuracy[1])}, - ) - -Having defined data loading, model architecture, training, and evaluation we 
can put everything together and train our :code:`Sequential` model on MNIST. - -.. code-block:: python - - def main() -> None: - """Load data, start MNISTClient.""" - - # Setup context to GPU and if not available to CPU - DEVICE = [mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()] - - # Load data - train_data, val_data = mxnet_mnist.load_data() - - # Define model from centralized training - model = mxnet_mnist.model() - - # Make one forward propagation to initialize parameters - init = nd.random.uniform(shape=(2, 784)) - model(init) - - # Start Flower client - client = MNISTClient(model, train_data, val_data, DEVICE) - fl.client.start_numpy_client(server_address="0.0.0.0:8080", client) - - - if __name__ == "__main__": - main() - -And that's it. You can now open two additional terminal windows and run - -.. code-block:: python - - python3 client.py - -in each window (make sure that the server is still running before you do so) and see your MXNet project run federated learning across two clients. Congratulations! - -Next Steps ----------- - -The full source code for this example: `MXNet: From Centralized To Federated (Code) `_. -Our example is of course somewhat over-simplified because both clients load the exact same dataset, which isn't realistic. -You're now prepared to explore this topic further. How about using a CNN or using a different dataset? How about adding more clients? diff --git a/doc/source/example-walkthrough-pytorch-mnist.rst b/doc/source/example-walkthrough-pytorch-mnist.rst deleted file mode 100644 index f8eacc8647fe..000000000000 --- a/doc/source/example-walkthrough-pytorch-mnist.rst +++ /dev/null @@ -1,453 +0,0 @@ -Example: Walk-Through PyTorch & MNIST -===================================== - -In this tutorial we will learn, how to train a Convolutional Neural Network on MNIST using Flower and PyTorch. - -Our example consists of one *server* and two *clients* all having the same model. 
- -*Clients* are responsible for generating individual weight-updates for the model based on their local datasets. -These updates are then sent to the *server* which will aggregate them to produce a better model. Finally, the *server* sends this improved version of the model back to each *client*. -A complete cycle of weight updates is called a *round*. - -Now that we have a rough idea of what is going on, let's get started. We first need to install Flower. You can do this by running : - -.. code-block:: shell - - $ pip install flwr - -Since we want to use PyTorch to solve a computer vision task, let's go ahead an install PyTorch and the **torchvision** library: - -.. code-block:: shell - - $ pip install torch torchvision - - -Ready... Set... Train! ----------------------- - -Now that we have all our dependencies installed, let's run a simple distributed training with two clients and one server. Our training procedure and network architecture are based on PyTorch's `Basic MNIST Example `_. This will allow you see how easy it is to wrap your code with Flower and begin training in a federated way. -We provide you with two helper scripts, namely *run-server.sh*, and *run-clients.sh*. Don't be afraid to look inside, they are simple enough =). - -Go ahead and launch on a terminal the *run-server.sh* script first as follows: - -.. code-block:: shell - - $ bash ./run-server.sh - - -Now that the server is up and running, go ahead and launch the clients. - -.. code-block:: shell - - $ bash ./run-clients.sh - - -Et voilà! You should be seeing the training procedure and, after a few iterations, the test accuracy for each client. - -.. 
code-block:: shell - - Train Epoch: 10 [30000/30016 (100%)] Loss: 0.007014 - - Train Epoch: 10 [30000/30016 (100%)] Loss: 0.000403 - - Train Epoch: 11 [30000/30016 (100%)] Loss: 0.001280 - - Train Epoch: 11 [30000/30016 (100%)] Loss: 0.000641 - - Train Epoch: 12 [30000/30016 (100%)] Loss: 0.006784 - - Train Epoch: 12 [30000/30016 (100%)] Loss: 0.007134 - - Client 1 - Evaluate on 5000 samples: Average loss: 0.0290, Accuracy: 99.16% - - Client 0 - Evaluate on 5000 samples: Average loss: 0.0328, Accuracy: 99.14% - - -Now, let's see what is really happening inside. - -Flower Server -------------- - -Inside the server helper script *run-server.sh* you will find the following code that basically runs the :code:`server.py` - -.. code-block:: bash - - python -m flwr_example.quickstart-pytorch.server - - -We can go a bit deeper and see that :code:`server.py` simply launches a server that will coordinate three rounds of training. -Flower Servers are very customizable, but for simple workloads, we can start a server using the `start_server `_ function and leave all the configuration possibilities at their default values, as seen below. - -.. code-block:: python - - import flwr as fl - - fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3)) - - -Flower Client -------------- - -Next, let's take a look at the *run-clients.sh* file. You will see that it contains the main loop that starts a set of *clients*. - -.. code-block:: bash - - python -m flwr_example.quickstart-pytorch.client \ - --cid=$i \ - --server_address=$SERVER_ADDRESS \ - --nb_clients=$NUM_CLIENTS - -* **cid**: is the client ID. It is an integer that uniquely identifies client identifier. -* **sever_address**: String that identifies IP and port of the server. -* **nb_clients**: This defines the number of clients being created. 
This piece of information is not required by the client, but it helps us partition the original MNIST dataset to make sure that every client is working on unique subsets of both *training* and *test* sets. - -Again, we can go deeper and look inside :code:`flwr_example/quickstart-pytorch/client.py`. -After going through the argument parsing code at the beginning of our :code:`main` function, you will find a call to :code:`mnist.load_data`. This function is responsible for partitioning the original MNIST datasets (*training* and *test*) and returning a :code:`torch.utils.data.DataLoader` s for each of them. -We then instantiate a :code:`PytorchMNISTClient` object with our client ID, our DataLoaders, the number of epochs in each round, and which device we want to use for training (CPU or GPU). - - -.. code-block:: python - - client = mnist.PytorchMNISTClient( - cid=args.cid, - train_loader=train_loader, - test_loader=test_loader, - epochs=args.epochs, - device=device, - ) - -The :code:`PytorchMNISTClient` object when finally passed to :code:`fl.client.start_client` along with the server's address as the training process begins. - - -A Closer Look -------------- - -Now, let's look closely into the :code:`PytorchMNISTClient` inside :code:`flwr_example.quickstart-pytorch.mnist` and see what it is doing: - -.. 
code-block:: python - - class PytorchMNISTClient(fl.client.Client): - """Flower client implementing MNIST handwritten classification using PyTorch.""" - def __init__( - self, - cid: int, - train_loader: datasets, - test_loader: datasets, - epochs: int, - device: torch.device = torch.device("cpu"), - ) -> None: - self.model = MNISTNet().to(device) - self.cid = cid - self.train_loader = train_loader - self.test_loader = test_loader - self.device = device - self.epochs = epochs - - def get_weights(self) -> fl.common.NDArrays: - """Get model weights as a list of NumPy ndarrays.""" - return [val.cpu().numpy() for _, val in self.model.state_dict().items()] - - def set_weights(self, weights: fl.common.NDArrays) -> None: - """Set model weights from a list of NumPy ndarrays. - - Parameters - ---------- - weights: fl.common.NDArrays - Weights received by the server and set to local model - - - Returns - ------- - - """ - state_dict = OrderedDict( - { - k: torch.tensor(v) - for k, v in zip(self.model.state_dict().keys(), weights) - } - ) - self.model.load_state_dict(state_dict, strict=True) - - def get_parameters(self, config) -> fl.common.ParametersRes: - """Encapsulates the weight into Flower Parameters """ - weights: fl.common.NDArrays = self.get_weights() - parameters = fl.common.ndarrays_to_parameters(weights) - return fl.common.ParametersRes(parameters=parameters) - - def fit(self, ins: fl.common.FitIns) -> fl.common.FitRes: - """Trains the model on local dataset - - Parameters - ---------- - ins: fl.common.FitIns - Parameters sent by the server to be used during training. - - Returns - ------- - Set of variables containing the new set of weights and information the client. 
- - """ - weights: fl.common.NDArrays = fl.common.parameters_to_ndarrays(ins.parameters) - fit_begin = timeit.default_timer() - - # Set model parameters/weights - self.set_weights(weights) - - # Train model - num_examples_train: int = train( - self.model, self.train_loader, epochs=self.epochs, device=self.device - ) - - # Return the refined weights and the number of examples used for training - weights_prime: fl.common.NDArrays = self.get_weights() - params_prime = fl.common.ndarrays_to_parameters(weights_prime) - fit_duration = timeit.default_timer() - fit_begin - return fl.common.FitRes( - parameters=params_prime, - num_examples=num_examples_train, - num_examples_ceil=num_examples_train, - fit_duration=fit_duration, - ) - - def evaluate(self, ins: fl.common.EvaluateIns) -> fl.common.EvaluateRes: - """ - - Parameters - ---------- - ins: fl.common.EvaluateIns - Parameters sent by the server to be used during testing. - - - Returns - ------- - Information the clients testing results. - - -The first thing to notice is that :code:`PytorchMNISTClient` instantiates a CNN model inside its constructor - -.. code-block:: python - - class PytorchMNISTClient(fl.client.Client): - """Flower client implementing MNIST handwritten classification using PyTorch.""" - - def __init__( - self, - cid: int, - train_loader: datasets, - test_loader: datasets, - epochs: int, - device: torch.device = torch.device("cpu"), - ) -> None: - self.model = MNISTNet().to(device) - ... - -The code for the CNN is available under :code:`quickstart-pytorch.mnist` and it is reproduced below. It is the same network found in `Basic MNIST Example `_. - -.. 
code-block:: python - - class MNISTNet(nn.Module): - """Simple CNN adapted from Pytorch's 'Basic MNIST Example'.""" - - def __init__(self) -> None: - super(MNISTNet, self).__init__() - self.conv1 = nn.Conv2d(1, 32, 3, 1) - self.conv2 = nn.Conv2d(32, 64, 3, 1) - self.dropout1 = nn.Dropout2d(0.25) - self.dropout2 = nn.Dropout2d(0.5) - self.fc1 = nn.Linear(9216, 128) - self.fc2 = nn.Linear(128, 10) - - def forward(self, x: Tensor) -> Tensor: - """Compute forward pass. - - Parameters - ---------- - x: Tensor - Mini-batch of shape (N,28,28) containing images from MNIST dataset. - - - Returns - ------- - output: Tensor - The probability density of the output being from a specific class given the input. - - """ - x = self.conv1(x) - x = F.relu(x) - x = self.conv2(x) - x = F.relu(x) - x = F.max_pool2d(x, 2) - x = self.dropout1(x) - x = torch.flatten(x, 1) - x = self.fc1(x) - x = F.relu(x) - x = self.dropout2(x) - x = self.fc2(x) - output = F.log_softmax(x, dim=1) - return output - - -The second thing to notice is that :code:`PytorchMNISTClient` class inherits from the :code:`fl.client.Client`, and hence it must implement the following methods: - -.. code-block:: python - - from abc import ABC, abstractmethod - - from flwr.common import EvaluateIns, EvaluateRes, FitIns, FitRes, ParametersRes - - - class Client(ABC): - """Abstract base class for Flower clients.""" - - @abstractmethod - def get_parameters(self, config) -> ParametersRes: - """Return the current local model parameters.""" - - @abstractmethod - def fit(self, ins: FitIns) -> FitRes: - """Refine the provided weights using the locally held dataset.""" - - @abstractmethod - def evaluate(self, ins: EvaluateIns) -> EvaluateRes: - """Evaluate the provided weights using the locally held dataset.""" - - -When comparing the abstract class to its derived class :code:`PytorchMNISTClient` you will notice that :code:`fit` calls a :code:`train` function and that :code:`evaluate` calls a :code:`test`: function. 
- -These functions can both be found inside the same :code:`quickstart-pytorch.mnist` module: - -.. code-block:: python - - def train( - model: torch.nn.ModuleList, - train_loader: torch.utils.data.DataLoader, - epochs: int, - device: torch.device = torch.device("cpu"), - ) -> int: - """Train routine based on 'Basic MNIST Example' - - Parameters - ---------- - model: torch.nn.ModuleList - Neural network model used in this example. - - train_loader: torch.utils.data.DataLoader - DataLoader used in training. - - epochs: int - Number of epochs to run in each round. - - device: torch.device - (Default value = torch.device("cpu")) - Device where the network will be trained within a client. - - Returns - ------- - num_examples_train: int - Number of total samples used during training. - - """ - model.train() - optimizer = optim.Adadelta(model.parameters(), lr=1.0) - scheduler = StepLR(optimizer, step_size=1, gamma=0.7) - print(f"Training {epochs} epoch(s) w/ {len(train_loader)} mini-batches each") - for epoch in range(epochs): # loop over the dataset multiple time - print() - loss_epoch: float = 0.0 - num_examples_train: int = 0 - for batch_idx, (data, target) in enumerate(train_loader): - # Grab mini-batch and transfer to device - data, target = data.to(device), target.to(device) - num_examples_train += len(data) - - # Zero gradients - optimizer.zero_grad() - - output = model(data) - loss = F.nll_loss(output, target) - loss.backward() - optimizer.step() - - loss_epoch += loss.item() - if batch_idx % 10 == 8: - print( - "Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}\t\t\t\t".format( - epoch, - num_examples_train, - len(train_loader) * train_loader.batch_size, - 100.0 - * num_examples_train - / len(train_loader) - / train_loader.batch_size, - loss.item(), - ), - end="\r", - flush=True, - ) - scheduler.step() - return num_examples_train - - - def test( - model: torch.nn.ModuleList, - test_loader: torch.utils.data.DataLoader, - device: torch.device = torch.device("cpu"), - 
) -> Tuple[int, float, float]: - """Test routine 'Basic MNIST Example' - - Parameters - ---------- - model: torch.nn.ModuleList : - Neural network model used in this example. - - test_loader: torch.utils.data.DataLoader : - DataLoader used in test. - - device: torch.device : - (Default value = torch.device("cpu")) - Device where the network will be tested within a client. - - Returns - ------- - Tuple containing the total number of test samples, the test_loss, and the accuracy evaluated on the test set. - - """ - model.eval() - test_loss: float = 0 - correct: int = 0 - num_test_samples: int = 0 - with torch.no_grad(): - for data, target in test_loader: - data, target = data.to(device), target.to(device) - num_test_samples += len(data) - output = model(data) - test_loss += F.nll_loss( - output, target, reduction="sum" - ).item() # sum up batch loss - pred = output.argmax( - dim=1, keepdim=True - ) # get the index of the max log-probability - correct += pred.eq(target.view_as(pred)).sum().item() - - test_loss /= num_test_samples - - return (num_test_samples, test_loss, correct / num_test_samples) - - -Observe that these functions encapsulate regular training and test loops and provide :code:`fit` and :code:`evaluate` with final statistics for each round. -You could substitute them with your custom train and test loops and change the network architecture, and the entire example would still work flawlessly. -As a matter of fact, why not try and modify the code to an example of your liking? - - - -Give It a Try -------------- -Looking through the quickstart code description above will have given a good understanding of how *clients* and *servers* work in Flower, how to run a simple experiment, and the internals of a client wrapper. -Here are a few things you could try on your own and get more experience with Flower: - -- Try and change :code:`PytorchMNISTClient` so it can accept different architectures. 
-- Modify the :code:`train` function so that it accepts different optimizers -- Modify the :code:`test` function so that it proves not only the top-1 (regular accuracy) but also the top-5 accuracy? -- Go larger! Try to adapt the code to larger images and datasets. Why not try training on ImageNet with a ResNet-50? - -You are ready now. Enjoy learning in a federated way! diff --git a/doc/source/how-to-install-flower.rst b/doc/source/how-to-install-flower.rst index aebe5f7316de..964b23125c0b 100644 --- a/doc/source/how-to-install-flower.rst +++ b/doc/source/how-to-install-flower.rst @@ -48,7 +48,7 @@ Verify installation The following command can be used to verify if Flower was successfully installed. If everything worked, it should print the version of Flower to the command line:: python -c "import flwr;print(flwr.__version__)" - 1.5.0 + 1.8.0 Advanced installation options diff --git a/doc/source/how-to-run-flower-using-docker.rst b/doc/source/how-to-run-flower-using-docker.rst index ed034c820142..25262109f247 100644 --- a/doc/source/how-to-run-flower-using-docker.rst +++ b/doc/source/how-to-run-flower-using-docker.rst @@ -2,14 +2,14 @@ Run Flower using Docker ======================= The simplest way to get started with Flower is by using the pre-made Docker images, which you can -find on `Docker Hub `_. +find on `Docker Hub `_. Before you start, make sure that the Docker daemon is running: .. code-block:: bash $ docker -v - Docker version 24.0.7, build afdd53b + Docker version 26.0.0, build 2ae903e If you do not see the version of Docker but instead get an error saying that the command was not found, you will need to install Docker first. You can find installation instruction @@ -21,8 +21,8 @@ was not found, you will need to install Docker first. You can find installation you can follow the `Post-installation steps `_ on the official Docker website. 
-Flower server -------------- +Flower SuperLink +---------------- Quickstart ~~~~~~~~~~ @@ -31,43 +31,41 @@ If you're looking to try out Flower, you can use the following command: .. code-block:: bash - $ docker run --rm -p 9091:9091 -p 9092:9092 flwr/server:1.7.0-py3.11-ubuntu22.04 \ - --insecure + $ docker run --rm -p 9091:9091 -p 9092:9092 flwr/superlink:1.8.0 --insecure -The command will pull the Docker image with the tag ``1.7.0-py3.11-ubuntu22.04`` from Docker Hub. -The tag contains the information which Flower, Python and Ubuntu is used. In this case, it -uses Flower 1.7.0, Python 3.11 and Ubuntu 22.04. The ``--rm`` flag tells Docker to remove -the container after it exits. +The command pulls the Docker image with the tag ``1.8.0`` from Docker Hub. The tag specifies +the Flower version. In this case, Flower 1.8.0. The ``--rm`` flag tells Docker to remove the +container after it exits. .. note:: - By default, the Flower server keeps state in-memory. When using the Docker flag - ``--rm``, the state is not persisted between container starts. We will show below how to save the - state in a file on your host system. + By default, the Flower SuperLink keeps state in-memory. When using the Docker flag ``--rm``, the + state is not persisted between container starts. We will show below how to save the state in a + file on your host system. The ``-p :`` flag tells Docker to map the ports ``9091``/``9092`` of the host to ``9091``/``9092`` of the container, allowing you to access the Driver API on ``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``. Lastly, any flag that comes after the tag is passed -to the Flower server. Here, we are passing the flag ``--insecure``. +to the Flower SuperLink. Here, we are passing the flag ``--insecure``. .. attention:: - The ``--insecure`` flag enables insecure communication (using HTTP, not HTTPS) and should only be used - for testing purposes. 
We strongly recommend enabling + The ``--insecure`` flag enables insecure communication (using HTTP, not HTTPS) and should only be + used for testing purposes. We strongly recommend enabling `SSL `_ when deploying to a production environment. -You can use ``--help`` to view all available flags that the server supports: +You can use ``--help`` to view all available flags that the SuperLink supports: .. code-block:: bash - $ docker run --rm flwr/server:1.7.0-py3.11-ubuntu22.04 --help + $ docker run --rm flwr/superlink:1.8.0 --help Mounting a volume to store the state on the host system ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -If you want to persist the state of the server on your host system, all you need to do is specify a -path where you want to save the file on your host system and a name for the database file. In the +If you want to persist the state of the SuperLink on your host system, all you need to do is specify +a path where you want to save the file on your host system and a name for the database file. In the example below, we tell Docker via the flag ``-v`` to mount the user's home directory (``~/`` on your host) into the ``/app/`` directory of the container. Furthermore, we use the flag ``--database`` to specify the name of the database file. @@ -75,18 +73,19 @@ flag ``--database`` to specify the name of the database file. .. code-block:: bash $ docker run --rm \ - -p 9091:9091 -p 9092:9092 -v ~/:/app/ flwr/server:1.7.0-py3.11-ubuntu22.04 \ + -p 9091:9091 -p 9092:9092 -v ~/:/app/ flwr/superlink:1.8.0 \ --insecure \ --database state.db -As soon as the server starts, the file ``state.db`` is created in the user's home directory on -your host system. If the file already exists, the server tries to restore the state from the file. -To start the server with an empty database, simply remove the ``state.db`` file. +As soon as the SuperLink starts, the file ``state.db`` is created in the user's home directory on +your host system. 
If the file already exists, the SuperLink tries to restore the state from the +file. To start the SuperLink with an empty database, simply remove the ``state.db`` file. Enabling SSL for secure connections ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To enable SSL, you will need a CA certificate, a server certificate and a server private key. +To enable SSL, you will need a PEM-encoded root certificate, a PEM-encoded private key and a +PEM-encoded certificate chain. .. note:: For testing purposes, you can generate your own self-signed certificates. The @@ -95,20 +94,21 @@ To enable SSL, you will need a CA certificate, a server certificate and a server Assuming all files we need are in the local ``certificates`` directory, we can use the flag ``-v`` to mount the local directory into the ``/app/`` directory of the container. This allows the -server to access the files within the container. Finally, we pass the names of the certificates to -the server with the ``--certificates`` flag. +SuperLink to access the files within the container. Finally, we pass the names of the certificates +to the SuperLink with the ``--certificates`` flag. .. code-block:: bash $ docker run --rm \ - -p 9091:9091 -p 9092:9092 -v ./certificates/:/app/ flwr/server:1.7.0-py3.11-ubuntu22.04 \ + -p 9091:9091 -p 9092:9092 -v ./certificates/:/app/ flwr/superlink:1.8.0 \ --certificates ca.crt server.pem server.key -Using a different Flower or Python version -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Using a different Flower version +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -If you want to use a different version of Flower or Python, you can do so by changing the tag. -All versions we provide are available on `Docker Hub `_. +If you want to use a different version of Flower, for example Flower nightly, you can do so by +changing the tag. All available versions are on +`Docker Hub `_. 
Pinning a Docker image to a specific version ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -118,19 +118,19 @@ updates of system dependencies that should not change the functionality of Flowe want to ensure that you always use the same image, you can specify the hash of the image instead of the tag. -The following command returns the current image hash referenced by the ``server:1.7.0-py3.11-ubuntu22.04`` tag: +The following command returns the current image hash referenced by the ``superlink:1.8.0`` tag: .. code-block:: bash - $ docker inspect --format='{{index .RepoDigests 0}}' flwr/server:1.7.0-py3.11-ubuntu22.04 - flwr/server@sha256:c4be5012f9d73e3022e98735a889a463bb2f4f434448ebc19c61379920b1b327 + $ docker inspect --format='{{index .RepoDigests 0}}' flwr/superlink:1.8.0 + flwr/superlink@sha256:1b855d1fa4e344e4d95db99793f2bb35d8c63f6a1decdd736863bfe4bb0fe46c -Next, we can pin the hash when running a new server container: +Next, we can pin the hash when running a new SuperLink container: .. code-block:: bash $ docker run \ - --rm flwr/server@sha256:c4be5012f9d73e3022e98735a889a463bb2f4f434448ebc19c61379920b1b327 \ + --rm flwr/superlink@sha256:1b855d1fa4e344e4d95db99793f2bb35d8c63f6a1decdd736863bfe4bb0fe46c \ --insecure Setting environment variables @@ -141,4 +141,4 @@ To set a variable inside a Docker container, you can use the ``-e = .. code-block:: bash $ docker run -e FLWR_TELEMETRY_ENABLED=0 \ - --rm flwr/server:1.7.0-py3.11-ubuntu22.04 --insecure + --rm flwr/superlink:1.8.0 --insecure diff --git a/doc/source/how-to-upgrade-to-flower-next.rst b/doc/source/how-to-upgrade-to-flower-next.rst new file mode 100644 index 000000000000..8c8f3c3f8fd7 --- /dev/null +++ b/doc/source/how-to-upgrade-to-flower-next.rst @@ -0,0 +1,333 @@ +Upgrade to Flower Next +====================== + +Welcome to the migration guide for updating Flower to Flower Next! 
Whether you're a seasoned user +or just getting started, this guide will help you smoothly transition your existing setup to take +advantage of the latest features and improvements in Flower Next, starting from version 1.8. + +.. note:: + This guide shows how to reuse pre-``1.8`` Flower code with minimum code changes by + using the *compatibility layer* in Flower Next. In another guide, we will show how + to run Flower Next end-to-end with pure Flower Next APIs. + +Let's dive in! + +.. + Generate link text as literal. Refs: + - https://stackoverflow.com/q/71651598 + - https://github.com/jgm/pandoc/issues/3973#issuecomment-337087394 + +.. |clientapp_link| replace:: ``ClientApp()`` +.. |serverapp_link| replace:: ``ServerApp()`` +.. |startclient_link| replace:: ``start_client()`` +.. |startserver_link| replace:: ``start_server()`` +.. |startsim_link| replace:: ``start_simulation()`` +.. |runsimcli_link| replace:: ``flower-simulation`` +.. |runsim_link| replace:: ``run_simulation()`` +.. |flowernext_superlink_link| replace:: ``flower-superlink`` +.. |flowernext_clientapp_link| replace:: ``flower-client-app`` +.. |flowernext_serverapp_link| replace:: ``flower-server-app`` +.. _clientapp_link: ref-api/flwr.client.ClientApp.html +.. _serverapp_link: ref-api/flwr.server.ServerApp.html +.. _startclient_link: ref-api/flwr.client.start_client.html +.. _startserver_link: ref-api/flwr.server.start_server.html +.. _startsim_link: ref-api/flwr.simulation.start_simulation.html +.. _runsimcli_link: ref-api/flwr.simulation.run_simulation_from_cli.html +.. _runsim_link: ref-api/flwr.simulation.run_simulation.html +.. _flowernext_superlink_link: ref-api-cli.html#flower-superlink +.. _flowernext_clientapp_link: ref-api-cli.html#flower-client-app +.. _flowernext_serverapp_link: ref-api-cli.html#flower-server-app + +Install update +-------------- + +Using pip +~~~~~~~~~ + +Here's how to update an existing installation of Flower to Flower Next with ``pip``: + +.. 
code-block:: bash + + $ python -m pip install -U flwr + +or if you need Flower Next with simulation: + +.. code-block:: bash + + $ python -m pip install -U flwr[simulation] + + +Ensure you set the following version constraint in your ``requirements.txt`` + +.. code-block:: + + # Without simulation support + flwr>=1.8,<2.0 + + # With simulation support + flwr[simulation]>=1.8,<2.0 + +or ``pyproject.toml``: + +.. code-block:: toml + + # Without simulation support + dependencies = ["flwr>=1.8,<2.0"] + + # With simulation support + dependencies = ["flwr[simulation]>=1.8,<2.0"] + +Using Poetry +~~~~~~~~~~~~ + +Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before running ``poetry install``). + +Ensure you set the following version constraint in your ``pyproject.toml``: + +.. code-block:: toml + + [tool.poetry.dependencies] + python = "^3.8" + + # Without simulation support + flwr = ">=1.8,<2.0" + + # With simulation support + flwr = { version = ">=1.8,<2.0", extras = ["simulation"] } + +Required changes +---------------- + +In Flower Next, the *infrastructure* and *application layers* have been decoupled. +Instead of starting a client in code via ``start_client()``, you create a |clientapp_link|_ and start it via the command line. +Instead of starting a server in code via ``start_server()``, you create a |serverapp_link|_ and start it via the command line. +The long-running components of server and client are called SuperLink and SuperNode. +The following non-breaking changes require manual updates and allow you to run your project both in the traditional way and in the Flower Next way: + +|clientapp_link|_ +~~~~~~~~~~~~~~~~~ +- Wrap your existing client with |clientapp_link|_ instead of launching it via + |startclient_link|_. Here's an example: + +..
code-block:: python + :emphasize-lines: 5,11 + + # Flower 1.8 + def client_fn(cid: str): + return flwr.client.FlowerClient().to_client() + + app = flwr.client.ClientApp( + client_fn=client_fn, + ) + + # Flower 1.7 + if __name__ == "__main__": + flwr.client.start_client( + server_address="127.0.0.1:8080", + client=flwr.client.FlowerClient().to_client(), + ) + +|serverapp_link|_ +~~~~~~~~~~~~~~~~~ +- Wrap your existing strategy with |serverapp_link|_ instead of starting the server + via |startserver_link|_. Here's an example: + +.. code-block:: python + :emphasize-lines: 2,9 + + # Flower 1.8 + app = flwr.server.ServerApp( + config=config, + strategy=strategy, + ) + + # Flower 1.7 + if __name__ == "__main__": + flwr.server.start_server( + server_address="0.0.0.0:8080", + config=config, + strategy=strategy, + ) + +Deployment +~~~~~~~~~~ +- Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, in sequence, + |flowernext_clientapp_link|_ (2x) and |flowernext_serverapp_link|_. There is no need to + execute `client.py` and `server.py` as Python scripts. +- Here's an example to start the server without HTTPS (only for prototyping): + +.. code-block:: bash + + # Start a Superlink + $ flower-superlink --insecure + + # In a new terminal window, start a long-running SuperNode + $ flower-client-app client:app --insecure + + # In another terminal window, start another long-running SuperNode (at least 2 SuperNodes are required) + $ flower-client-app client:app --insecure + + # In yet another terminal window, run the ServerApp (this starts the actual training run) + $ flower-server-app server:app --insecure + +- Here's another example to start with HTTPS. Use the ``--certificates`` command line + argument to pass paths to (CA certificate, server certificate, and server private key). + +.. 
code-block:: bash + + # Start a secure Superlink + $ flower-superlink --certificates \ + \ + \ + + + # In a new terminal window, start a long-running secure SuperNode + $ flower-client-app client:app \ + --root-certificates \ + --server 127.0.0.1:9092 + + # In another terminal window, start another long-running secure SuperNode (at least 2 SuperNodes are required) + $ flower-client-app client:app \ + --root-certificates \ + --server 127.0.0.1:9092 + + # In yet another terminal window, run the ServerApp (this starts the actual training run) + $ flower-server-app server:app \ + --root-certificates \ + --server 127.0.0.1:9091 + +Simulation in CLI +~~~~~~~~~~~~~~~~~ +- Wrap your existing client and strategy with |clientapp_link|_ and |serverapp_link|_, + respectively. There is no need to use |startsim_link|_ anymore. Here's an example: + +.. code-block:: python + :emphasize-lines: 9,13,20 + + # Regular Flower client implementation + class FlowerClient(NumPyClient): + # ... + + # Flower 1.8 + def client_fn(cid: str): + return FlowerClient().to_client() + + client_app = flwr.client.ClientApp( + client_fn=client_fn, + ) + + server_app = flwr.server.ServerApp( + config=config, + strategy=strategy, + ) + + # Flower 1.7 + if __name__ == "__main__": + hist = flwr.simulation.start_simulation( + num_clients=100, + ... + ) + +- Run |runsimcli_link|_ in CLI and point to the ``server_app`` / ``client_app`` object in the + code instead of executing the Python script. Here's an example (assuming the + ``server_app`` and ``client_app`` objects are in a ``sim.py`` module): + +.. code-block:: bash + + # Flower 1.8 + $ flower-simulation \ + --server-app=sim:server_app \ + --client-app=sim:client_app \ + --num-supernodes=100 + +.. code-block:: bash + + # Flower 1.7 + $ python sim.py + +- Set default resources for each |clientapp_link|_ using the ``--backend-config`` command + line argument instead of setting the ``client_resources`` argument in + |startsim_link|_. 
Here's an example: + +.. code-block:: bash + :emphasize-lines: 6 + + # Flower 1.8 + $ flower-simulation \ + --client-app=sim:client_app \ + --server-app=sim:server_app \ + --num-supernodes=100 \ + --backend-config='{"client_resources": {"num_cpus": 2, "num_gpus": 0.25}}' + +.. code-block:: python + :emphasize-lines: 5 + + # Flower 1.7 (in `sim.py`) + if __name__ == "__main__": + hist = flwr.simulation.start_simulation( + num_clients=100, + client_resources = {'num_cpus': 2, "num_gpus": 0.25}, + ... + ) + +Simulation in a Notebook +~~~~~~~~~~~~~~~~~~~~~~~~ +- Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's an example: + +.. code-block:: python + :emphasize-lines: 19,27 + + NUM_CLIENTS = + + def client_fn(cid: str): + # ... + return FlowerClient().to_client() + + client_app = flwr.client.ClientApp( + client_fn=client_fn, + ) + + server_app = flwr.server.ServerApp( + config=config, + strategy=strategy, + ) + + backend_config = {"client_resources": {"num_cpus": 2, "num_gpus": 0.25}} + + # Flower 1.8 + flwr.simulation.run_simulation( + server_app=server_app, + client_app=client_app, + num_supernodes=NUM_CLIENTS, + backend_config=backend_config, + ) + + # Flower 1.7 + flwr.simulation.start_simulation( + client_fn=client_fn, + num_clients=NUM_CLIENTS, + config=config, + strategy=strategy, + client_resources=backend_config["client_resources"], + ) + + +Further help +------------ + +Some official `Flower code examples `_ are already +updated to Flower Next so they can serve as a reference for using the Flower Next API. If there are +further questions, `join the Flower Slack `_ and use the channel ``#questions``. +You can also `participate in Flower Discuss `_ where you can find us +answering questions, or share and learn from others about migrating to Flower Next. + +.. admonition:: Important + :class: important + + As we continuously enhance Flower Next at a rapid pace, we'll be periodically + updating this guide. 
Please feel free to share any feedback with us! + +.. + [TODO] Add links to Flower Next 101 and Flower Glossary + +Happy migrating! 🚀 diff --git a/doc/source/index.rst b/doc/source/index.rst index 894155be03f1..d01dbfa6b965 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -53,13 +53,12 @@ A learning-oriented series of federated learning tutorials, the best place to st tutorial-quickstart-pandas tutorial-quickstart-fastai tutorial-quickstart-pytorch-lightning - tutorial-quickstart-mxnet tutorial-quickstart-scikitlearn tutorial-quickstart-xgboost tutorial-quickstart-android tutorial-quickstart-ios -QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:`TensorFlow ` | :doc:`🤗 Transformers ` | :doc:`JAX ` | :doc:`Pandas ` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`MXNet ` | :doc:`scikit-learn ` | :doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS ` +QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:`TensorFlow ` | :doc:`🤗 Transformers ` | :doc:`JAX ` | :doc:`Pandas ` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | :doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS ` We also made video tutorials for PyTorch: @@ -90,18 +89,17 @@ Problem-oriented how-to guides show step-by-step how to achieve a specific goal. how-to-monitor-simulation how-to-configure-logging how-to-enable-ssl-connections - how-to-upgrade-to-flower-1.0 how-to-use-built-in-mods - how-to-run-flower-using-docker how-to-use-differential-privacy + how-to-run-flower-using-docker + how-to-upgrade-to-flower-1.0 + how-to-upgrade-to-flower-next .. 
toctree:: :maxdepth: 1 :caption: Legacy example guides - example-walkthrough-pytorch-mnist example-pytorch-from-centralized-to-federated - example-mxnet-walk-through example-jax-from-centralized-to-federated example-fedbn-pytorch-from-centralized-to-federated diff --git a/doc/source/ref-changelog.md b/doc/source/ref-changelog.md index 1a6524d29353..c742b8cd9cbe 100644 --- a/doc/source/ref-changelog.md +++ b/doc/source/ref-changelog.md @@ -6,6 +6,72 @@ ### Incompatible changes +None + +## v1.8.0 (2024-04-03) + +### Thanks to our contributors + +We would like to give our special thanks to all the contributors who made the new version of Flower possible (in `git shortlog` order): + +`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, `Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, `tabdar-khan` + +### What's new? + +- **Introduce Flower Next high-level API (stable)** ([#3002](https://github.com/adap/flower/pull/3002), [#2934](https://github.com/adap/flower/pull/2934), [#2958](https://github.com/adap/flower/pull/2958), [#3173](https://github.com/adap/flower/pull/3173), [#3174](https://github.com/adap/flower/pull/3174), [#2923](https://github.com/adap/flower/pull/2923), [#2691](https://github.com/adap/flower/pull/2691), [#3079](https://github.com/adap/flower/pull/3079), [#2961](https://github.com/adap/flower/pull/2961), [#2924](https://github.com/adap/flower/pull/2924), [#3166](https://github.com/adap/flower/pull/3166), [#3031](https://github.com/adap/flower/pull/3031), [#3057](https://github.com/adap/flower/pull/3057), [#3000](https://github.com/adap/flower/pull/3000), [#3113](https://github.com/adap/flower/pull/3113), [#2957](https://github.com/adap/flower/pull/2957), [#3183](https://github.com/adap/flower/pull/3183), [#3180](https://github.com/adap/flower/pull/3180), 
[#3035](https://github.com/adap/flower/pull/3035), [#3189](https://github.com/adap/flower/pull/3189), [#3185](https://github.com/adap/flower/pull/3185), [#3190](https://github.com/adap/flower/pull/3190), [#3191](https://github.com/adap/flower/pull/3191), [#3195](https://github.com/adap/flower/pull/3195), [#3197](https://github.com/adap/flower/pull/3197)) + + The Flower Next high-level API is stable! Flower Next is the future of Flower - all new features (like Flower Mods) will be built on top of it. You can start to migrate your existing projects to Flower Next by using `ServerApp` and `ClientApp` (check out `quickstart-pytorch` or `quickstart-tensorflow`, a detailed migration guide will follow shortly). Flower Next allows you to run multiple projects concurrently (we call this multi-run) and execute the same project in either simulation environments or deployment environments without having to change a single line of code. The best part? It's fully compatible with existing Flower projects that use `Strategy`, `NumPyClient` & co. + +- **Introduce Flower Next low-level API (preview)** ([#3062](https://github.com/adap/flower/pull/3062), [#3034](https://github.com/adap/flower/pull/3034), [#3069](https://github.com/adap/flower/pull/3069)) + + In addition to the Flower Next *high-level* API that uses `Strategy`, `NumPyClient` & co, Flower 1.8 also comes with a preview version of the new Flower Next *low-level* API. The low-level API allows for granular control of every aspect of the learning process by sending/receiving individual messages to/from client nodes. The new `ServerApp` supports registering a custom `main` function that allows writing custom training loops for methods like async FL, cyclic training, or federated analytics. The new `ClientApp` supports registering `train`, `evaluate` and `query` functions that can access the raw message received from the `ServerApp`. 
New abstractions like `RecordSet`, `Message` and `Context` further enable sending multiple models, multiple sets of config values and metrics, stateful computations on the client node and implementations of custom SMPC protocols, to name just a few. + +- **Introduce Flower Mods (preview)** ([#3054](https://github.com/adap/flower/pull/3054), [#2911](https://github.com/adap/flower/pull/2911), [#3083](https://github.com/adap/flower/pull/3083)) + + Flower Modifiers (we call them Mods) can intercept messages and analyze, edit or handle them directly. Mods can be used to develop pluggable modules that work across different projects. Flower 1.8 already includes mods to log the size of a message, the number of parameters sent over the network, differential privacy with fixed clipping and adaptive clipping, local differential privacy and secure aggregation protocols SecAgg and SecAgg+. The Flower Mods API is released as a preview, but researchers can already use it to experiment with arbitrary SMPC protocols. + +- **Fine-tune LLMs with LLM FlowerTune** ([#3029](https://github.com/adap/flower/pull/3029), [#3089](https://github.com/adap/flower/pull/3089), [#3092](https://github.com/adap/flower/pull/3092), [#3100](https://github.com/adap/flower/pull/3100), [#3114](https://github.com/adap/flower/pull/3114), [#3162](https://github.com/adap/flower/pull/3162), [#3172](https://github.com/adap/flower/pull/3172)) + + We are introducing LLM FlowerTune, an introductory example that demonstrates federated LLM fine-tuning of pre-trained Llama2 models on the Alpaca-GPT4 dataset. The example is built to be easily adapted to use different models and/or datasets. Read our blog post [LLM FlowerTune: Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14-llm-flowertune-federated-llm-finetuning-with-flower/) for more details.
+ +- **Introduce built-in Differential Privacy (preview)** ([#2798](https://github.com/adap/flower/pull/2798), [#2959](https://github.com/adap/flower/pull/2959), [#3038](https://github.com/adap/flower/pull/3038), [#3147](https://github.com/adap/flower/pull/3147), [#2909](https://github.com/adap/flower/pull/2909), [#2893](https://github.com/adap/flower/pull/2893), [#2892](https://github.com/adap/flower/pull/2892), [#3039](https://github.com/adap/flower/pull/3039), [#3074](https://github.com/adap/flower/pull/3074)) + + Built-in Differential Privacy is here! Flower supports both central and local differential privacy (DP). Central DP can be configured with either fixed or adaptive clipping. The clipping can happen either on the server-side or the client-side. Local DP does both clipping and noising on the client-side. A new documentation page [explains Differential Privacy approaches](https://flower.ai/docs/framework/explanation-differential-privacy.html) and a new how-to guide describes [how to use the new Differential Privacy components](https://flower.ai/docs/framework/how-to-use-differential-privacy.html) in Flower. + +- **Introduce built-in Secure Aggregation (preview)** ([#3120](https://github.com/adap/flower/pull/3120), [#3110](https://github.com/adap/flower/pull/3110), [#3108](https://github.com/adap/flower/pull/3108)) + + Built-in Secure Aggregation is here! Flower now supports different secure aggregation protocols out-of-the-box. The best part? You can add secure aggregation to your Flower projects with only a few lines of code. In this initial release, we include support for SecAgg and SecAgg+, but more protocols will be implemented shortly. We'll also add detailed docs that explain secure aggregation and how to use it in Flower. You can already check out the new code example that shows how to use Flower to easily combine Federated Learning, Differential Privacy and Secure Aggregation in the same project.
+ +- **Introduce** `flwr` **CLI (preview)** ([#2942](https://github.com/adap/flower/pull/2942), [#3055](https://github.com/adap/flower/pull/3055), [#3111](https://github.com/adap/flower/pull/3111), [#3130](https://github.com/adap/flower/pull/3130), [#3136](https://github.com/adap/flower/pull/3136), [#3094](https://github.com/adap/flower/pull/3094), [#3059](https://github.com/adap/flower/pull/3059), [#3049](https://github.com/adap/flower/pull/3049), [#3142](https://github.com/adap/flower/pull/3142)) + + A new `flwr` CLI command allows creating new Flower projects (`flwr new`) and then running them using the Simulation Engine (`flwr run`). + +- **Introduce Flower Next Simulation Engine** ([#3024](https://github.com/adap/flower/pull/3024), [#3061](https://github.com/adap/flower/pull/3061), [#2997](https://github.com/adap/flower/pull/2997), [#2783](https://github.com/adap/flower/pull/2783), [#3184](https://github.com/adap/flower/pull/3184), [#3075](https://github.com/adap/flower/pull/3075), [#3047](https://github.com/adap/flower/pull/3047), [#2998](https://github.com/adap/flower/pull/2998), [#3009](https://github.com/adap/flower/pull/3009), [#3008](https://github.com/adap/flower/pull/3008)) + + The Flower Simulation Engine can now run Flower Next projects. For notebook environments, there's also a new `run_simulation` function that can run `ServerApp` and `ClientApp`. + +- **Handle SuperNode connection errors** ([#2969](https://github.com/adap/flower/pull/2969)) + + A SuperNode will now try to reconnect indefinitely to the SuperLink in case of connection errors. The arguments `--max-retries` and `--max-wait-time` can now be passed to the `flower-client-app` command. `--max-retries` will define the number of attempts the client should make before it gives up trying to reconnect to the SuperLink, and `--max-wait-time` defines the time before the SuperNode gives up trying to reconnect to the SuperLink. 
+ +- **General updates to Flower Baselines** ([#2904](https://github.com/adap/flower/pull/2904), [#2482](https://github.com/adap/flower/pull/2482), [#2985](https://github.com/adap/flower/pull/2985), [#2968](https://github.com/adap/flower/pull/2968)) + + There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) baseline. Several other baselines have been updated as well. + +- **Improve documentation and translations** ([#3050](https://github.com/adap/flower/pull/3050), [#3044](https://github.com/adap/flower/pull/3044), [#3043](https://github.com/adap/flower/pull/3043), [#2986](https://github.com/adap/flower/pull/2986), [#3041](https://github.com/adap/flower/pull/3041), [#3046](https://github.com/adap/flower/pull/3046), [#3042](https://github.com/adap/flower/pull/3042), [#2978](https://github.com/adap/flower/pull/2978), [#2952](https://github.com/adap/flower/pull/2952), [#3167](https://github.com/adap/flower/pull/3167), [#2953](https://github.com/adap/flower/pull/2953), [#3045](https://github.com/adap/flower/pull/3045), [#2654](https://github.com/adap/flower/pull/2654), [#3082](https://github.com/adap/flower/pull/3082), [#2990](https://github.com/adap/flower/pull/2990), [#2989](https://github.com/adap/flower/pull/2989)) + + As usual, we merged many smaller and larger improvements to the documentation. A special thank you goes to [Sebastian van der Voort](https://github.com/svdvoort) for landing a big documentation PR! 
+ +- **General updates to Flower Examples** ([3134](https://github.com/adap/flower/pull/3134), [2996](https://github.com/adap/flower/pull/2996), [2930](https://github.com/adap/flower/pull/2930), [2967](https://github.com/adap/flower/pull/2967), [2467](https://github.com/adap/flower/pull/2467), [2910](https://github.com/adap/flower/pull/2910), [#2918](https://github.com/adap/flower/pull/2918), [#2773](https://github.com/adap/flower/pull/2773), [#3063](https://github.com/adap/flower/pull/3063), [#3116](https://github.com/adap/flower/pull/3116), [#3117](https://github.com/adap/flower/pull/3117)) + + Two new examples show federated training of a Vision Transformer (ViT) and federated learning in a medical context using the popular MONAI library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the new Flower Next `ServerApp` and `ClientApp`. Many other examples received considerable updates as well. + +- **General improvements** ([#3171](https://github.com/adap/flower/pull/3171), [3099](https://github.com/adap/flower/pull/3099), [3003](https://github.com/adap/flower/pull/3003), [3145](https://github.com/adap/flower/pull/3145), [3017](https://github.com/adap/flower/pull/3017), [3085](https://github.com/adap/flower/pull/3085), [3012](https://github.com/adap/flower/pull/3012), [3119](https://github.com/adap/flower/pull/3119), [2991](https://github.com/adap/flower/pull/2991), [2970](https://github.com/adap/flower/pull/2970), [2980](https://github.com/adap/flower/pull/2980), [3086](https://github.com/adap/flower/pull/3086), [2932](https://github.com/adap/flower/pull/2932), [2928](https://github.com/adap/flower/pull/2928), [2941](https://github.com/adap/flower/pull/2941), [2933](https://github.com/adap/flower/pull/2933), [3181](https://github.com/adap/flower/pull/3181), [2973](https://github.com/adap/flower/pull/2973), [2992](https://github.com/adap/flower/pull/2992), [2915](https://github.com/adap/flower/pull/2915), 
[3040](https://github.com/adap/flower/pull/3040), [3022](https://github.com/adap/flower/pull/3022), [3032](https://github.com/adap/flower/pull/3032), [2902](https://github.com/adap/flower/pull/2902), [2931](https://github.com/adap/flower/pull/2931), [3005](https://github.com/adap/flower/pull/3005), [3132](https://github.com/adap/flower/pull/3132), [3115](https://github.com/adap/flower/pull/3115), [2944](https://github.com/adap/flower/pull/2944), [3064](https://github.com/adap/flower/pull/3064), [3106](https://github.com/adap/flower/pull/3106), [2974](https://github.com/adap/flower/pull/2974), [3178](https://github.com/adap/flower/pull/3178), [2993](https://github.com/adap/flower/pull/2993), [3186](https://github.com/adap/flower/pull/3186), [3091](https://github.com/adap/flower/pull/3091), [3125](https://github.com/adap/flower/pull/3125), [3093](https://github.com/adap/flower/pull/3093), [3013](https://github.com/adap/flower/pull/3013), [3033](https://github.com/adap/flower/pull/3033), [3133](https://github.com/adap/flower/pull/3133), [3068](https://github.com/adap/flower/pull/3068), [2916](https://github.com/adap/flower/pull/2916), [2975](https://github.com/adap/flower/pull/2975), [2984](https://github.com/adap/flower/pull/2984), [2846](https://github.com/adap/flower/pull/2846), [3077](https://github.com/adap/flower/pull/3077), [3143](https://github.com/adap/flower/pull/3143), [2921](https://github.com/adap/flower/pull/2921), [3101](https://github.com/adap/flower/pull/3101), [2927](https://github.com/adap/flower/pull/2927), [2995](https://github.com/adap/flower/pull/2995), [2972](https://github.com/adap/flower/pull/2972), [2912](https://github.com/adap/flower/pull/2912), [3065](https://github.com/adap/flower/pull/3065), [3028](https://github.com/adap/flower/pull/3028), [2922](https://github.com/adap/flower/pull/2922), [2982](https://github.com/adap/flower/pull/2982), [2914](https://github.com/adap/flower/pull/2914), [3179](https://github.com/adap/flower/pull/3179), 
[3080](https://github.com/adap/flower/pull/3080), [2994](https://github.com/adap/flower/pull/2994), [3187](https://github.com/adap/flower/pull/3187), [2926](https://github.com/adap/flower/pull/2926), [3018](https://github.com/adap/flower/pull/3018), [3144](https://github.com/adap/flower/pull/3144), [3011](https://github.com/adap/flower/pull/3011), [#3152](https://github.com/adap/flower/pull/3152), [#2836](https://github.com/adap/flower/pull/2836), [#2929](https://github.com/adap/flower/pull/2929), [#2943](https://github.com/adap/flower/pull/2943), [#2955](https://github.com/adap/flower/pull/2955), [#2954](https://github.com/adap/flower/pull/2954)) + +### Incompatible changes + +None + ## v1.7.0 (2024-02-05) ### Thanks to our contributors diff --git a/doc/source/ref-example-projects.rst b/doc/source/ref-example-projects.rst index bade86dfaa54..597e3a596c51 100644 --- a/doc/source/ref-example-projects.rst +++ b/doc/source/ref-example-projects.rst @@ -7,15 +7,7 @@ pipelines, usually leveraging popular machine learning frameworks such as `PyTorch `_ or `TensorFlow `_. -.. note:: - Flower usage examples used to be bundled with Flower in a package called - ``flwr_example``. We are migrating those examples to standalone projects to - make them easier to use. All new examples are based in the directory - `examples `_. - The following examples are available as standalone projects. - - Quickstart TensorFlow/Keras --------------------------- @@ -54,101 +46,3 @@ This example shows how Flower can be used to build a federated learning system t - `Federated Learning on Raspberry Pi and Nvidia Jetson (Code) `_ - `Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) `_ - - -Legacy Examples (`flwr_example`) --------------------------------- - -.. warning:: - The usage examples in `flwr_example` are deprecated and will be removed in - the future. New examples are provided as standalone projects in - `examples `_. 
- - -Extra Dependencies -~~~~~~~~~~~~~~~~~~ - -The core Flower framework keeps a minimal set of dependencies. The examples -demonstrate Flower in the context of different machine learning frameworks, so -additional dependencies need to be installed before an example can be run. - -For PyTorch examples:: - - $ pip install flwr[examples-pytorch] - -For TensorFlow examples:: - - $ pip install flwr[examples-tensorflow] - -For both PyTorch and TensorFlow examples:: - - $ pip install flwr[examples-pytorch,examples-tensorflow] - -Please consult :code:`pyproject.toml` for a full list of possible extras -(section :code:`[tool.poetry.extras]`). - - -PyTorch Examples -~~~~~~~~~~~~~~~~ - -Our PyTorch examples are based on PyTorch 1.7. They should work with other -releases as well. So far, we provide the following examples. - -CIFAR-10 Image Classification -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -`CIFAR-10 and CIFAR-100 `_ are -popular RGB image datasets. The Flower CIFAR-10 example uses PyTorch to train a -simple CNN classifier in a federated learning setup with two clients. - -First, start a Flower server: - - $ ./src/py/flwr_example/pytorch_cifar/run-server.sh - -Then, start the two clients in a new terminal window: - - $ ./src/py/flwr_example/pytorch_cifar/run-clients.sh - -For more details, see :code:`src/py/flwr_example/pytorch_cifar`. - -ImageNet-2012 Image Classification -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -`ImageNet-2012 `_ is one of the major computer -vision datasets. The Flower ImageNet example uses PyTorch to train a ResNet-18 -classifier in a federated learning setup with ten clients. - -First, start a Flower server: - - $ ./src/py/flwr_example/pytorch_imagenet/run-server.sh - -Then, start the two clients in a new terminal window: - - $ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh - -For more details, see :code:`src/py/flwr_example/pytorch_imagenet`. - - -TensorFlow Examples -~~~~~~~~~~~~~~~~~~~ - -Our TensorFlow examples are based on TensorFlow 2.0 or newer. 
So far, we -provide the following examples. - -Fashion-MNIST Image Classification -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -`Fashion-MNIST `_ is often -used as the "Hello, world!" of machine learning. We follow this tradition and -provide an example which samples random local datasets from Fashion-MNIST and -trains a simple image classification model over those partitions. - -First, start a Flower server: - - $ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh - -Then, start the two clients in a new terminal window: - - $ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh - -For more details, see :code:`src/py/flwr_example/tensorflow_fashion_mnist`. diff --git a/doc/source/tutorial-quickstart-mxnet.rst b/doc/source/tutorial-quickstart-mxnet.rst deleted file mode 100644 index fe582f793280..000000000000 --- a/doc/source/tutorial-quickstart-mxnet.rst +++ /dev/null @@ -1,296 +0,0 @@ -.. _quickstart-mxnet: - - -Quickstart MXNet -================ - -.. warning:: MXNet is no longer maintained and has been moved into `Attic `_. As a result, we would encourage you to use other ML frameworks alongside Flower, for example, PyTorch. This tutorial might be removed in future versions of Flower. - -.. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with MXNet to train a Sequential model on MNIST. - -In this tutorial, we will learn how to train a :code:`Sequential` model on MNIST using Flower and MXNet. - -It is recommended to create a virtual environment and run everything within this :doc:`virtualenv `. - -Our example consists of one *server* and two *clients* all having the same model. - -*Clients* are responsible for generating individual model parameter updates for the model based on their local datasets. -These updates are then sent to the *server* which will aggregate them to produce an updated global model. Finally, the *server* sends this improved version of the model back to each *client*. 
-A complete cycle of parameters updates is called a *round*. - -Now that we have a rough idea of what is going on, let's get started. We first need to install Flower. You can do this by running: - -.. code-block:: shell - - $ pip install flwr - -Since we want to use MXNet, let's go ahead and install it: - -.. code-block:: shell - - $ pip install mxnet - - -Flower Client -------------- - -Now that we have all our dependencies installed, let's run a simple distributed training with two clients and one server. Our training procedure and network architecture are based on MXNet´s `Hand-written Digit Recognition tutorial `_. - -In a file called :code:`client.py`, import Flower and MXNet related packages: - -.. code-block:: python - - import flwr as fl - - import numpy as np - - import mxnet as mx - from mxnet import nd - from mxnet import gluon - from mxnet.gluon import nn - from mxnet import autograd as ag - import mxnet.ndarray as F - -In addition, define the device allocation in MXNet with: - -.. code-block:: python - - DEVICE = [mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()] - -We use MXNet to load MNIST, a popular image classification dataset of handwritten digits for machine learning. The MXNet utility :code:`mx.test_utils.get_mnist()` downloads the training and test data. - -.. code-block:: python - - def load_data(): - print("Download Dataset") - mnist = mx.test_utils.get_mnist() - batch_size = 100 - train_data = mx.io.NDArrayIter( - mnist["train_data"], mnist["train_label"], batch_size, shuffle=True - ) - val_data = mx.io.NDArrayIter(mnist["test_data"], mnist["test_label"], batch_size) - return train_data, val_data - -Define the training and loss with MXNet. We train the model by looping over the dataset, measure the corresponding loss, and optimize it. - -.. 
code-block:: python - - def train(net, train_data, epoch): - trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.03}) - trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.01}) - accuracy_metric = mx.metric.Accuracy() - loss_metric = mx.metric.CrossEntropy() - metrics = mx.metric.CompositeEvalMetric() - for child_metric in [accuracy_metric, loss_metric]: - metrics.add(child_metric) - softmax_cross_entropy_loss = gluon.loss.SoftmaxCrossEntropyLoss() - for i in range(epoch): - train_data.reset() - num_examples = 0 - for batch in train_data: - data = gluon.utils.split_and_load( - batch.data[0], ctx_list=DEVICE, batch_axis=0 - ) - label = gluon.utils.split_and_load( - batch.label[0], ctx_list=DEVICE, batch_axis=0 - ) - outputs = [] - with ag.record(): - for x, y in zip(data, label): - z = net(x) - loss = softmax_cross_entropy_loss(z, y) - loss.backward() - outputs.append(z.softmax()) - num_examples += len(x) - metrics.update(label, outputs) - trainer.step(batch.data[0].shape[0]) - trainings_metric = metrics.get_name_value() - print("Accuracy & loss at epoch %d: %s" % (i, trainings_metric)) - return trainings_metric, num_examples - - -Next, we define the validation of our machine learning model. We loop over the test set and measure both loss and accuracy on the test set. - -.. 
code-block:: python - - def test(net, val_data): - accuracy_metric = mx.metric.Accuracy() - loss_metric = mx.metric.CrossEntropy() - metrics = mx.metric.CompositeEvalMetric() - for child_metric in [accuracy_metric, loss_metric]: - metrics.add(child_metric) - val_data.reset() - num_examples = 0 - for batch in val_data: - data = gluon.utils.split_and_load(batch.data[0], ctx_list=DEVICE, batch_axis=0) - label = gluon.utils.split_and_load( - batch.label[0], ctx_list=DEVICE, batch_axis=0 - ) - outputs = [] - for x in data: - outputs.append(net(x).softmax()) - num_examples += len(x) - metrics.update(label, outputs) - return metrics.get_name_value(), num_examples - -After defining the training and testing of a MXNet machine learning model, we use these functions to implement a Flower client. - -Our Flower clients will use a simple :code:`Sequential` model: - -.. code-block:: python - - def main(): - def model(): - net = nn.Sequential() - net.add(nn.Dense(256, activation="relu")) - net.add(nn.Dense(64, activation="relu")) - net.add(nn.Dense(10)) - net.collect_params().initialize() - return net - - train_data, val_data = load_data() - - model = model() - init = nd.random.uniform(shape=(2, 784)) - model(init) - -After loading the dataset with :code:`load_data()` we perform one forward propagation to initialize the model and model parameters with :code:`model(init)`. Next, we implement a Flower client. - -The Flower server interacts with clients through an interface called -:code:`Client`. When the server selects a particular client for training, it -sends training instructions over the network. The client receives those -instructions and calls one of the :code:`Client` methods to run your code -(i.e., to train the neural network we defined earlier). - -Flower provides a convenience class called :code:`NumPyClient` which makes it -easier to implement the :code:`Client` interface when your workload uses MXNet. 
-Implementing :code:`NumPyClient` usually means defining the following methods -(:code:`set_parameters` is optional though): - -#. :code:`get_parameters` - * return the model weight as a list of NumPy ndarrays -#. :code:`set_parameters` (optional) - * update the local model weights with the parameters received from the server -#. :code:`fit` - * set the local model weights - * train the local model - * receive the updated local model weights -#. :code:`evaluate` - * test the local model - -They can be implemented in the following way: - -.. code-block:: python - - class MNISTClient(fl.client.NumPyClient): - def get_parameters(self, config): - param = [] - for val in model.collect_params(".*weight").values(): - p = val.data() - param.append(p.asnumpy()) - return param - - def set_parameters(self, parameters): - params = zip(model.collect_params(".*weight").keys(), parameters) - for key, value in params: - model.collect_params().setattr(key, value) - - def fit(self, parameters, config): - self.set_parameters(parameters) - [accuracy, loss], num_examples = train(model, train_data, epoch=2) - results = {"accuracy": float(accuracy[1]), "loss": float(loss[1])} - return self.get_parameters(config={}), num_examples, results - - def evaluate(self, parameters, config): - self.set_parameters(parameters) - [accuracy, loss], num_examples = test(model, val_data) - print("Evaluation accuracy & loss", accuracy, loss) - return float(loss[1]), val_data.batch_size, {"accuracy": float(accuracy[1])} - - -We can now create an instance of our class :code:`MNISTClient` and add one line -to actually run this client: - -.. code-block:: python - - fl.client.start_numpy_client(server_address="0.0.0.0:8080", client=MNISTClient()) - -That's it for the client. We only have to implement :code:`Client` or -:code:`NumPyClient` and call :code:`fl.client.start_client()` or :code:`fl.client.start_numpy_client()`. The string :code:`"0.0.0.0:8080"` tells the client which server to connect to. 
In our case we can run the server and the client on the same machine, therefore we use -:code:`"0.0.0.0:8080"`. If we run a truly federated workload with the server and -clients running on different machines, all that needs to change is the -:code:`server_address` we pass to the client. - -Flower Server -------------- - -For simple workloads we can start a Flower server and leave all the -configuration possibilities at their default values. In a file named -:code:`server.py`, import Flower and start the server: - -.. code-block:: python - - import flwr as fl - - fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3)) - -Train the model, federated! ---------------------------- - -With both client and server ready, we can now run everything and see federated -learning in action. Federated learning systems usually have a server and multiple clients. We -therefore have to start the server first: - -.. code-block:: shell - - $ python server.py - -Once the server is running we can start the clients in different terminals. -Open a new terminal and start the first client: - -.. code-block:: shell - - $ python client.py - -Open another terminal and start the second client: - -.. code-block:: shell - - $ python client.py - -Each client will have its own dataset. -You should now see how the training does in the very first terminal (the one that started the server): - -.. 
code-block:: shell - - INFO flower 2021-03-11 11:59:04,512 | app.py:76 | Flower server running (insecure, 3 rounds) - INFO flower 2021-03-11 11:59:04,512 | server.py:72 | Getting initial parameters - INFO flower 2021-03-11 11:59:09,089 | server.py:74 | Evaluating initial parameters - INFO flower 2021-03-11 11:59:09,089 | server.py:87 | [TIME] FL starting - DEBUG flower 2021-03-11 11:59:11,997 | server.py:165 | fit_round: strategy sampled 2 clients (out of 2) - DEBUG flower 2021-03-11 11:59:14,652 | server.py:177 | fit_round received 2 results and 0 failures - DEBUG flower 2021-03-11 11:59:14,656 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-03-11 11:59:14,811 | server.py:149 | evaluate received 2 results and 0 failures - DEBUG flower 2021-03-11 11:59:14,812 | server.py:165 | fit_round: strategy sampled 2 clients (out of 2) - DEBUG flower 2021-03-11 11:59:18,499 | server.py:177 | fit_round received 2 results and 0 failures - DEBUG flower 2021-03-11 11:59:18,503 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-03-11 11:59:18,784 | server.py:149 | evaluate received 2 results and 0 failures - DEBUG flower 2021-03-11 11:59:18,786 | server.py:165 | fit_round: strategy sampled 2 clients (out of 2) - DEBUG flower 2021-03-11 11:59:22,551 | server.py:177 | fit_round received 2 results and 0 failures - DEBUG flower 2021-03-11 11:59:22,555 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-03-11 11:59:22,789 | server.py:149 | evaluate received 2 results and 0 failures - INFO flower 2021-03-11 11:59:22,789 | server.py:122 | [TIME] FL finished in 13.700094900001204 - INFO flower 2021-03-11 11:59:22,790 | app.py:109 | app_fit: losses_distributed [(1, 1.5717803835868835), (2, 0.6093432009220123), (3, 0.4424773305654526)] - INFO flower 2021-03-11 11:59:22,790 | app.py:110 | app_fit: accuracies_distributed [] - INFO flower 2021-03-11 11:59:22,791 | app.py:111 | app_fit: losses_centralized [] - INFO flower 
2021-03-11 11:59:22,791 | app.py:112 | app_fit: accuracies_centralized [] - DEBUG flower 2021-03-11 11:59:22,793 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-03-11 11:59:23,111 | server.py:149 | evaluate received 2 results and 0 failures - INFO flower 2021-03-11 11:59:23,112 | app.py:121 | app_evaluate: federated loss: 0.4424773305654526 - INFO flower 2021-03-11 11:59:23,112 | app.py:125 | app_evaluate: results [('ipv4:127.0.0.1:44344', EvaluateRes(loss=0.443320095539093, num_examples=100, accuracy=0.0, metrics={'accuracy': 0.8752475247524752})), ('ipv4:127.0.0.1:44346', EvaluateRes(loss=0.44163456559181213, num_examples=100, accuracy=0.0, metrics={'accuracy': 0.8761386138613861}))] - INFO flower 2021-03-11 11:59:23,112 | app.py:127 | app_evaluate: failures [] - -Congratulations! -You've successfully built and run your first federated learning system. -The full `source code `_ for this example can be found in :code:`examples/quickstart-mxnet`. diff --git a/doc/source/tutorial-quickstart-scikitlearn.rst b/doc/source/tutorial-quickstart-scikitlearn.rst index d1d47dc37f19..93322842cc70 100644 --- a/doc/source/tutorial-quickstart-scikitlearn.rst +++ b/doc/source/tutorial-quickstart-scikitlearn.rst @@ -45,41 +45,51 @@ However, before setting up the client and server, we will define all functionali * :code:`get_model_parameters()` * Returns the parameters of a :code:`sklearn` LogisticRegression model * :code:`set_model_params()` - * Sets the parameters of a :code:`sklean` LogisticRegression model + * Sets the parameters of a :code:`sklearn` LogisticRegression model * :code:`set_initial_params()` * Initializes the model parameters that the Flower server will ask for -* :code:`load_mnist()` - * Loads the MNIST dataset using OpenML -* :code:`shuffle()` - * Shuffles data and its label -* :code:`partition()` - * Splits datasets into a number of partitions Please check out :code:`utils.py` `here `_ for more details. 
The pre-defined functions are used in the :code:`client.py` and imported. The :code:`client.py` also requires to import several packages such as Flower and scikit-learn: .. code-block:: python + import argparse import warnings - import flwr as fl - import numpy as np - + from sklearn.linear_model import LogisticRegression from sklearn.metrics import log_loss - + + import flwr as fl import utils + from flwr_datasets import FederatedDataset - -We load the MNIST dataset from `OpenML `_, a popular image classification dataset of handwritten digits for machine learning. The utility :code:`utils.load_mnist()` downloads the training and test data. The training set is split afterwards into 10 partitions with :code:`utils.partition()`. +Prior to local training, we need to load the MNIST dataset, a popular image classification dataset of handwritten digits for machine learning, and partition the dataset for FL. This can be conveniently achieved using `Flower Datasets `_. +The :code:`FederatedDataset.load_partition()` method loads the partitioned training set for each partition ID defined in the :code:`--partition-id` argument. .. 
code-block:: python if __name__ == "__main__": - - (X_train, y_train), (X_test, y_test) = utils.load_mnist() - - partition_id = np.random.choice(10) - (X_train, y_train) = utils.partition(X_train, y_train, 10)[partition_id] + N_CLIENTS = 10 + + parser = argparse.ArgumentParser(description="Flower") + parser.add_argument( + "--partition-id", + type=int, + choices=range(0, N_CLIENTS), + required=True, + help="Specifies the artificial data partition", + ) + args = parser.parse_args() + partition_id = args.partition_id + + fds = FederatedDataset(dataset="mnist", partitioners={"train": N_CLIENTS}) + + dataset = fds.load_partition(partition_id, "train").with_format("numpy") + X, y = dataset["image"].reshape((len(dataset), -1)), dataset["label"] + + X_train, X_test = X[: int(0.8 * len(X))], X[int(0.8 * len(X)) :] + y_train, y_test = y[: int(0.8 * len(y))], y[int(0.8 * len(y)) :] Next, the logistic regression model is defined and initialized with :code:`utils.set_initial_params()`. @@ -168,10 +178,13 @@ First, we import again all required libraries such as Flower and scikit-learn. from flwr.common import NDArrays, Scalar from sklearn.metrics import log_loss from sklearn.linear_model import LogisticRegression - from typing import Dict, Optional + from typing import Dict + + from flwr_datasets import FederatedDataset The number of federated learning rounds is set in :code:`fit_round()` and the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation function is called after each federated learning round and gives you information about loss and accuracy. +Note that we also make use of Flower Datasets here to load the test split of the MNIST dataset for server-side evaluation. .. 
code-block:: python @@ -183,7 +196,9 @@ The evaluation function is called after each federated learning round and gives def get_evaluate_fn(model: LogisticRegression): """Return an evaluation function for server-side evaluation.""" - _, (X_test, y_test) = utils.load_mnist() + fds = FederatedDataset(dataset="mnist", partitioners={"train": 10}) + dataset = fds.load_split("test").with_format("numpy") + X_test, y_test = dataset["image"].reshape((len(dataset), -1)), dataset["label"] def evaluate( server_round: int, parameters: NDArrays, config: Dict[str, Scalar] @@ -199,7 +214,7 @@ The :code:`main` contains the server-side parameter initialization :code:`utils. .. code-block:: python - # Start Flower server for five rounds of federated learning + # Start Flower server for three rounds of federated learning if __name__ == "__main__": model = LogisticRegression() utils.set_initial_params(model) diff --git a/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb b/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb index 2b8dd382bb79..c9d38b417a92 100644 --- a/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb +++ b/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb @@ -13,7 +13,7 @@ "\n", "> [Star Flower on GitHub](https://github.com/adap/flower) ⭐️ and join the Flower community on Slack to connect, ask questions, and get help: [Join Slack](https://flower.ai/join-slack) 🌼 We'd love to hear from you in the `#introductions` channel! And if anything is unclear, head over to the `#questions` channel.\n", "\n", - "Let's get stated!" + "Let's get started!" 
] }, { @@ -145,7 +145,7 @@ " for partition_id in range(NUM_CLIENTS):\n", " partition = fds.load_partition(partition_id, \"train\")\n", " partition = partition.with_transform(apply_transforms)\n", - " partition = partition.train_test_split(train_size=0.8)\n", + " partition = partition.train_test_split(train_size=0.8, seed=42)\n", " trainloaders.append(DataLoader(partition[\"train\"], batch_size=BATCH_SIZE))\n", " valloaders.append(DataLoader(partition[\"test\"], batch_size=BATCH_SIZE))\n", " testset = fds.load_split(\"test\").with_transform(apply_transforms)\n", diff --git a/e2e/bare-client-auth/README.md b/e2e/bare-client-auth/README.md new file mode 100644 index 000000000000..35967ebe2eb0 --- /dev/null +++ b/e2e/bare-client-auth/README.md @@ -0,0 +1,3 @@ +# Bare Flower testing + +This directory is used for testing Flower in a bare minimum scenario, that is, with a dummy model and dummy operations. This is mainly to test the core functionality of Flower independently from any framework. It can easily be extended to test more complex communication set-ups. 
diff --git a/e2e/bare-client-auth/certificate.conf b/e2e/bare-client-auth/certificate.conf new file mode 100644 index 000000000000..ea97fcbb700d --- /dev/null +++ b/e2e/bare-client-auth/certificate.conf @@ -0,0 +1,20 @@ +[req] +default_bits = 4096 +prompt = no +default_md = sha256 +req_extensions = req_ext +distinguished_name = dn + +[dn] +C = DE +ST = HH +O = Flower +CN = localhost + +[req_ext] +subjectAltName = @alt_names + +[alt_names] +DNS.1 = localhost +IP.1 = ::1 +IP.2 = 127.0.0.1 diff --git a/e2e/bare-client-auth/client.py b/e2e/bare-client-auth/client.py new file mode 100644 index 000000000000..a56ba5eca552 --- /dev/null +++ b/e2e/bare-client-auth/client.py @@ -0,0 +1,30 @@ +import flwr as fl +import numpy as np +from pathlib import Path + + +model_params = np.array([1]) +objective = 5 + +# Define Flower client +class FlowerClient(fl.client.NumPyClient): + def get_parameters(self, config): + return model_params + + def fit(self, parameters, config): + model_params = parameters + model_params = [param * (objective/np.mean(param)) for param in model_params] + return model_params, 1, {} + + def evaluate(self, parameters, config): + model_params = parameters + loss = min(np.abs(1 - np.mean(model_params)/objective), 1) + accuracy = 1 - loss + return loss, 1, {"accuracy": accuracy} + +def client_fn(cid): + return FlowerClient().to_client() + +app = fl.client.ClientApp( + client_fn=client_fn, +) diff --git a/e2e/bare-client-auth/driver.py b/e2e/bare-client-auth/driver.py new file mode 100644 index 000000000000..f7bfeb613f6a --- /dev/null +++ b/e2e/bare-client-auth/driver.py @@ -0,0 +1,12 @@ +import flwr as fl +from pathlib import Path + + +# Start Flower server +hist = fl.server.start_driver( + server_address="127.0.0.1:9091", + config=fl.server.ServerConfig(num_rounds=3), + root_certificates=Path("certificates/ca.crt").read_bytes(), +) + +assert hist.losses_distributed[-1][1] == 0 diff --git a/e2e/bare-client-auth/generate.sh b/e2e/bare-client-auth/generate.sh 
new file mode 100755 index 000000000000..ebfdc17b80b5 --- /dev/null +++ b/e2e/bare-client-auth/generate.sh @@ -0,0 +1,72 @@ +#!/bin/bash +# This script will generate all certificates if ca.crt does not exist + +set -e +# Change directory to the script's directory +cd "$(dirname "${BASH_SOURCE[0]}")" + +CERT_DIR=certificates + +# Generate directories if not exists +mkdir -p $CERT_DIR + +# Clearing any existing files in the certificates directory +rm -f $CERT_DIR/* + +# Generate the root certificate authority key and certificate based on key +openssl genrsa -out $CERT_DIR/ca.key 4096 +openssl req \ + -new \ + -x509 \ + -key $CERT_DIR/ca.key \ + -sha256 \ + -subj "/C=DE/ST=HH/O=CA, Inc." \ + -days 365 -out $CERT_DIR/ca.crt + +# Generate a new private key for the server +openssl genrsa -out $CERT_DIR/server.key 4096 + +# Create a signing CSR +openssl req \ + -new \ + -key $CERT_DIR/server.key \ + -out $CERT_DIR/server.csr \ + -config certificate.conf + +# Generate a certificate for the server +openssl x509 \ + -req \ + -in $CERT_DIR/server.csr \ + -CA $CERT_DIR/ca.crt \ + -CAkey $CERT_DIR/ca.key \ + -CAcreateserial \ + -out $CERT_DIR/server.pem \ + -days 365 \ + -sha256 \ + -extfile certificate.conf \ + -extensions req_ext + +KEY_DIR=keys + +mkdir -p $KEY_DIR + +rm -f $KEY_DIR/* + +ssh-keygen -t ecdsa -b 384 -N "" -f "${KEY_DIR}/server_credentials" -C "" + +generate_client_credentials() { + local num_clients=${1:-2} + for ((i=1; i<=num_clients; i++)) + do + ssh-keygen -t ecdsa -b 384 -N "" -f "${KEY_DIR}/client_credentials_$i" -C "" + done +} + +generate_client_credentials "$1" + +printf "%s" "$(cat "${KEY_DIR}/client_credentials_1.pub" | sed 's/.$//')" > $KEY_DIR/client_public_keys.csv +for ((i=2; i<=${1:-2}; i++)) +do + printf ",%s" "$(sed 's/.$//' < "${KEY_DIR}/client_credentials_$i.pub")" >> $KEY_DIR/client_public_keys.csv +done +printf "\n" >> $KEY_DIR/client_public_keys.csv diff --git a/examples/mxnet-from-centralized-to-federated/pyproject.toml 
b/e2e/bare-client-auth/pyproject.toml similarity index 54% rename from examples/mxnet-from-centralized-to-federated/pyproject.toml rename to e2e/bare-client-auth/pyproject.toml index b00b3ddfe412..693fec815474 100644 --- a/examples/mxnet-from-centralized-to-federated/pyproject.toml +++ b/e2e/bare-client-auth/pyproject.toml @@ -3,13 +3,11 @@ requires = ["poetry-core>=1.4.0"] build-backend = "poetry.core.masonry.api" [tool.poetry] -name = "mxnet_example" +name = "bare_client_auth_test" version = "0.1.0" -description = "MXNet example with MNIST and CNN" +description = "Client-auth-enabled bare Federated Learning test with Flower" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = "1.6.0" -mxnet = "1.9.1" -numpy = "1.23.1" +python = "^3.8" +flwr = { path = "../../", develop = true } diff --git a/e2e/bare-client-auth/server.py b/e2e/bare-client-auth/server.py new file mode 100644 index 000000000000..fcad7a3e4522 --- /dev/null +++ b/e2e/bare-client-auth/server.py @@ -0,0 +1,15 @@ +import flwr as fl +from pathlib import Path + + +hist = fl.server.start_server( + server_address="127.0.0.1:8080", + config=fl.server.ServerConfig(num_rounds=3), + certificates=( + Path("certificates/ca.crt").read_bytes(), + Path("certificates/server.pem").read_bytes(), + Path("certificates/server.key").read_bytes(), + ) +) + +assert hist.losses_distributed[-1][1] == 0 or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 diff --git a/e2e/test_driver.sh b/e2e/test_driver.sh index 3d4864a1b0fb..61c868ebc212 100755 --- a/e2e/test_driver.sh +++ b/e2e/test_driver.sh @@ -18,28 +18,48 @@ case "$2" in rest_arg="--rest" server_address="http://localhost:9093" db_arg="--database :flwr-in-memory-state:" + server_auth="" + client_auth_1="" + client_auth_2="" ;; sqlite) rest_arg="" server_address="127.0.0.1:9092" db_arg="--database $(date +%s).db" + server_auth="" + client_auth_1="" + client_auth_2="" + ;; + client-auth) + ./generate.sh + 
rest_arg="" + server_address="127.0.0.1:9092" + db_arg="--database :flwr-in-memory-state:" + server_arg="--certificates certificates/ca.crt certificates/server.pem certificates/server.key" + client_arg="--root-certificates certificates/ca.crt" + server_auth="--require-client-authentication keys/client_public_keys.csv keys/server_credentials keys/server_credentials.pub" + client_auth_1="--authentication-keys keys/client_credentials_1 keys/client_credentials_1.pub" + client_auth_2="--authentication-keys keys/client_credentials_2 keys/client_credentials_2.pub" ;; *) rest_arg="" server_address="127.0.0.1:9092" db_arg="--database :flwr-in-memory-state:" + server_auth="" + client_auth_1="" + client_auth_2="" ;; esac -timeout 2m flower-superlink $server_arg $db_arg $rest_arg & +timeout 2m flower-superlink $server_arg $db_arg $rest_arg $server_auth & sl_pid=$! sleep 3 -timeout 2m flower-client-app client:app $client_arg $rest_arg --server $server_address & +timeout 2m flower-client-app client:app $client_arg $rest_arg --server $server_address $client_auth_1 & cl1_pid=$! sleep 3 -timeout 2m flower-client-app client:app $client_arg $rest_arg --server $server_address & +timeout 2m flower-client-app client:app $client_arg $rest_arg --server $server_address $client_auth_2 & cl2_pid=$! 
sleep 3 diff --git a/examples/advanced-pytorch/client.py b/examples/advanced-pytorch/client.py index d4c8abe3d404..7c1420a2cecd 100644 --- a/examples/advanced-pytorch/client.py +++ b/examples/advanced-pytorch/client.py @@ -46,7 +46,7 @@ def fit(self, parameters, config): batch_size: int = config["batch_size"] epochs: int = config["local_epochs"] - train_valid = self.trainset.train_test_split(self.validation_split) + train_valid = self.trainset.train_test_split(self.validation_split, seed=42) trainset = train_valid["train"] valset = train_valid["test"] diff --git a/examples/advanced-pytorch/utils.py b/examples/advanced-pytorch/utils.py index fd9dab19a70d..c47b4fa38593 100644 --- a/examples/advanced-pytorch/utils.py +++ b/examples/advanced-pytorch/utils.py @@ -14,7 +14,7 @@ def load_partition(partition_id, toy: bool = False): fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) partition = fds.load_partition(partition_id) # Divide data on each node: 80% train, 20% test - partition_train_test = partition.train_test_split(test_size=0.2) + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) partition_train_test = partition_train_test.with_transform(apply_transforms) return partition_train_test["train"], partition_train_test["test"] diff --git a/examples/advanced-tensorflow/client.py b/examples/advanced-tensorflow/client.py index 17d1d2306270..b658a1f9ea04 100644 --- a/examples/advanced-tensorflow/client.py +++ b/examples/advanced-tensorflow/client.py @@ -123,7 +123,7 @@ def load_partition(idx: int): partition.set_format("numpy") # Divide data on each node: 80% train, 20% test - partition = partition.train_test_split(test_size=0.2) + partition = partition.train_test_split(test_size=0.2, seed=42) x_train, y_train = partition["train"]["img"] / 255.0, partition["train"]["label"] x_test, y_test = partition["test"]["img"] / 255.0, partition["test"]["label"] return x_train, y_train, x_test, y_test diff --git 
a/examples/app-pytorch/client.py b/examples/app-pytorch/client.py index ebbe977ecab1..eb84968bb986 100644 --- a/examples/app-pytorch/client.py +++ b/examples/app-pytorch/client.py @@ -18,7 +18,6 @@ # Define FlowerClient and client_fn class FlowerClient(NumPyClient): - def fit(self, parameters, config): set_weights(net, parameters) results = train(net, trainloader, testloader, epochs=1, device=DEVICE) diff --git a/examples/app-pytorch/client_low_level.py b/examples/app-pytorch/client_low_level.py index feea1ee658fe..19268ff84ba4 100644 --- a/examples/app-pytorch/client_low_level.py +++ b/examples/app-pytorch/client_low_level.py @@ -20,16 +20,16 @@ def hello_world_mod(msg, ctx, call_next) -> Message: @app.train() def train(msg: Message, ctx: Context): print("`train` is not implemented, echoing original message") - return msg.create_reply(msg.content, ttl="") + return msg.create_reply(msg.content) @app.evaluate() def eval(msg: Message, ctx: Context): print("`evaluate` is not implemented, echoing original message") - return msg.create_reply(msg.content, ttl="") + return msg.create_reply(msg.content) @app.query() def query(msg: Message, ctx: Context): print("`query` is not implemented, echoing original message") - return msg.create_reply(msg.content, ttl="") + return msg.create_reply(msg.content) diff --git a/examples/app-pytorch/pyproject.toml b/examples/app-pytorch/pyproject.toml index e47dd2db949d..c00e38aef19b 100644 --- a/examples/app-pytorch/pyproject.toml +++ b/examples/app-pytorch/pyproject.toml @@ -11,7 +11,6 @@ authors = ["The Flower Authors "] [tool.poetry.dependencies] python = "^3.8" # Mandatory dependencies -flwr-nightly = { version = "1.8.0.dev20240309", extras = ["simulation"] } -flwr-datasets = { version = "0.0.2", extras = ["vision"] } +flwr = { version = "^1.8.0", extras = ["simulation"] } torch = "2.2.1" torchvision = "0.17.1" diff --git a/examples/app-pytorch/requirements.txt b/examples/app-pytorch/requirements.txt index 016a84043cbe..117e30b2ad56 
100644 --- a/examples/app-pytorch/requirements.txt +++ b/examples/app-pytorch/requirements.txt @@ -1,4 +1,3 @@ -flwr-nightly[simulation]==1.8.0.dev20240309 -flwr-datasets[vision]==0.0.2 +flwr[simulation]>=1.8.0 torch==2.2.1 torchvision==0.17.1 diff --git a/examples/app-pytorch/server_custom.py b/examples/app-pytorch/server_custom.py index 0c2851e2afee..67c1bce99c55 100644 --- a/examples/app-pytorch/server_custom.py +++ b/examples/app-pytorch/server_custom.py @@ -13,6 +13,7 @@ Message, MessageType, Metrics, + DEFAULT_TTL, ) from flwr.common.recordset_compat import fitins_to_recordset, recordset_to_fitres from flwr.server import Driver, History @@ -89,7 +90,7 @@ def main(driver: Driver, context: Context) -> None: message_type=MessageType.TRAIN, dst_node_id=node_id, group_id=str(server_round), - ttl="", + ttl=DEFAULT_TTL, ) messages.append(message) @@ -102,15 +103,19 @@ def main(driver: Driver, context: Context) -> None: all_replies: List[Message] = [] while True: replies = driver.pull_messages(message_ids=message_ids) - print(f"Got {len(replies)} results") + for res in replies: + print(f"Got 1 {'result' if res.has_content() else 'error'}") all_replies += replies if len(all_replies) == len(message_ids): break + print("Pulling messages...") time.sleep(3) - # Collect correct results + # Filter correct results all_fitres = [ - recordset_to_fitres(msg.content, keep_input=True) for msg in all_replies + recordset_to_fitres(msg.content, keep_input=True) + for msg in all_replies + if msg.has_content() ] print(f"Received {len(all_fitres)} results") @@ -127,16 +132,21 @@ def main(driver: Driver, context: Context) -> None: ) metrics_results.append((fitres.num_examples, fitres.metrics)) - # Aggregate parameters (FedAvg) - parameters_aggregated = ndarrays_to_parameters(aggregate(weights_results)) - parameters = parameters_aggregated + if len(weights_results) > 0: + # Aggregate parameters (FedAvg) + parameters_aggregated = ndarrays_to_parameters(aggregate(weights_results)) + 
parameters = parameters_aggregated - # Aggregate metrics - metrics_aggregated = weighted_average(metrics_results) - history.add_metrics_distributed_fit( - server_round=server_round, metrics=metrics_aggregated - ) - print("Round ", server_round, " metrics: ", metrics_aggregated) + # Aggregate metrics + metrics_aggregated = weighted_average(metrics_results) + history.add_metrics_distributed_fit( + server_round=server_round, metrics=metrics_aggregated + ) + print("Round ", server_round, " metrics: ", metrics_aggregated) + else: + print( + f"Round {server_round} got {len(weights_results)} results. Skipping aggregation..." + ) # Slow down the start of the next round time.sleep(sleep_time) diff --git a/examples/app-pytorch/server_low_level.py b/examples/app-pytorch/server_low_level.py index 560babac1b95..7ab79a4a04c8 100644 --- a/examples/app-pytorch/server_low_level.py +++ b/examples/app-pytorch/server_low_level.py @@ -3,7 +3,15 @@ import time import flwr as fl -from flwr.common import Context, NDArrays, Message, MessageType, Metrics, RecordSet +from flwr.common import ( + Context, + NDArrays, + Message, + MessageType, + Metrics, + RecordSet, + DEFAULT_TTL, +) from flwr.server import Driver @@ -30,7 +38,7 @@ def main(driver: Driver, context: Context) -> None: message_type=MessageType.TRAIN, dst_node_id=node_id, group_id=str(server_round), - ttl="", + ttl=DEFAULT_TTL, ) messages.append(message) diff --git a/examples/app-secure-aggregation/pyproject.toml b/examples/app-secure-aggregation/pyproject.toml index 84b6502064c8..fb1f636d8c33 100644 --- a/examples/app-secure-aggregation/pyproject.toml +++ b/examples/app-secure-aggregation/pyproject.toml @@ -11,4 +11,4 @@ authors = ["The Flower Authors "] [tool.poetry.dependencies] python = "^3.8" # Mandatory dependencies -flwr-nightly = { version = "1.8.0.dev20240309", extras = ["simulation"] } +flwr = { version = "^1.8.0", extras = ["simulation"] } diff --git a/examples/app-secure-aggregation/requirements.txt 
b/examples/app-secure-aggregation/requirements.txt index 5bac63a0d44c..2d8be098f264 100644 --- a/examples/app-secure-aggregation/requirements.txt +++ b/examples/app-secure-aggregation/requirements.txt @@ -1 +1 @@ -flwr-nightly[simulation]==1.8.0.dev20240309 +flwr[simulation]>=1.8.0 diff --git a/examples/custom-mods/README.md b/examples/custom-mods/README.md index b0ad668c2dec..6b03abcfbfe0 100644 --- a/examples/custom-mods/README.md +++ b/examples/custom-mods/README.md @@ -288,7 +288,7 @@ $ tree . pip install -r requirements.txt ``` -For [W&B](wandb.ai) you will also need a valid account. +For [W&B](https://wandb.ai) you will also need a valid account. ### Start the long-running Flower server (SuperLink) @@ -328,7 +328,7 @@ flower-server-app server:app --insecure ### Check the results -For W&B, you will need to login to the [website](wandb.ai). +For W&B, you will need to login to the [website](https://wandb.ai). For TensorBoard, you will need to run the following command in your terminal: diff --git a/examples/custom-mods/client.py b/examples/custom-mods/client.py index 2b87a24da19d..614daef6bcf6 100644 --- a/examples/custom-mods/client.py +++ b/examples/custom-mods/client.py @@ -86,7 +86,6 @@ def wandb_mod(msg: Message, context: Context, app: ClientAppCallable) -> Message # if the `ClientApp` just processed a "fit" message, let's log some metrics to W&B if reply.metadata.message_type == MessageType.TRAIN and reply.has_content(): - metrics = reply.content.configs_records results_to_log = dict(metrics.get("fitres.metrics", ConfigsRecord())) diff --git a/examples/doc/source/_static/.gitignore b/examples/doc/source/_static/.gitignore index c2412a5912cc..887023baf484 100644 --- a/examples/doc/source/_static/.gitignore +++ b/examples/doc/source/_static/.gitignore @@ -3,3 +3,4 @@ !favicon.ico !flower-logo.png !tmux_jtop_view.gif +!view-gh.png diff --git a/examples/doc/source/_static/view-gh.png b/examples/doc/source/_static/view-gh.png new file mode 100644 index 
000000000000..afc3f07bc2d5 Binary files /dev/null and b/examples/doc/source/_static/view-gh.png differ diff --git a/examples/doc/source/conf.py b/examples/doc/source/conf.py index bf177aa5ae24..b9c18fba2e18 100644 --- a/examples/doc/source/conf.py +++ b/examples/doc/source/conf.py @@ -24,13 +24,12 @@ import datetime - project = "Flower" copyright = f"{datetime.date.today().year} Flower Labs GmbH" author = "The Flower Authors" # The full version, including alpha/beta/rc tags -release = "1.8.0" +release = "1.9.0" # -- General configuration --------------------------------------------------- @@ -63,8 +62,10 @@ # Sphinx redirects, implemented after the doc filename changes. # To prevent 404 errors and redirect to the new pages. -# redirects = { -# } +redirects = { + "quickstart-mxnet": "index.html", + "mxnet-from-centralized-to-federated": "index.html", +} # -- Options for HTML output ------------------------------------------------- diff --git a/examples/embedded-devices/Dockerfile b/examples/embedded-devices/Dockerfile index a85c05c4bb7a..48602c89970a 100644 --- a/examples/embedded-devices/Dockerfile +++ b/examples/embedded-devices/Dockerfile @@ -8,7 +8,7 @@ RUN pip3 install --upgrade pip # Install flower RUN pip3 install flwr>=1.0 -RUN pip3 install flwr-datsets>=0.2 +RUN pip3 install flwr-datasets>=0.0.2 RUN pip3 install tqdm==4.65.0 WORKDIR /client diff --git a/examples/embedded-devices/client_pytorch.py b/examples/embedded-devices/client_pytorch.py index 6bd69c16567e..411052bfb1ea 100644 --- a/examples/embedded-devices/client_pytorch.py +++ b/examples/embedded-devices/client_pytorch.py @@ -108,7 +108,7 @@ def apply_transforms(batch): for partition_id in range(NUM_CLIENTS): partition = fds.load_partition(partition_id, "train") # Divide data on each node: 90% train, 10% test - partition = partition.train_test_split(test_size=0.1) + partition = partition.train_test_split(test_size=0.1, seed=42) partition = partition.with_transform(apply_transforms) 
trainsets.append(partition["train"]) validsets.append(partition["test"]) diff --git a/examples/embedded-devices/client_tf.py b/examples/embedded-devices/client_tf.py index 49c63ce5d9dc..3df75f76312b 100644 --- a/examples/embedded-devices/client_tf.py +++ b/examples/embedded-devices/client_tf.py @@ -44,7 +44,7 @@ def prepare_dataset(use_mnist: bool): partition = fds.load_partition(partition_id, "train") partition.set_format("numpy") # Divide data on each node: 90% train, 10% test - partition = partition.train_test_split(test_size=0.1) + partition = partition.train_test_split(test_size=0.1, seed=42) x_train, y_train = ( partition["train"][img_key] / 255.0, partition["train"]["label"], diff --git a/examples/fl-dp-sa/README.md b/examples/fl-dp-sa/README.md new file mode 100644 index 000000000000..47eedb70a2b8 --- /dev/null +++ b/examples/fl-dp-sa/README.md @@ -0,0 +1,22 @@ +# fl_dp_sa + +This is a simple example that utilizes central differential privacy with client-side fixed clipping and secure aggregation. +Note: This example is designed for a small number of rounds and is intended for demonstration purposes. + +## Install dependencies + +```bash +# Using pip +pip install . + +# Or using Poetry +poetry install +``` + +## Run + +The example uses the MNIST dataset with a total of 100 clients, with 20 clients sampled in each round. The hyperparameters for DP and SecAgg are specified in `server.py`. 
+ +```shell +flower-simulation --server-app fl_dp_sa.server:app --client-app fl_dp_sa.client:app --num-supernodes 100 +``` diff --git a/examples/fl-dp-sa/fl_dp_sa/__init__.py b/examples/fl-dp-sa/fl_dp_sa/__init__.py new file mode 100644 index 000000000000..741260348ab8 --- /dev/null +++ b/examples/fl-dp-sa/fl_dp_sa/__init__.py @@ -0,0 +1 @@ +"""fl_dp_sa: A Flower / PyTorch app.""" diff --git a/examples/fl-dp-sa/fl_dp_sa/client.py b/examples/fl-dp-sa/fl_dp_sa/client.py new file mode 100644 index 000000000000..104264158833 --- /dev/null +++ b/examples/fl-dp-sa/fl_dp_sa/client.py @@ -0,0 +1,43 @@ +"""fl_dp_sa: A Flower / PyTorch app.""" + +from flwr.client import ClientApp, NumPyClient +from flwr.client.mod import fixedclipping_mod, secaggplus_mod + +from fl_dp_sa.task import DEVICE, Net, get_weights, load_data, set_weights, test, train + + +# Load model and data (simple CNN, CIFAR-10) +net = Net().to(DEVICE) + + +# Define FlowerClient and client_fn +class FlowerClient(NumPyClient): + def __init__(self, trainloader, testloader) -> None: + self.trainloader = trainloader + self.testloader = testloader + + def fit(self, parameters, config): + set_weights(net, parameters) + results = train(net, self.trainloader, self.testloader, epochs=1, device=DEVICE) + return get_weights(net), len(self.trainloader.dataset), results + + def evaluate(self, parameters, config): + set_weights(net, parameters) + loss, accuracy = test(net, self.testloader) + return loss, len(self.testloader.dataset), {"accuracy": accuracy} + + +def client_fn(cid: str): + """Create and return an instance of Flower `Client`.""" + trainloader, testloader = load_data(partition_id=int(cid)) + return FlowerClient(trainloader, testloader).to_client() + + +# Flower ClientApp +app = ClientApp( + client_fn=client_fn, + mods=[ + secaggplus_mod, + fixedclipping_mod, + ], +) diff --git a/examples/fl-dp-sa/fl_dp_sa/server.py b/examples/fl-dp-sa/fl_dp_sa/server.py new file mode 100644 index 000000000000..f7da75997e98 --- 
/dev/null +++ b/examples/fl-dp-sa/fl_dp_sa/server.py @@ -0,0 +1,77 @@ +"""fl_dp_sa: A Flower / PyTorch app.""" + +from typing import List, Tuple + +from flwr.server import Driver, LegacyContext, ServerApp, ServerConfig +from flwr.common import Context, Metrics, ndarrays_to_parameters +from flwr.server.strategy import ( + DifferentialPrivacyClientSideFixedClipping, + FedAvg, +) +from flwr.server.workflow import DefaultWorkflow, SecAggPlusWorkflow + +from fl_dp_sa.task import Net, get_weights + + +# Define metric aggregation function +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + examples = [num_examples for num_examples, _ in metrics] + + # Multiply accuracy of each client by number of examples used + train_losses = [num_examples * m["train_loss"] for num_examples, m in metrics] + train_accuracies = [ + num_examples * m["train_accuracy"] for num_examples, m in metrics + ] + val_losses = [num_examples * m["val_loss"] for num_examples, m in metrics] + val_accuracies = [num_examples * m["val_accuracy"] for num_examples, m in metrics] + + # Aggregate and return custom metric (weighted average) + return { + "train_loss": sum(train_losses) / sum(examples), + "train_accuracy": sum(train_accuracies) / sum(examples), + "val_loss": sum(val_losses) / sum(examples), + "val_accuracy": sum(val_accuracies) / sum(examples), + } + + +# Initialize model parameters +ndarrays = get_weights(Net()) +parameters = ndarrays_to_parameters(ndarrays) + + +# Define strategy +strategy = FedAvg( + fraction_fit=0.2, + fraction_evaluate=0.0, # Disable evaluation for demo purpose + min_fit_clients=20, + min_available_clients=20, + fit_metrics_aggregation_fn=weighted_average, + initial_parameters=parameters, +) +strategy = DifferentialPrivacyClientSideFixedClipping( + strategy, noise_multiplier=0.2, clipping_norm=10, num_sampled_clients=20 +) + + +app = ServerApp() + + +@app.main() +def main(driver: Driver, context: Context) -> None: + # Construct the LegacyContext + context 
= LegacyContext( + state=context.state, + config=ServerConfig(num_rounds=3), + strategy=strategy, + ) + + # Create the train/evaluate workflow + workflow = DefaultWorkflow( + fit_workflow=SecAggPlusWorkflow( + num_shares=7, + reconstruction_threshold=4, + ) + ) + + # Execute + workflow(driver, context) diff --git a/examples/fl-dp-sa/fl_dp_sa/task.py b/examples/fl-dp-sa/fl_dp_sa/task.py new file mode 100644 index 000000000000..6a94571a2369 --- /dev/null +++ b/examples/fl-dp-sa/fl_dp_sa/task.py @@ -0,0 +1,110 @@ +"""fl_dp_sa: A Flower / PyTorch app.""" + +from collections import OrderedDict +from logging import INFO +from flwr_datasets import FederatedDataset + +import torch +import torch.nn as nn +import torch.nn.functional as F +from flwr.common.logger import log +from torch.utils.data import DataLoader +from torchvision.transforms import Compose, Normalize, ToTensor + + +DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + +class Net(nn.Module): + """Model.""" + + def __init__(self) -> None: + super(Net, self).__init__() + self.conv1 = nn.Conv2d(1, 6, 3, padding=1) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + batch_size = x.size(0) + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(batch_size, -1) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + return self.fc3(x) + + +def load_data(partition_id): + """Load partition MNIST data.""" + fds = FederatedDataset(dataset="mnist", partitioners={"train": 100}) + partition = fds.load_partition(partition_id) + # Divide data on each node: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + pytorch_transforms = Compose([ToTensor(), Normalize((0.5,), (0.5,))]) + + def apply_transforms(batch): + """Apply transforms to the partition from 
FederatedDataset.""" + batch["image"] = [pytorch_transforms(img) for img in batch["image"]] + return batch + + partition_train_test = partition_train_test.with_transform(apply_transforms) + trainloader = DataLoader(partition_train_test["train"], batch_size=32, shuffle=True) + testloader = DataLoader(partition_train_test["test"], batch_size=32) + return trainloader, testloader + + +def train(net, trainloader, valloader, epochs, device): + """Train the model on the training set.""" + net.to(device) # move model to GPU if available + criterion = torch.nn.CrossEntropyLoss().to(device) + optimizer = torch.optim.Adam(net.parameters()) + net.train() + for _ in range(epochs): + for batch in trainloader: + images = batch["image"].to(device) + labels = batch["label"].to(device) + optimizer.zero_grad() + loss = criterion(net(images), labels) + loss.backward() + optimizer.step() + + train_loss, train_acc = test(net, trainloader) + val_loss, val_acc = test(net, valloader) + + results = { + "train_loss": train_loss, + "train_accuracy": train_acc, + "val_loss": val_loss, + "val_accuracy": val_acc, + } + return results + + +def test(net, testloader): + """Validate the model on the test set.""" + net.to(DEVICE) + criterion = torch.nn.CrossEntropyLoss() + correct, loss = 0, 0.0 + with torch.no_grad(): + for batch in testloader: + images = batch["image"].to(DEVICE) + labels = batch["label"].to(DEVICE) + outputs = net(images.to(DEVICE)) + labels = labels.to(DEVICE) + loss += criterion(outputs, labels).item() + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + accuracy = correct / len(testloader.dataset) + return loss, accuracy + + +def get_weights(net): + return [val.cpu().numpy() for _, val in net.state_dict().items()] + + +def set_weights(net, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) diff --git 
a/src/py/flwr/cli/new/templates/app/flower.toml.tpl b/examples/fl-dp-sa/flower.toml similarity index 62% rename from src/py/flwr/cli/new/templates/app/flower.toml.tpl rename to examples/fl-dp-sa/flower.toml index 07a6ffaf9e49..ea2e98206791 100644 --- a/src/py/flwr/cli/new/templates/app/flower.toml.tpl +++ b/examples/fl-dp-sa/flower.toml @@ -1,5 +1,5 @@ [project] -name = "$project_name" +name = "fl_dp_sa" version = "1.0.0" description = "" license = "Apache-2.0" @@ -9,5 +9,5 @@ authors = [ readme = "README.md" [flower.components] -serverapp = "$project_name.server:app" -clientapp = "$project_name.client:app" +serverapp = "fl_dp_sa.server:app" +clientapp = "fl_dp_sa.client:app" diff --git a/examples/fl-dp-sa/pyproject.toml b/examples/fl-dp-sa/pyproject.toml new file mode 100644 index 000000000000..1ca343b072d9 --- /dev/null +++ b/examples/fl-dp-sa/pyproject.toml @@ -0,0 +1,21 @@ +[build-system] +requires = ["poetry-core>=1.4.0"] +build-backend = "poetry.core.masonry.api" + +[tool.poetry] +name = "fl-dp-sa" +version = "0.1.0" +description = "" +license = "Apache-2.0" +authors = [ + "The Flower Authors <hello@flower.ai>", +] +readme = "README.md" + +[tool.poetry.dependencies] +python = "^3.9" +# Mandatory dependencies +flwr = { version = "^1.8.0", extras = ["simulation"] } +flwr-datasets = { version = "0.0.2", extras = ["vision"] } +torch = "2.2.1" +torchvision = "0.17.1" diff --git a/src/py/flwr/cli/new/templates/app/requirements.pytorch.txt.tpl b/examples/fl-dp-sa/requirements.txt similarity index 58% rename from src/py/flwr/cli/new/templates/app/requirements.pytorch.txt.tpl rename to examples/fl-dp-sa/requirements.txt index ddb8a814447b..f20b9d71e339 100644 --- a/src/py/flwr/cli/new/templates/app/requirements.pytorch.txt.tpl +++ b/examples/fl-dp-sa/requirements.txt @@ -1,4 +1,4 @@ -flwr-nightly[simulation]==1.8.0.dev20240313 +flwr[simulation]>=1.8.0 flwr-datasets[vision]==0.0.2 torch==2.2.1 torchvision==0.17.1 diff --git a/examples/flower-in-30-minutes/tutorial.ipynb 
b/examples/flower-in-30-minutes/tutorial.ipynb index 0e42cff924e8..9f0c86a2507a 100644 --- a/examples/flower-in-30-minutes/tutorial.ipynb +++ b/examples/flower-in-30-minutes/tutorial.ipynb @@ -13,7 +13,7 @@ "\n", "> Star Flower on [GitHub ⭐️](https://github.com/adap/flower) and join the Flower community on Slack to connect, ask questions, and get help: [Join Slack 🌼](https://flower.ai/join-slack/). We'd love to hear from you in the #introductions channel! And if anything is unclear, head over to the #questions channel.\n", "\n", - "Let's get stated!" + "Let's get started!" ] }, { diff --git a/examples/flower-via-docker-compose/helpers/load_data.py b/examples/flower-via-docker-compose/helpers/load_data.py index 1f2784946868..b7d6b0de26c5 100644 --- a/examples/flower-via-docker-compose/helpers/load_data.py +++ b/examples/flower-via-docker-compose/helpers/load_data.py @@ -25,7 +25,7 @@ def load_data(data_sampling_percentage=0.5, client_id=1, total_clients=2): partition.set_format("numpy") # Divide data on each client: 80% train, 20% test - partition = partition.train_test_split(test_size=0.2) + partition = partition.train_test_split(test_size=0.2, seed=42) x_train, y_train = partition["train"]["img"] / 255.0, partition["train"]["label"] x_test, y_test = partition["test"]["img"] / 255.0, partition["test"]["label"] diff --git a/examples/flower-via-docker-compose/requirements.txt b/examples/flower-via-docker-compose/requirements.txt index b93e5b1d9f2b..d08937c4d02a 100644 --- a/examples/flower-via-docker-compose/requirements.txt +++ b/examples/flower-via-docker-compose/requirements.txt @@ -1,4 +1,4 @@ -flwr==1.7.0 +flwr==1.8.0 tensorflow==2.13.1 numpy==1.24.3 prometheus_client == 0.19.0 diff --git a/examples/llm-flowertune/README.md b/examples/llm-flowertune/README.md index 60e183d2a9c0..4f98072f8c7f 100644 --- a/examples/llm-flowertune/README.md +++ b/examples/llm-flowertune/README.md @@ -1,16 +1,14 @@ -# Federated Large Language Model (LLM) Fine-tuning with Flower +# 
LLM FlowerTune: Federated LLM Fine-tuning with Flower Large language models (LLMs), which have been trained on vast amounts of publicly accessible data, have shown remarkable effectiveness in a wide range of areas. However, despite the fact that more data typically leads to improved performance, there is a concerning prospect that the supply of high-quality public data will deplete within a few years. Federated LLM training could unlock access to an endless pool of distributed private data by allowing multiple data owners to collaboratively train a shared model without the need to exchange raw data. This introductory example conducts federated instruction tuning with pretrained [LLama2](https://huggingface.co/openlm-research) models on [Alpaca-GPT4](https://huggingface.co/datasets/vicgalle/alpaca-gpt4) dataset. -We use [Flower Datasets](https://flower.dev/docs/datasets/) to download, partition and preprocess the dataset. -The fine-tuning is done using the [🤗PEFT](https://huggingface.co/docs/peft/en/index) library. -We use Flower's Simulation Engine to simulate the LLM fine-tuning process in federated way, +We implement LLM FlowerTune by integrating a bundle of techniques: 1) We use [Flower Datasets](https://flower.dev/docs/datasets/) to download, partition and preprocess the dataset. 2) The fine-tuning is done using the [🤗PEFT](https://huggingface.co/docs/peft/en/index) library. 3) We use Flower's Simulation Engine to simulate the LLM fine-tuning process in a federated way, which allows users to perform the training on a single GPU. -## Environments Setup +## Environment Setup Start by cloning the code example. 
We prepared a single-line command that you can copy into your shell which will checkout the example for you: diff --git a/examples/llm-flowertune/requirements.txt b/examples/llm-flowertune/requirements.txt index c7ff57b403f7..7c66612eb2a5 100644 --- a/examples/llm-flowertune/requirements.txt +++ b/examples/llm-flowertune/requirements.txt @@ -1,8 +1,9 @@ -flwr-nightly[rest,simulation] -flwr_datasets==0.0.2 +flwr[rest,simulation]>=1.8.0, <2.0 +flwr-datasets>=0.0.2 hydra-core==1.3.2 trl==0.7.2 bitsandbytes==0.41.3 scipy==1.11.2 peft==0.4.0 fschat[model_worker,webui]==0.2.35 +transformers==4.38.1 diff --git a/examples/mxnet-from-centralized-to-federated/.gitignore b/examples/mxnet-from-centralized-to-federated/.gitignore deleted file mode 100644 index 10d00b5797e2..000000000000 --- a/examples/mxnet-from-centralized-to-federated/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.gz diff --git a/examples/mxnet-from-centralized-to-federated/README.md b/examples/mxnet-from-centralized-to-federated/README.md deleted file mode 100644 index 2c3f240d8978..000000000000 --- a/examples/mxnet-from-centralized-to-federated/README.md +++ /dev/null @@ -1,81 +0,0 @@ -# MXNet: From Centralized To Federated - -> Note the MXNet project has ended, and is now in [Attic](https://attic.apache.org/projects/mxnet.html). The MXNet GitHub has also [been archived](https://github.com/apache/mxnet). As a result, this example won't be receiving more updates. Using MXNet is no longer recommnended. - -This example demonstrates how an already existing centralized MXNet-based machine learning project can be federated with Flower. - -This introductory example for Flower uses MXNet, but you're not required to be a MXNet expert to run the example. The example will help you to understand how Flower can be used to build federated learning use cases based on an existing MXNet project. - -## Project Setup - -Start by cloning the example project. 
We prepared a single-line command that you can copy into your shell which will checkout the example for you: - -```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/mxnet-from-centralized-to-federated . && rm -rf flower && cd mxnet-from-centralized-to-federated -``` - -This will create a new directory called `mxnet-from-centralized-to-federated` containing the following files: - -```shell --- pyproject.toml --- requirements.txt --- mxnet_mnist.py --- client.py --- server.py --- README.md -``` - -### Installing Dependencies - -Project dependencies (such as `mxnet` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. - -#### Poetry - -```shell -poetry install -poetry shell -``` - -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: - -```shell -poetry run python3 -c "import flwr" -``` - -If you don't see any errors you're good to go! - -#### pip - -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. - -```shell -pip install -r requirements.txt -``` - -## Run MXNet Federated - -This MXNet example is based on the [Handwritten Digit Recognition](https://mxnet.apache.org/versions/1.7.0/api/python/docs/tutorials/packages/gluon/image/mnist.html) tutorial and uses the MNIST dataset (hand-written digits with 28x28 pixels in greyscale with 10 classes). Feel free to consult the tutorial if you want to get a better understanding of MXNet. 
The file `mxnet_mnist.py` contains all the steps that are described in the tutorial. It loads the dataset and a sequential model, trains the model with the training set, and evaluates the trained model on the test set. - -The only things we need are a simple Flower server (in `server.py`) and a Flower client (in `client.py`). The Flower client basically takes model and training code tells Flower how to call it. - -Start the server in a terminal as follows: - -```shell -python3 server.py -``` - -Now that the server is running and waiting for clients, we can start two clients that will participate in the federated learning process. To do so simply open two more terminal windows and run the following commands. - -Start client 1 in the first terminal: - -```shell -python3 client.py -``` - -Start client 2 in the second terminal: - -```shell -python3 client.py -``` - -You are now training a MXNet-based classifier on MNIST, federated across two clients. The setup is of course simplified since both clients hold the same dataset, but you can now continue with your own explorations. How about changing from a sequential model to a CNN? How about adding more clients? 
diff --git a/examples/mxnet-from-centralized-to-federated/client.py b/examples/mxnet-from-centralized-to-federated/client.py deleted file mode 100644 index bb666a26508e..000000000000 --- a/examples/mxnet-from-centralized-to-federated/client.py +++ /dev/null @@ -1,93 +0,0 @@ -"""Flower client example using MXNet for MNIST classification.""" - -from typing import Dict, List, Tuple - -import flwr as fl -import numpy as np -import mxnet as mx -from mxnet import nd - -import mxnet_mnist - - -# Flower Client -class MNISTClient(fl.client.NumPyClient): - """Flower client implementing MNIST classification using MXNet.""" - - def __init__( - self, - model: mxnet_mnist.model(), - train_data: mx.io.NDArrayIter, - val_data: mx.io.NDArrayIter, - device: mx.context, - ) -> None: - self.model = model - self.train_data = train_data - self.val_data = val_data - self.device = device - - def get_parameters(self, config: Dict) -> List[np.ndarray]: - # Return model parameters as a list of NumPy Arrays - param = [] - for val in self.model.collect_params(".*weight").values(): - p = val.data() - # convert parameters from NDArray to Numpy Array required by Flower Numpy Client - param.append(p.asnumpy()) - return param - - def set_parameters(self, parameters: List[np.ndarray]) -> None: - # Collect model parameters and set new weight values - params = zip(self.model.collect_params(".*weight").keys(), parameters) - for key, value in params: - self.model.collect_params().setattr(key, value) - - def fit( - self, parameters: List[np.ndarray], config: Dict - ) -> Tuple[List[np.ndarray], int, Dict]: - # Set model parameters, train model, return updated model parameters - self.set_parameters(parameters) - [accuracy, loss], num_examples = mxnet_mnist.train( - self.model, self.train_data, epoch=2, device=self.device - ) - results = {"accuracy": accuracy[1], "loss": loss[1]} - return self.get_parameters(config={}), num_examples, results - - def evaluate( - self, parameters: List[np.ndarray], config: 
Dict - ) -> Tuple[int, float, Dict]: - # Set model parameters, evaluate model on local test dataset, return result - self.set_parameters(parameters) - [accuracy, loss], num_examples = mxnet_mnist.test( - self.model, self.val_data, device=self.device - ) - print("Evaluation accuracy & loss", accuracy, loss) - return ( - float(loss[1]), - num_examples, - {"accuracy": float(accuracy[1])}, - ) - - -def main() -> None: - """Load data, start MNISTClient.""" - - # Set context to GPU or - if not available - to CPU - DEVICE = [mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()] - - # Load data - train_data, val_data = mxnet_mnist.load_data() - - # Load model (from centralized training) - model = mxnet_mnist.model() - - # Do one forward propagation to initialize parameters - init = nd.random.uniform(shape=(2, 784)) - model(init) - - # Start Flower client - client = MNISTClient(model, train_data, val_data, DEVICE) - fl.client.start_numpy_client(server_address="0.0.0.0:8080", client=client) - - -if __name__ == "__main__": - main() diff --git a/examples/mxnet-from-centralized-to-federated/mxnet_mnist.py b/examples/mxnet-from-centralized-to-federated/mxnet_mnist.py deleted file mode 100644 index 5cf39da7c9ca..000000000000 --- a/examples/mxnet-from-centralized-to-federated/mxnet_mnist.py +++ /dev/null @@ -1,144 +0,0 @@ -"""MXNet MNIST image classification. 
- -The code is generally adapted from: - -https://mxnet.apache.org/api/python/docs/tutorials/packages/gluon/image/mnist.html -""" - -from typing import List, Tuple -import mxnet as mx -from mxnet import gluon -from mxnet.gluon import nn -from mxnet import autograd as ag -import mxnet.ndarray as F -from mxnet import nd - -# Fixing the random seed -mx.random.seed(42) - - -def load_data() -> Tuple[mx.io.NDArrayIter, mx.io.NDArrayIter]: - print("Download Dataset") - # Download MNIST data - mnist = mx.test_utils.get_mnist() - batch_size = 100 - train_data = mx.io.NDArrayIter( - mnist["train_data"], mnist["train_label"], batch_size, shuffle=True - ) - val_data = mx.io.NDArrayIter(mnist["test_data"], mnist["test_label"], batch_size) - return train_data, val_data - - -def model(): - # Define simple Sequential model - net = nn.Sequential() - net.add(nn.Dense(256, activation="relu")) - net.add(nn.Dense(64, activation="relu")) - net.add(nn.Dense(10)) - net.collect_params().initialize() - return net - - -def train( - net: mx.gluon.nn, train_data: mx.io.NDArrayIter, epoch: int, device: mx.context -) -> Tuple[List[float], int]: - trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.01}) - # Use Accuracy and Cross Entropy Loss as the evaluation metric. - accuracy_metric = mx.metric.Accuracy() - loss_metric = mx.metric.CrossEntropy() - metrics = mx.metric.CompositeEvalMetric() - for child_metric in [accuracy_metric, loss_metric]: - metrics.add(child_metric) - softmax_cross_entropy_loss = gluon.loss.SoftmaxCrossEntropyLoss() - for i in range(epoch): - # Reset the train data iterator. - train_data.reset() - # Calculate number of samples - num_examples = 0 - # Loop over the train data iterator. - for batch in train_data: - # Splits train data into multiple slices along batch_axis - # and copy each slice into a context. 
- data = gluon.utils.split_and_load( - batch.data[0], ctx_list=device, batch_axis=0 - ) - # Splits train labels into multiple slices along batch_axis - # and copy each slice into a context. - label = gluon.utils.split_and_load( - batch.label[0], ctx_list=device, batch_axis=0 - ) - outputs = [] - # Inside training scope - with ag.record(): - for x, y in zip(data, label): - z = net(x) - # Computes softmax cross entropy loss. - loss = softmax_cross_entropy_loss(z, y) - # Backpropogate the error for one iteration. - loss.backward() - outputs.append(z.softmax()) - num_examples += len(x) - # Updates internal evaluation - metrics.update(label, outputs) - # Make one step of parameter update. Trainer needs to know the - # batch size of data to normalize the gradient by 1/batch_size. - trainer.step(batch.data[0].shape[0]) - # Gets the evaluation result. - trainings_metric = metrics.get_name_value() - print("Accuracy & loss at epoch %d: %s" % (i, trainings_metric)) - return trainings_metric, num_examples - - -def test( - net: mx.gluon.nn, val_data: mx.io.NDArrayIter, device: mx.context -) -> Tuple[List[float], int]: - # Use Accuracy as the evaluation metric. - accuracy_metric = mx.metric.Accuracy() - loss_metric = mx.metric.CrossEntropy() - metrics = mx.metric.CompositeEvalMetric() - for child_metric in [accuracy_metric, loss_metric]: - metrics.add(child_metric) - # Reset the validation data iterator. - val_data.reset() - # Get number of samples for val_dat - num_examples = 0 - # Loop over the validation data iterator. - for batch in val_data: - # Splits validation data into multiple slices along batch_axis - # and copy each slice into a context. - data = gluon.utils.split_and_load(batch.data[0], ctx_list=device, batch_axis=0) - # Splits validation label into multiple slices along batch_axis - # and copy each slice into a context. 
- label = gluon.utils.split_and_load( - batch.label[0], ctx_list=device, batch_axis=0 - ) - outputs = [] - for x in data: - outputs.append(net(x).softmax()) - num_examples += len(x) - # Updates internal evaluation - metrics.update(label, outputs) - return metrics.get_name_value(), num_examples - - -def main(): - # Set context to GPU or - if not available - to CPU - DEVICE = [mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()] - # Load train and validation data - train_data, val_data = load_data() - # Define sequential model - net = model() - init = nd.random.uniform(shape=(2, 784)) - net(init) - # Start model training based on training set - train(net=net, train_data=train_data, epoch=2, device=DEVICE) - # Evaluate model using loss and accuracy - eval_metric, _ = test(net=net, val_data=val_data, device=DEVICE) - acc = eval_metric[0] - loss = eval_metric[1] - print("Evaluation Loss: ", loss) - print("Evaluation Accuracy: ", acc) - - -if __name__ == "__main__": - main() diff --git a/examples/mxnet-from-centralized-to-federated/requirements.txt b/examples/mxnet-from-centralized-to-federated/requirements.txt deleted file mode 100644 index 8dd6f7150dfd..000000000000 --- a/examples/mxnet-from-centralized-to-federated/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -flwr==1.6.0 -mxnet==1.9.1 -numpy==1.23.1 diff --git a/examples/mxnet-from-centralized-to-federated/server.py b/examples/mxnet-from-centralized-to-federated/server.py deleted file mode 100644 index 871aa4e8ec99..000000000000 --- a/examples/mxnet-from-centralized-to-federated/server.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Flower server example.""" - -import flwr as fl - -if __name__ == "__main__": - fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), - ) diff --git a/examples/pytorch-from-centralized-to-federated/cifar.py b/examples/pytorch-from-centralized-to-federated/cifar.py index 277a21da2e70..c592b63b0042 100644 --- 
a/examples/pytorch-from-centralized-to-federated/cifar.py +++ b/examples/pytorch-from-centralized-to-federated/cifar.py @@ -56,7 +56,7 @@ def load_data(partition_id: int): fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) partition = fds.load_partition(partition_id) # Divide data on each node: 80% train, 20% test - partition_train_test = partition.train_test_split(test_size=0.2) + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) pytorch_transforms = Compose( [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] ) diff --git a/examples/quickstart-cpp/README.md b/examples/quickstart-cpp/README.md index d8982048793c..d6cbeebe1bc6 100644 --- a/examples/quickstart-cpp/README.md +++ b/examples/quickstart-cpp/README.md @@ -1,4 +1,4 @@ -# Flower Clients in C++ +# Flower Clients in C++ (under development) In this example you will train a linear model on synthetic data using C++ clients. @@ -12,7 +12,7 @@ Many thanks to the original contributors to this code: ## Install requirements -You'll need CMake and Python. +You'll need CMake and Python with `flwr` installed. ### Building the example @@ -23,16 +23,20 @@ cmake -S . 
-B build cmake --build build ``` -## Run the server and two clients in separate terminals +## Run the `Flower SuperLink`, the two clients, and the `Flower ServerApp` in separate terminals ```bash -python server.py +flwr-superlink --insecure ``` ```bash -build/flwr_client 0 127.0.0.1:8080 +build/flwr_client 0 127.0.0.1:9092 ``` ```bash -build/flwr_client 1 127.0.0.1:8080 +build/flwr_client 1 127.0.0.1:9092 +``` + +```bash +flower-server-app server:app --insecure ``` diff --git a/examples/quickstart-cpp/driver.py b/examples/quickstart-cpp/driver.py deleted file mode 100644 index f19cf0e9bd98..000000000000 --- a/examples/quickstart-cpp/driver.py +++ /dev/null @@ -1,10 +0,0 @@ -import flwr as fl -from fedavg_cpp import FedAvgCpp - -# Start Flower server for three rounds of federated learning -if __name__ == "__main__": - fl.server.start_driver( - server_address="0.0.0.0:9091", - config=fl.server.ServerConfig(num_rounds=3), - strategy=FedAvgCpp(), - ) diff --git a/examples/quickstart-cpp/fedavg_cpp.py b/examples/quickstart-cpp/fedavg_cpp.py index 672858fb8c48..cd62d07bb848 100644 --- a/examples/quickstart-cpp/fedavg_cpp.py +++ b/examples/quickstart-cpp/fedavg_cpp.py @@ -82,7 +82,6 @@ def aggregate_evaluate( # Do not aggregate if there are failures and failures are not accepted if not self.accept_failures and failures: return None, {} - print(results[0][1]) loss_aggregated = weighted_loss_avg( [ ( diff --git a/examples/quickstart-cpp/server.py b/examples/quickstart-cpp/server.py index aa595b498786..8ad8e0b3647c 100644 --- a/examples/quickstart-cpp/server.py +++ b/examples/quickstart-cpp/server.py @@ -2,17 +2,15 @@ import numpy as np from fedavg_cpp import FedAvgCpp, weights_to_parameters -# Start Flower server for three rounds of federated learning -if __name__ == "__main__": - model_size = 2 - initial_weights = [ - np.array([1.0, 2.0], dtype=np.float64), - np.array([3.0], dtype=np.float64), - ] - initial_parameters = weights_to_parameters(initial_weights) - strategy = 
FedAvgCpp(initial_parameters=initial_parameters) - fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, - ) +model_size = 2 +initial_weights = [ + np.array([1.0, 2.0], dtype=np.float64), + np.array([3.0], dtype=np.float64), +] +initial_parameters = weights_to_parameters(initial_weights) +strategy = FedAvgCpp(initial_parameters=initial_parameters) + +app = fl.server.ServerApp( + config=fl.server.ServerConfig(num_rounds=3), + strategy=strategy, +) diff --git a/examples/quickstart-cpp/src/main.cc b/examples/quickstart-cpp/src/main.cc index f294f9d69473..f645360992c2 100644 --- a/examples/quickstart-cpp/src/main.cc +++ b/examples/quickstart-cpp/src/main.cc @@ -2,17 +2,10 @@ #include "start.h" int main(int argc, char **argv) { - if (argc != 3 && argc != 4) { - std::cout << "Client takes three mandatory arguments and one optional as " - "follows: " - << std::endl; - std::cout << "./client CLIENT_ID SERVER_URL [GRPC_MODE]" << std::endl; - std::cout - << "GRPC_MODE is optional and can be either 'bidi' (default) or 'rere'." 
- << std::endl; - std::cout << "Example: ./flwr_client 0 '127.0.0.1:8080' bidi" << std::endl; - std::cout << "This is the same as: ./flwr_client 0 '127.0.0.1:8080'" - << std::endl; + if (argc != 3) { + std::cout << "Client takes 2 mandatory arguments as follows: " << std::endl; + std::cout << "./client CLIENT_ID SERVER_URL" << std::endl; + std::cout << "Example: ./flwr_client 0 '127.0.0.1:8080'" << std::endl; return 0; } @@ -45,15 +38,8 @@ int main(int argc, char **argv) { // Define a server address std::string server_add = SERVER_URL; - if (argc == 4 && std::string(argv[3]) == "rere") { - std::cout << "Starting rere client" << std::endl; - // Start rere client - start::start_rere_client(server_add, &client); - } else { - std::cout << "Starting bidi client" << std::endl; - // Start bidi client - start::start_client(server_add, &client); - } + std::cout << "Starting rere client" << std::endl; + start::start_client(server_add, &client); return 0; } diff --git a/examples/quickstart-huggingface/client.py b/examples/quickstart-huggingface/client.py index 9be08d0cbcf4..a9d48bfa8f13 100644 --- a/examples/quickstart-huggingface/client.py +++ b/examples/quickstart-huggingface/client.py @@ -22,9 +22,9 @@ def load_data(partition_id): fds = FederatedDataset(dataset="imdb", partitioners={"train": 1_000}) partition = fds.load_partition(partition_id) # Divide data: 80% train, 20% test - partition_train_test = partition.train_test_split(test_size=0.2) + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) - tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT) + tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT, model_max_length=512) def tokenize_function(examples): return tokenizer(examples["text"], truncation=True) diff --git a/examples/quickstart-mlcube/dev/mnist.py b/examples/quickstart-mlcube/dev/mnist.py index e52e2cba85c7..55fb8fae62a7 100644 --- a/examples/quickstart-mlcube/dev/mnist.py +++ b/examples/quickstart-mlcube/dev/mnist.py @@ -36,6 +36,7 @@ 
def create_directory(path: str) -> None: def download(task_args: List[str]) -> None: """Task: download. + Input parameters: --data_dir """ @@ -81,6 +82,7 @@ def download(task_args: List[str]) -> None: def train(task_args: List[str]) -> None: """Task: train. + Input parameters: --data_dir, --log_dir, --model_dir, --parameters_file """ @@ -175,6 +177,7 @@ def train(task_args: List[str]) -> None: def evaluate(task_args: List[str]) -> None: """Task: train. + Input parameters: --data_dir, --log_dir, --model_dir, --parameters_file """ diff --git a/examples/quickstart-mlx/client.py b/examples/quickstart-mlx/client.py index faba2b94d6bd..344cfc65e42d 100644 --- a/examples/quickstart-mlx/client.py +++ b/examples/quickstart-mlx/client.py @@ -107,7 +107,7 @@ def evaluate(self, parameters, config): fds = FederatedDataset(dataset="mnist", partitioners={"train": 3}) partition = fds.load_partition(partition_id=args.partition_id) - partition_splits = partition.train_test_split(test_size=0.2) + partition_splits = partition.train_test_split(test_size=0.2, seed=42) partition_splits["train"].set_format("numpy") partition_splits["test"].set_format("numpy") diff --git a/examples/quickstart-mlx/requirements.txt b/examples/quickstart-mlx/requirements.txt index 0c3ea45ee188..b56f7a15bfb9 100644 --- a/examples/quickstart-mlx/requirements.txt +++ b/examples/quickstart-mlx/requirements.txt @@ -1,4 +1,4 @@ flwr>=1.0, <2.0 mlx==0.0.3 numpy==1.24.4 -flwr-datasets["vision"]>=0.0.2, <1.0 +flwr-datasets[vision]>=0.0.2, <1.0.0 diff --git a/examples/quickstart-monai/pyproject.toml b/examples/quickstart-monai/pyproject.toml index 66a56ee2270b..b1713f05f2ef 100644 --- a/examples/quickstart-monai/pyproject.toml +++ b/examples/quickstart-monai/pyproject.toml @@ -6,7 +6,7 @@ build-backend = "poetry.core.masonry.api" name = "quickstart-monai" version = "0.1.0" description = "MONAI Federated Learning Quickstart with Flower" -authors = ["The Flower Authors "] +authors = ["The Flower Authors "] 
[tool.poetry.dependencies] python = ">=3.8,<3.11" diff --git a/examples/quickstart-mxnet/.gitignore b/examples/quickstart-mxnet/.gitignore deleted file mode 100644 index 10d00b5797e2..000000000000 --- a/examples/quickstart-mxnet/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.gz diff --git a/examples/quickstart-mxnet/README.md b/examples/quickstart-mxnet/README.md deleted file mode 100644 index 37e01ef2707c..000000000000 --- a/examples/quickstart-mxnet/README.md +++ /dev/null @@ -1,78 +0,0 @@ -# Flower Example using MXNet - -> Note the MXNet project has ended, and is now in [Attic](https://attic.apache.org/projects/mxnet.html). The MXNet GitHub has also [been archived](https://github.com/apache/mxnet). As a result, this example won't be receiving more updates. Using MXNet is no longer recommnended. - -This example demonstrates how to run a MXNet machine learning project federated with Flower. - -This introductory example for Flower uses MXNet, but you're not required to be a MXNet expert to run the example. The example will help you to understand how Flower can be used to build federated learning use cases based on an existing MXNet projects. - -## Project Setup - -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: - -```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/quickstart-mxnet . && rm -rf flower && cd quickstart-mxnet -``` - -This will create a new directory called `quickstart-mxnet` containing the following files: - -```shell --- pyproject.toml --- requirements.txt --- client.py --- server.py --- README.md -``` - -### Installing Dependencies - -Project dependencies (such as `mxnet` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. 
We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. - -#### Poetry - -```shell -poetry install -poetry shell -``` - -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: - -```shell -poetry run python3 -c "import flwr" -``` - -If you don't see any errors you're good to go! - -#### pip - -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. - -```shell -pip install -r requirements.txt -``` - -## Run MXNet Federated - -This MXNet example is based on the [Handwritten Digit Recognition](https://mxnet.apache.org/versions/1.7.0/api/python/docs/tutorials/packages/gluon/image/mnist.html) tutorial and uses the MNIST dataset (hand-written digits with 28x28 pixels in greyscale with 10 classes). Feel free to consult the tutorial if you want to get a better understanding of MXNet. The file `client.py` contains all the steps that are described in the tutorial. It loads the dataset and a sequential model, trains the model with the training set, and evaluates the trained model on the test set. - -You are ready to start the Flower server as well as the clients. You can simply start the server in a terminal as follows: - -```shell -python3 server.py -``` - -Now you are ready to start the Flower clients which will participate in the learning. To do so simply open two more terminal windows and run the following commands. 
- -Start client 1 in the first terminal: - -```shell -python3 client.py -``` - -Start client 2 in the second terminal: - -```shell -python3 client.py -``` - -You are now training a MXNet-based classifier on MNIST, federated across two clients. The setup is of course simplified since both clients hold the same dataset, but you can now continue with your own explorations. How about changing from a sequential model to a CNN? How about adding more clients? diff --git a/examples/quickstart-mxnet/client.py b/examples/quickstart-mxnet/client.py deleted file mode 100644 index 6c2b2e99775d..000000000000 --- a/examples/quickstart-mxnet/client.py +++ /dev/null @@ -1,136 +0,0 @@ -"""Flower client example using MXNet for MNIST classification. - -The code is generally adapted from: - -https://mxnet.apache.org/api/python/docs/tutorials/packages/gluon/image/mnist.html -""" - -import flwr as fl -import numpy as np -import mxnet as mx -from mxnet import nd -from mxnet import gluon -from mxnet.gluon import nn -from mxnet import autograd as ag -import mxnet.ndarray as F - -# Fixing the random seed -mx.random.seed(42) - -# Setup context to GPU or CPU -DEVICE = [mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()] - - -def main(): - def model(): - net = nn.Sequential() - net.add(nn.Dense(256, activation="relu")) - net.add(nn.Dense(64, activation="relu")) - net.add(nn.Dense(10)) - net.collect_params().initialize() - return net - - train_data, val_data = load_data() - - model = model() - init = nd.random.uniform(shape=(2, 784)) - model(init) - - # Flower Client - class MNISTClient(fl.client.NumPyClient): - def get_parameters(self, config): - param = [] - for val in model.collect_params(".*weight").values(): - p = val.data() - param.append(p.asnumpy()) - return param - - def set_parameters(self, parameters): - params = zip(model.collect_params(".*weight").keys(), parameters) - for key, value in params: - model.collect_params().setattr(key, value) - - def fit(self, parameters, config): - 
self.set_parameters(parameters) - [accuracy, loss], num_examples = train(model, train_data, epoch=2) - results = {"accuracy": float(accuracy[1]), "loss": float(loss[1])} - return self.get_parameters(config={}), num_examples, results - - def evaluate(self, parameters, config): - self.set_parameters(parameters) - [accuracy, loss], num_examples = test(model, val_data) - print("Evaluation accuracy & loss", accuracy, loss) - return float(loss[1]), num_examples, {"accuracy": float(accuracy[1])} - - # Start Flower client - fl.client.start_numpy_client(server_address="0.0.0.0:8080", client=MNISTClient()) - - -def load_data(): - print("Download Dataset") - mnist = mx.test_utils.get_mnist() - batch_size = 100 - train_data = mx.io.NDArrayIter( - mnist["train_data"], mnist["train_label"], batch_size, shuffle=True - ) - val_data = mx.io.NDArrayIter(mnist["test_data"], mnist["test_label"], batch_size) - return train_data, val_data - - -def train(net, train_data, epoch): - trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.01}) - accuracy_metric = mx.metric.Accuracy() - loss_metric = mx.metric.CrossEntropy() - metrics = mx.metric.CompositeEvalMetric() - for child_metric in [accuracy_metric, loss_metric]: - metrics.add(child_metric) - softmax_cross_entropy_loss = gluon.loss.SoftmaxCrossEntropyLoss() - for i in range(epoch): - train_data.reset() - num_examples = 0 - for batch in train_data: - data = gluon.utils.split_and_load( - batch.data[0], ctx_list=DEVICE, batch_axis=0 - ) - label = gluon.utils.split_and_load( - batch.label[0], ctx_list=DEVICE, batch_axis=0 - ) - outputs = [] - with ag.record(): - for x, y in zip(data, label): - z = net(x) - loss = softmax_cross_entropy_loss(z, y) - loss.backward() - outputs.append(z.softmax()) - num_examples += len(x) - metrics.update(label, outputs) - trainer.step(batch.data[0].shape[0]) - trainings_metric = metrics.get_name_value() - print("Accuracy & loss at epoch %d: %s" % (i, trainings_metric)) - return 
trainings_metric, num_examples - - -def test(net, val_data): - accuracy_metric = mx.metric.Accuracy() - loss_metric = mx.metric.CrossEntropy() - metrics = mx.metric.CompositeEvalMetric() - for child_metric in [accuracy_metric, loss_metric]: - metrics.add(child_metric) - val_data.reset() - num_examples = 0 - for batch in val_data: - data = gluon.utils.split_and_load(batch.data[0], ctx_list=DEVICE, batch_axis=0) - label = gluon.utils.split_and_load( - batch.label[0], ctx_list=DEVICE, batch_axis=0 - ) - outputs = [] - for x in data: - outputs.append(net(x).softmax()) - num_examples += len(x) - metrics.update(label, outputs) - metrics.update(label, outputs) - return metrics.get_name_value(), num_examples - - -if __name__ == "__main__": - main() diff --git a/examples/quickstart-mxnet/pyproject.toml b/examples/quickstart-mxnet/pyproject.toml deleted file mode 100644 index b00b3ddfe412..000000000000 --- a/examples/quickstart-mxnet/pyproject.toml +++ /dev/null @@ -1,15 +0,0 @@ -[build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" - -[tool.poetry] -name = "mxnet_example" -version = "0.1.0" -description = "MXNet example with MNIST and CNN" -authors = ["The Flower Authors "] - -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = "1.6.0" -mxnet = "1.9.1" -numpy = "1.23.1" diff --git a/examples/quickstart-mxnet/requirements.txt b/examples/quickstart-mxnet/requirements.txt deleted file mode 100644 index 8dd6f7150dfd..000000000000 --- a/examples/quickstart-mxnet/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -flwr==1.6.0 -mxnet==1.9.1 -numpy==1.23.1 diff --git a/examples/quickstart-mxnet/server.py b/examples/quickstart-mxnet/server.py deleted file mode 100644 index 871aa4e8ec99..000000000000 --- a/examples/quickstart-mxnet/server.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Flower server example.""" - -import flwr as fl - -if __name__ == "__main__": - fl.server.start_server( - server_address="0.0.0.0:8080", - 
config=fl.server.ServerConfig(num_rounds=3), - ) diff --git a/examples/quickstart-pytorch-lightning/mnist.py b/examples/quickstart-pytorch-lightning/mnist.py index 95342f4fb9b3..2f6100fe94cc 100644 --- a/examples/quickstart-pytorch-lightning/mnist.py +++ b/examples/quickstart-pytorch-lightning/mnist.py @@ -82,9 +82,11 @@ def load_data(partition): partition = partition.with_transform(apply_transforms) # 20 % for on federated evaluation - partition_full = partition.train_test_split(test_size=0.2) + partition_full = partition.train_test_split(test_size=0.2, seed=42) # 60 % for the federated train and 20 % for the federated validation (both in fit) - partition_train_valid = partition_full["train"].train_test_split(train_size=0.75) + partition_train_valid = partition_full["train"].train_test_split( + train_size=0.75, seed=42 + ) trainloader = DataLoader( partition_train_valid["train"], batch_size=32, diff --git a/examples/quickstart-pytorch/README.md b/examples/quickstart-pytorch/README.md index 02c9b4b38498..93d6a593f362 100644 --- a/examples/quickstart-pytorch/README.md +++ b/examples/quickstart-pytorch/README.md @@ -14,7 +14,6 @@ This will create a new directory called `quickstart-pytorch` containing the foll ```shell -- pyproject.toml --- requirements.txt -- client.py -- server.py -- README.md @@ -22,30 +21,22 @@ This will create a new directory called `quickstart-pytorch` containing the foll ### Installing Dependencies -Project dependencies (such as `torch` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. 
- -#### Poetry +Project dependencies (such as `torch` and `flwr`) are defined in `pyproject.toml`. You can install the dependencies by invoking `pip`: ```shell -poetry install -poetry shell +# From a new python environment, run: +pip install . ``` -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: +Then, to verify that everything works correctly you can run the following command: ```shell -poetry run python3 -c "import flwr" +python3 -c "import flwr" ``` If you don't see any errors you're good to go! -#### pip - -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. - -```shell -pip install -r requirements.txt -``` +______________________________________________________________________ ## Run Federated Learning with PyTorch and Flower @@ -72,3 +63,29 @@ python3 client.py --partition-id 1 ``` You will see that PyTorch is starting a federated training. Look at the [code](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch) for a detailed explanation. + +______________________________________________________________________ + +## Run Federated Learning with PyTorch and `Flower Next` + +### 1. Start the long-running Flower server (SuperLink) + +```bash +flower-superlink --insecure +``` + +### 2. Start the long-running Flower clients (SuperNodes) + +Start 2 Flower `SuperNodes` in 2 separate terminal windows, using: + +```bash +flower-client-app client:app --insecure +``` + +### 3. 
Run the Flower App + +With both the long-running server (SuperLink) and two clients (SuperNode) up and running, we can now run the actual Flower App: + +```bash +flower-server-app server:app --insecure +``` diff --git a/examples/quickstart-pytorch/client.py b/examples/quickstart-pytorch/client.py index e640ce111dff..be4be88b8f8d 100644 --- a/examples/quickstart-pytorch/client.py +++ b/examples/quickstart-pytorch/client.py @@ -2,7 +2,7 @@ import warnings from collections import OrderedDict -import flwr as fl +from flwr.client import NumPyClient, ClientApp from flwr_datasets import FederatedDataset import torch import torch.nn as nn @@ -74,7 +74,7 @@ def load_data(partition_id): fds = FederatedDataset(dataset="cifar10", partitioners={"train": 3}) partition = fds.load_partition(partition_id) # Divide data on each node: 80% train, 20% test - partition_train_test = partition.train_test_split(test_size=0.2) + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) pytorch_transforms = Compose( [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] ) @@ -99,11 +99,11 @@ def apply_transforms(batch): parser.add_argument( "--partition-id", choices=[0, 1, 2], - required=True, + default=0, type=int, help="Partition of the dataset divided into 3 iid partitions created artificially.", ) -partition_id = parser.parse_args().partition_id +partition_id = parser.parse_known_args()[0].partition_id # Load model and data (simple CNN, CIFAR-10) net = Net().to(DEVICE) @@ -111,7 +111,7 @@ def apply_transforms(batch): # Define Flower client -class FlowerClient(fl.client.NumPyClient): +class FlowerClient(NumPyClient): def get_parameters(self, config): return [val.cpu().numpy() for _, val in net.state_dict().items()] @@ -131,8 +131,22 @@ def evaluate(self, parameters, config): return loss, len(testloader.dataset), {"accuracy": accuracy} -# Start Flower client -fl.client.start_client( - server_address="127.0.0.1:8080", - client=FlowerClient().to_client(), +def 
client_fn(cid: str): + """Create and return an instance of Flower `Client`.""" + return FlowerClient().to_client() + + +# Flower ClientApp +app = ClientApp( + client_fn=client_fn, ) + + +# Legacy mode +if __name__ == "__main__": + from flwr.client import start_client + + start_client( + server_address="127.0.0.1:8080", + client=FlowerClient().to_client(), + ) diff --git a/examples/quickstart-pytorch/pyproject.toml b/examples/quickstart-pytorch/pyproject.toml index d8e1503dd8a7..4692958d4491 100644 --- a/examples/quickstart-pytorch/pyproject.toml +++ b/examples/quickstart-pytorch/pyproject.toml @@ -1,17 +1,21 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] +[project] name = "quickstart-pytorch" version = "0.1.0" description = "PyTorch Federated Learning Quickstart with Flower" -authors = ["The Flower Authors "] +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, +] +dependencies = [ + "flwr>=1.8.0,<2.0", + "flwr-datasets[vision]>=0.0.2,<1.0.0", + "torch==2.1.1", + "torchvision==0.16.1", + "tqdm==4.65.0" +] -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" -flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } -torch = "2.1.1" -torchvision = "0.16.1" -tqdm = "4.65.0" +[tool.hatch.build.targets.wheel] +packages = ["."] diff --git a/examples/quickstart-pytorch/requirements.txt b/examples/quickstart-pytorch/requirements.txt deleted file mode 100644 index 4e321e2cd0c2..000000000000 --- a/examples/quickstart-pytorch/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -flwr>=1.0, <2.0 -flwr-datasets[vision]>=0.0.2, <1.0.0 -torch==2.1.1 -torchvision==0.16.1 -tqdm==4.65.0 diff --git a/examples/quickstart-pytorch/run.sh b/examples/quickstart-pytorch/run.sh deleted file mode 100755 index 6ca9c8cafec9..000000000000 --- a/examples/quickstart-pytorch/run.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash 
-set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ - -echo "Starting server" -python server.py & -sleep 3 # Sleep for 3s to give the server enough time to start - -for i in $(seq 0 1); do - echo "Starting client $i" - python client.py --partition-id "$i" & -done - -# Enable CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/quickstart-pytorch/server.py b/examples/quickstart-pytorch/server.py index fe691a88aba0..4034703ca690 100644 --- a/examples/quickstart-pytorch/server.py +++ b/examples/quickstart-pytorch/server.py @@ -1,6 +1,7 @@ from typing import List, Tuple -import flwr as fl +from flwr.server import ServerApp, ServerConfig +from flwr.server.strategy import FedAvg from flwr.common import Metrics @@ -15,11 +16,26 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: # Define strategy -strategy = fl.server.strategy.FedAvg(evaluate_metrics_aggregation_fn=weighted_average) +strategy = FedAvg(evaluate_metrics_aggregation_fn=weighted_average) -# Start Flower server -fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), + +# Define config +config = ServerConfig(num_rounds=3) + + +# Flower ServerApp +app = ServerApp( + config=config, strategy=strategy, ) + + +# Legacy mode +if __name__ == "__main__": + from flwr.server import start_server + + start_server( + server_address="0.0.0.0:8080", + config=config, + strategy=strategy, + ) diff --git a/examples/quickstart-tabnet/pyproject.toml b/examples/quickstart-tabnet/pyproject.toml index 18f1979791bd..6b7311f068f0 100644 --- a/examples/quickstart-tabnet/pyproject.toml +++ b/examples/quickstart-tabnet/pyproject.toml @@ -13,5 +13,5 @@ python = ">=3.8,<3.11" flwr = ">=1.0,<2.0" tensorflow-cpu = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "platform_machine == \"x86_64\"" } tensorflow-macos = { version = 
">=2.9.1,<2.11.1 || >2.11.1", markers = "sys_platform == \"darwin\" and platform_machine == \"arm64\"" } -tensorflow_datasets = "4.8.3" +tensorflow_datasets = "4.9.2" tabnet = "0.1.6" diff --git a/examples/quickstart-tensorflow/README.md b/examples/quickstart-tensorflow/README.md index 8d5e9434b086..ae1fe19834a3 100644 --- a/examples/quickstart-tensorflow/README.md +++ b/examples/quickstart-tensorflow/README.md @@ -15,7 +15,6 @@ This will create a new directory called `quickstart-tensorflow` containing the f ```shell -- pyproject.toml --- requirements.txt -- client.py -- server.py -- README.md @@ -23,51 +22,63 @@ This will create a new directory called `quickstart-tensorflow` containing the f ### Installing Dependencies -Project dependencies (such as `tensorflow` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. - -#### Poetry +Project dependencies (such as `tensorflow` and `flwr`) are defined in `pyproject.toml`. You can install the dependencies by invoking `pip`: ```shell -poetry install -poetry shell +# From a new python environment, run: +pip install . ``` -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: +Then, to verify that everything works correctly you can run the following command: ```shell -poetry run python3 -c "import flwr" +python3 -c "import flwr" ``` If you don't see any errors you're good to go! -#### pip - -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. 
- -```shell -pip install -r requirements.txt -``` - ## Run Federated Learning with TensorFlow/Keras and Flower Afterward, you are ready to start the Flower server as well as the clients. You can simply start the server in a terminal as follows: ```shell -poetry run python3 server.py +python3 server.py ``` Now you are ready to start the Flower clients which will participate in the learning. To do so simply open two more terminals and run the following command in each: ```shell -poetry run python3 client.py +python3 client.py --partition-id 0 ``` -Alternatively, you can run all of it in one shell as follows: +Start client 2 in the second terminal: ```shell -poetry run python3 server.py & -poetry run python3 client.py & -poetry run python3 client.py +python3 client.py --partition-id 1 ``` You will see that Keras is starting a federated training. Have a look at the [code](https://github.com/adap/flower/tree/main/examples/quickstart-tensorflow) for a detailed explanation. You can add `steps_per_epoch=3` to `model.fit()` if you just want to evaluate that everything works without having to wait for the client-side training to finish (this will save you a lot of time during development). + +## Run Federated Learning with TensorFlow/Keras and `Flower Next` + +### 1. Start the long-running Flower server (SuperLink) + +```bash +flower-superlink --insecure +``` + +### 2. Start the long-running Flower clients (SuperNodes) + +Start 2 Flower `SuperNodes` in 2 separate terminal windows, using: + +```bash +flower-client-app client:app --insecure +``` + +### 3. 
Run the Flower App + +With both the long-running server (SuperLink) and two clients (SuperNode) up and running, we can now run the actual Flower App, using: + +```bash +flower-server-app server:app --insecure +``` diff --git a/examples/quickstart-tensorflow/client.py b/examples/quickstart-tensorflow/client.py index 3e2035c09311..6b2bd6639ce0 100644 --- a/examples/quickstart-tensorflow/client.py +++ b/examples/quickstart-tensorflow/client.py @@ -1,7 +1,7 @@ import argparse import os -import flwr as fl +from flwr.client import ClientApp, NumPyClient import tensorflow as tf from flwr_datasets import FederatedDataset @@ -14,11 +14,11 @@ "--partition-id", type=int, choices=[0, 1, 2], - required=True, - help="Partition of the dataset (0,1 or 2). " + default=0, + help="Partition of the dataset (0, 1 or 2). " "The dataset is divided into 3 partitions created artificially.", ) -args = parser.parse_args() +args, _ = parser.parse_known_args() # Load model and data (MobileNetV2, CIFAR-10) model = tf.keras.applications.MobileNetV2((32, 32, 3), classes=10, weights=None) @@ -30,13 +30,13 @@ partition.set_format("numpy") # Divide data on each node: 80% train, 20% test -partition = partition.train_test_split(test_size=0.2) +partition = partition.train_test_split(test_size=0.2, seed=42) x_train, y_train = partition["train"]["img"] / 255.0, partition["train"]["label"] x_test, y_test = partition["test"]["img"] / 255.0, partition["test"]["label"] # Define Flower client -class CifarClient(fl.client.NumPyClient): +class FlowerClient(NumPyClient): def get_parameters(self, config): return model.get_weights() @@ -51,7 +51,22 @@ def evaluate(self, parameters, config): return loss, len(x_test), {"accuracy": accuracy} -# Start Flower client -fl.client.start_client( - server_address="127.0.0.1:8080", client=CifarClient().to_client() +def client_fn(cid: str): + """Create and return an instance of Flower `Client`.""" + return FlowerClient().to_client() + + +# Flower ClientApp +app = ClientApp( + 
client_fn=client_fn, ) + + +# Legacy mode +if __name__ == "__main__": + from flwr.client import start_client + + start_client( + server_address="127.0.0.1:8080", + client=FlowerClient().to_client(), + ) diff --git a/examples/quickstart-tensorflow/pyproject.toml b/examples/quickstart-tensorflow/pyproject.toml index 98aeb932cab9..c0f71344b2fb 100644 --- a/examples/quickstart-tensorflow/pyproject.toml +++ b/examples/quickstart-tensorflow/pyproject.toml @@ -1,16 +1,20 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] +[project] name = "quickstart-tensorflow" version = "0.1.0" description = "Keras Federated Learning Quickstart with Flower" -authors = ["The Flower Authors "] +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, +] +dependencies = [ + "flwr>=1.8.0,<2.0", + "flwr-datasets[vision]>=0.0.2,<1.0.0", + "tensorflow-cpu>=2.9.1, != 2.11.1 ; platform_machine == \"x86_64\"", + "tensorflow-macos>=2.9.1, != 2.11.1 ; sys_platform == \"darwin\" and platform_machine == \"arm64\"" +] -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" -flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } -tensorflow-cpu = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "platform_machine == \"x86_64\"" } -tensorflow-macos = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "sys_platform == \"darwin\" and platform_machine == \"arm64\"" } +[tool.hatch.build.targets.wheel] +packages = ["."] diff --git a/examples/quickstart-tensorflow/requirements.txt b/examples/quickstart-tensorflow/requirements.txt deleted file mode 100644 index 7f025975cae9..000000000000 --- a/examples/quickstart-tensorflow/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -flwr>=1.0, <2.0 -flwr-datasets[vision]>=0.0.2, <1.0.0 -tensorflow-macos>=2.9.1, != 2.11.1 ; sys_platform == "darwin" and platform_machine == "arm64" -tensorflow-cpu>=2.9.1, 
!= 2.11.1 ; platform_machine == "x86_64" diff --git a/examples/quickstart-tensorflow/run.sh b/examples/quickstart-tensorflow/run.sh deleted file mode 100755 index 76188f197e3e..000000000000 --- a/examples/quickstart-tensorflow/run.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -echo "Starting server" -python server.py & -sleep 3 # Sleep for 3s to give the server enough time to start - -for i in `seq 0 1`; do - echo "Starting client $i" - python client.py --partition-id $i & -done - -# This will allow you to use CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/quickstart-tensorflow/server.py b/examples/quickstart-tensorflow/server.py index fe691a88aba0..4034703ca690 100644 --- a/examples/quickstart-tensorflow/server.py +++ b/examples/quickstart-tensorflow/server.py @@ -1,6 +1,7 @@ from typing import List, Tuple -import flwr as fl +from flwr.server import ServerApp, ServerConfig +from flwr.server.strategy import FedAvg from flwr.common import Metrics @@ -15,11 +16,26 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: # Define strategy -strategy = fl.server.strategy.FedAvg(evaluate_metrics_aggregation_fn=weighted_average) +strategy = FedAvg(evaluate_metrics_aggregation_fn=weighted_average) -# Start Flower server -fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), + +# Define config +config = ServerConfig(num_rounds=3) + + +# Flower ServerApp +app = ServerApp( + config=config, strategy=strategy, ) + + +# Legacy mode +if __name__ == "__main__": + from flwr.server import start_server + + start_server( + server_address="0.0.0.0:8080", + config=config, + strategy=strategy, + ) diff --git a/examples/simulation-pytorch/sim.ipynb b/examples/simulation-pytorch/sim.ipynb index 6dda1ef9319d..d225069cb444 100644 --- a/examples/simulation-pytorch/sim.ipynb +++ 
b/examples/simulation-pytorch/sim.ipynb @@ -497,7 +497,7 @@ " client_dataset = dataset.load_partition(int(cid), \"train\")\n", "\n", " # Now let's split it into train (90%) and validation (10%)\n", - " client_dataset_splits = client_dataset.train_test_split(test_size=0.1)\n", + " client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42)\n", "\n", " trainset = client_dataset_splits[\"train\"]\n", " valset = client_dataset_splits[\"test\"]\n", diff --git a/examples/simulation-pytorch/sim.py b/examples/simulation-pytorch/sim.py index 6fb750f2e59c..db68e75653fc 100644 --- a/examples/simulation-pytorch/sim.py +++ b/examples/simulation-pytorch/sim.py @@ -94,7 +94,7 @@ def client_fn(cid: str) -> fl.client.Client: client_dataset = dataset.load_partition(int(cid), "train") # Now let's split it into train (90%) and validation (10%) - client_dataset_splits = client_dataset.train_test_split(test_size=0.1) + client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42) trainset = client_dataset_splits["train"] valset = client_dataset_splits["test"] diff --git a/examples/simulation-tensorflow/sim.ipynb b/examples/simulation-tensorflow/sim.ipynb index 797e2dcc603e..26b7260b5f1c 100644 --- a/examples/simulation-tensorflow/sim.ipynb +++ b/examples/simulation-tensorflow/sim.ipynb @@ -179,7 +179,7 @@ " client_dataset = dataset.load_partition(int(cid), \"train\")\n", "\n", " # Now let's split it into train (90%) and validation (10%)\n", - " client_dataset_splits = client_dataset.train_test_split(test_size=0.1)\n", + " client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42)\n", "\n", " trainset = client_dataset_splits[\"train\"].to_tf_dataset(\n", " columns=\"image\", label_cols=\"label\", batch_size=32\n", diff --git a/examples/simulation-tensorflow/sim.py b/examples/simulation-tensorflow/sim.py index e94e5ec96850..4014e3c6be72 100644 --- a/examples/simulation-tensorflow/sim.py +++ b/examples/simulation-tensorflow/sim.py 
@@ -83,7 +83,7 @@ def client_fn(cid: str) -> fl.client.Client: client_dataset = dataset.load_partition(int(cid), "train") # Now let's split it into train (90%) and validation (10%) - client_dataset_splits = client_dataset.train_test_split(test_size=0.1) + client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42) trainset = client_dataset_splits["train"].to_tf_dataset( columns="image", label_cols="label", batch_size=32 diff --git a/examples/vertical-fl/README.md b/examples/vertical-fl/README.md index 78588180d3d6..d8c599d617c4 100644 --- a/examples/vertical-fl/README.md +++ b/examples/vertical-fl/README.md @@ -123,7 +123,7 @@ In `task.py`, you'll find the preprocessing functions we'll apply to our data: 'Adult' for ages between 11 and 40, and 'Elderly' for those over 40. If the age isn't listed, we'll label it as 'Unknown'. - ```python3 + ```python def _bin_age(age_series): bins = [-np.inf, 10, 40, np.inf] labels = ["Child", "Adult", "Elderly"] @@ -138,7 +138,7 @@ In `task.py`, you'll find the preprocessing functions we'll apply to our data: understand social status and family roles, simplifying rare titles into a single 'Rare' category and converting any French titles to their English equivalents. - ```python3 + ```python def _extract_title(name_series): titles = name_series.str.extract(" ([A-Za-z]+)\.", expand=False) rare_titles = { @@ -170,7 +170,7 @@ In `task.py`, you'll find the preprocessing functions we'll apply to our data: 'Pclass', 'Embarked', 'Title', 'Cabin', and the binned 'Age' into One-Hot encodings. 
- ```python3 + ```python def _create_features(df): # Convert 'Age' to numeric, coercing errors to NaN df["Age"] = pd.to_numeric(df["Age"], errors="coerce") @@ -190,7 +190,7 @@ In `task.py`, you'll find the preprocessing functions we'll apply to our data: In `task.py`, we also partition our data for our 3 clients to mirror real-life collaborations where different organizations hold different feature sets: -```python3 +```python def _partition_data(df, all_keywords): partitions = [] keywords_sets = [{"Parch", "Cabin", "Pclass"}, {"Sex", "Title"}] @@ -236,7 +236,7 @@ collective intelligence without sharing sensitive information. Note that our final data processing function looks like that: -```python3 +```python def get_partitions_and_label(): df = pd.read_csv("_static/data/train.csv") processed_df = df.dropna(subset=["Embarked", "Fare"]).copy() @@ -259,7 +259,7 @@ Each client's model is a neural network designed to operate on a distinct subset of features held by a client. In this example we will use simple linear regression models. -```python3 +```python class ClientModel(nn.Module): def __init__(self, input_size): super(ClientModel, self).__init__() @@ -281,7 +281,7 @@ The server's model acts as the central aggregator in the VFL system. It's also a neural network but with a slightly different architecture tailored to its role in aggregating the client models' outputs. -```python3 +```python class ServerModel(nn.Module): def __init__(self): super(ServerModel, self).__init__() @@ -305,7 +305,7 @@ a probability score indicative of the likelihood of survival. The strategy we will write to perform the aggregation will inherit from `FedAvg` and set the following additional attributes: -```python3 +```python self.model = ServerModel(12) self.initial_parameters = ndarrays_to_parameters( [val.cpu().numpy() for _, val in self.model.state_dict().items()] @@ -319,7 +319,7 @@ With `labels` given as an argument to the strategy. 
We then redefine the `aggregate_fit` method: -```python3 +```python def aggregate_fit( self, rnd, @@ -406,7 +406,7 @@ The last thing we have to do is to redefine the `aggregate_evaluate` function to disable distributed evaluation (as the clients do not hold any labels to test their local models). -```python3 +```python def aggregate_evaluate( self, rnd, @@ -420,7 +420,7 @@ def aggregate_evaluate( Our `FlowerClient` class is going to be quite straight forward. -```python3 +```python class FlowerClient(fl.client.NumPyClient): def __init__(self, cid, data): self.cid = cid @@ -487,7 +487,7 @@ the `aggregate_evaluate` function of the strategy. Putting everything together, to start our simulation we use the following function: -```python3 +```python hist = fl.simulation.start_simulation( client_fn=client_fn, num_clients=3, diff --git a/examples/vit-finetune/client.py b/examples/vit-finetune/client.py index 68d98926feeb..bf91fa0c4328 100644 --- a/examples/vit-finetune/client.py +++ b/examples/vit-finetune/client.py @@ -8,9 +8,7 @@ class FedViTClient(NumPyClient): - def __init__(self, trainset): - self.trainset = trainset self.model = get_model() diff --git a/examples/vit-finetune/main.py b/examples/vit-finetune/main.py index 1257246304a1..c629a6f68980 100644 --- a/examples/vit-finetune/main.py +++ b/examples/vit-finetune/main.py @@ -19,7 +19,6 @@ def main(): - args = parser.parse_args() # To control the degree of parallelism diff --git a/examples/whisper-federated-finetuning/utils.py b/examples/whisper-federated-finetuning/utils.py index 21fe0309151c..117cf7100ddd 100644 --- a/examples/whisper-federated-finetuning/utils.py +++ b/examples/whisper-federated-finetuning/utils.py @@ -107,10 +107,10 @@ def prepare_silences_dataset(train_dataset, ratio_silence: float = 0.1) -> Datas """Generate silences for the train set. One of the classes in the SpeechCommands datatset is `silence`. However, the dataset - does not include clips of silence. 
It does however include 5 long files with different - background sounds. The taks of this function is to extract several (defined by `ratio_silence`) - one-second long clips from those background audio files. Later, those audio clips will be - included into the training set. + does not include clips of silence. It does however include 5 long files with + different background sounds. The taks of this function is to extract several + (defined by `ratio_silence`) one-second long clips from those background audio + files. Later, those audio clips will be included into the training set. """ # retrieve original silence audio clips silences = [d for d in train_dataset if d["label"] == 35] @@ -138,9 +138,9 @@ def prepare_silences_dataset(train_dataset, ratio_silence: float = 0.1) -> Datas def construct_client_mapping(full_trainset, num_clients: int = 100): """Create a mapping to partition the dataset into `num_client` buckets. - These buckets contain the same number of `spekaer_id` but likely different - number of training exampes since each `speaker_id` in SpeechCommands does - provide different amounts of data to the dataset. + These buckets contain the same number of `spekaer_id` but likely different number of + training exampes since each `speaker_id` in SpeechCommands does provide different + amounts of data to the dataset. 
""" client_ids = list(set(full_trainset["speaker_id"])) client_ids.remove( @@ -191,7 +191,7 @@ def set_params(model: torch.nn.ModuleList, params: List[fl.common.NDArrays]): def get_model(device, num_classes, compile: bool = True): - """Create model: Whisper-tiny Encoder + classification head""" + """Create model: Whisper-tiny Encoder + classification head.""" encoder = WhisperForConditionalGeneration.from_pretrained( "openai/whisper-tiny" ).get_encoder() diff --git a/examples/xgboost-quickstart/README.md b/examples/xgboost-quickstart/README.md index 72dde5706e8d..b196520d37e6 100644 --- a/examples/xgboost-quickstart/README.md +++ b/examples/xgboost-quickstart/README.md @@ -4,7 +4,7 @@ This example demonstrates how to perform EXtreme Gradient Boosting (XGBoost) wit We use [HIGGS](https://archive.ics.uci.edu/dataset/280/higgs) dataset for this example to perform a binary classification task. Tree-based with bagging method is used for aggregation on the server. -This project provides a minimal code example to enable you to get stated quickly. For a more comprehensive code example, take a look at [xgboost-comprehensive](https://github.com/adap/flower/tree/main/examples/xgboost-comprehensive). +This project provides a minimal code example to enable you to get started quickly. For a more comprehensive code example, take a look at [xgboost-comprehensive](https://github.com/adap/flower/tree/main/examples/xgboost-comprehensive). 
## Project Setup diff --git a/pyproject.toml b/pyproject.toml index e0514254ecac..5adb2df45a5b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "flwr" -version = "1.8.0" +version = "1.9.0" description = "Flower: A Friendly Federated Learning Framework" license = "Apache-2.0" authors = ["The Flower Authors "] @@ -56,9 +56,10 @@ flwr = "flwr.cli.app:app" flower-driver-api = "flwr.server:run_driver_api" flower-fleet-api = "flwr.server:run_fleet_api" flower-superlink = "flwr.server:run_superlink" +flower-supernode = "flwr.client:run_supernode" flower-client-app = "flwr.client:run_client_app" flower-server-app = "flwr.server:run_server_app" -flower-simulation = "flwr.simulation:run_simulation_from_cli" +flower-simulation = "flwr.simulation.run_simulation:run_simulation_from_cli" [tool.poetry.dependencies] python = "^3.8" @@ -72,8 +73,7 @@ iterators = "^0.0.2" typer = { version = "^0.9.0", extras=["all"] } tomli = "^2.0.1" # Optional dependencies (Simulation Engine) -ray = { version = "==2.6.3", optional = true } -pydantic = { version = "<2.0.0", optional = true } +ray = { version = "==2.6.3", optional = true, python = ">=3.8,<3.12" } # Optional dependencies (REST transport layer) requests = { version = "^2.31.0", optional = true } starlette = { version = "^0.31.0", optional = true } @@ -127,6 +127,7 @@ check-wheel-contents = "==0.4.0" GitPython = "==3.1.32" PyGithub = "==2.1.1" licensecheck = "==2024" +pre-commit = "==3.5.0" [tool.isort] line_length = 88 @@ -135,7 +136,7 @@ multi_line_output = 3 include_trailing_comma = true force_grid_wrap = 0 use_parentheses = true -known_first_party = ["flwr", "flwr_experimental", "flwr_tool"] +known_first_party = ["flwr", "flwr_tool"] [tool.black] line-length = 88 @@ -169,13 +170,6 @@ plugins = [ ignore_missing_imports = true strict = true -[[tool.mypy.overrides]] -module = [ - "flwr_example.*", - "flwr_experimental.*", -] -ignore_errors = true - 
[[tool.mypy.overrides]] module = [ "importlib.metadata.*", diff --git a/src/cc/flwr/.gitignore b/src/cc/flwr/.gitignore index bd834005883d..1909b6136f64 100644 --- a/src/cc/flwr/.gitignore +++ b/src/cc/flwr/.gitignore @@ -1,2 +1,3 @@ build/ +.clangd *.bak diff --git a/src/cc/flwr/CMakeLists.txt b/src/cc/flwr/CMakeLists.txt index c242f52b237b..9955d21e84ad 100644 --- a/src/cc/flwr/CMakeLists.txt +++ b/src/cc/flwr/CMakeLists.txt @@ -73,6 +73,8 @@ GENERATE_AND_COPY(transport) GENERATE_AND_COPY(node) GENERATE_AND_COPY(task) GENERATE_AND_COPY(fleet) +GENERATE_AND_COPY(error) +GENERATE_AND_COPY(recordset) add_library(flwr_grpc_proto STATIC ${ALL_PROTO_FILES}) diff --git a/src/cc/flwr/include/communicator.h b/src/cc/flwr/include/communicator.h new file mode 100644 index 000000000000..ace4821ab6af --- /dev/null +++ b/src/cc/flwr/include/communicator.h @@ -0,0 +1,30 @@ +#ifndef COMMUNICATOR_H +#define COMMUNICATOR_H + +#include "flwr/proto/fleet.pb.h" +#include +#include + +class Communicator { +public: + virtual bool send_create_node(flwr::proto::CreateNodeRequest request, + flwr::proto::CreateNodeResponse *response) = 0; + + virtual bool send_delete_node(flwr::proto::DeleteNodeRequest request, + flwr::proto::DeleteNodeResponse *response) = 0; + + virtual bool + send_pull_task_ins(flwr::proto::PullTaskInsRequest request, + flwr::proto::PullTaskInsResponse *response) = 0; + + virtual bool + send_push_task_res(flwr::proto::PushTaskResRequest request, + flwr::proto::PushTaskResResponse *response) = 0; +}; + +void create_node(Communicator *communicator); +void delete_node(Communicator *communicator); +void send(Communicator *communicator, flwr::proto::TaskRes task_res); +std::optional receive(Communicator *communicator); + +#endif diff --git a/src/cc/flwr/include/flwr/proto/error.grpc.pb.cc b/src/cc/flwr/include/flwr/proto/error.grpc.pb.cc new file mode 100644 index 000000000000..dbe668508d80 --- /dev/null +++ b/src/cc/flwr/include/flwr/proto/error.grpc.pb.cc @@ -0,0 +1,27 @@ 
+// Generated by the gRPC C++ plugin. +// If you make any local change, they will be lost. +// source: flwr/proto/error.proto + +#include "flwr/proto/error.pb.h" +#include "flwr/proto/error.grpc.pb.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +namespace flwr { +namespace proto { + +} // namespace flwr +} // namespace proto + diff --git a/src/cc/flwr/include/flwr/proto/error.grpc.pb.h b/src/cc/flwr/include/flwr/proto/error.grpc.pb.h new file mode 100644 index 000000000000..df31ee174acf --- /dev/null +++ b/src/cc/flwr/include/flwr/proto/error.grpc.pb.h @@ -0,0 +1,51 @@ +// Generated by the gRPC C++ plugin. +// If you make any local change, they will be lost. +// source: flwr/proto/error.proto +// Original file comments: +// Copyright 2024 Flower Labs GmbH. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// ============================================================================== +// +#ifndef GRPC_flwr_2fproto_2ferror_2eproto__INCLUDED +#define GRPC_flwr_2fproto_2ferror_2eproto__INCLUDED + +#include "flwr/proto/error.pb.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace flwr { +namespace proto { + +} // namespace proto +} // namespace flwr + + +#endif // GRPC_flwr_2fproto_2ferror_2eproto__INCLUDED diff --git a/src/cc/flwr/include/flwr/proto/error.pb.cc b/src/cc/flwr/include/flwr/proto/error.pb.cc new file mode 100644 index 000000000000..c086fa941954 --- /dev/null +++ b/src/cc/flwr/include/flwr/proto/error.pb.cc @@ -0,0 +1,312 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: flwr/proto/error.proto + +#include "flwr/proto/error.pb.h" + +#include + +#include +#include +#include +#include +#include +#include +#include +// @@protoc_insertion_point(includes) +#include + +PROTOBUF_PRAGMA_INIT_SEG +namespace flwr { +namespace proto { +constexpr Error::Error( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : reason_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) + , code_(int64_t{0}){} +struct ErrorDefaultTypeInternal { + constexpr ErrorDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~ErrorDefaultTypeInternal() {} + union { + Error _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT ErrorDefaultTypeInternal _Error_default_instance_; +} // namespace proto +} // namespace flwr +static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_flwr_2fproto_2ferror_2eproto[1]; +static constexpr ::PROTOBUF_NAMESPACE_ID::EnumDescriptor const** file_level_enum_descriptors_flwr_2fproto_2ferror_2eproto = nullptr; +static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** 
file_level_service_descriptors_flwr_2fproto_2ferror_2eproto = nullptr; + +const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_flwr_2fproto_2ferror_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::Error, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::Error, code_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::Error, reason_), +}; +static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { + { 0, -1, -1, sizeof(::flwr::proto::Error)}, +}; + +static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = { + reinterpret_cast(&::flwr::proto::_Error_default_instance_), +}; + +const char descriptor_table_protodef_flwr_2fproto_2ferror_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = + "\n\026flwr/proto/error.proto\022\nflwr.proto\"%\n\005" + "Error\022\014\n\004code\030\001 \001(\022\022\016\n\006reason\030\002 \001(\tb\006pro" + "to3" + ; +static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_flwr_2fproto_2ferror_2eproto_once; +const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_flwr_2fproto_2ferror_2eproto = { + false, false, 83, descriptor_table_protodef_flwr_2fproto_2ferror_2eproto, "flwr/proto/error.proto", + &descriptor_table_flwr_2fproto_2ferror_2eproto_once, nullptr, 0, 1, + schemas, file_default_instances, TableStruct_flwr_2fproto_2ferror_2eproto::offsets, + file_level_metadata_flwr_2fproto_2ferror_2eproto, file_level_enum_descriptors_flwr_2fproto_2ferror_2eproto, file_level_service_descriptors_flwr_2fproto_2ferror_2eproto, +}; +PROTOBUF_ATTRIBUTE_WEAK const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable* descriptor_table_flwr_2fproto_2ferror_2eproto_getter() { + return &descriptor_table_flwr_2fproto_2ferror_2eproto; +} + +// Force running 
AddDescriptors() at dynamic initialization time. +PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_flwr_2fproto_2ferror_2eproto(&descriptor_table_flwr_2fproto_2ferror_2eproto); +namespace flwr { +namespace proto { + +// =================================================================== + +class Error::_Internal { + public: +}; + +Error::Error(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.Error) +} +Error::Error(const Error& from) + : ::PROTOBUF_NAMESPACE_ID::Message() { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + reason_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from._internal_reason().empty()) { + reason_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_reason(), + GetArenaForAllocation()); + } + code_ = from.code_; + // @@protoc_insertion_point(copy_constructor:flwr.proto.Error) +} + +void Error::SharedCtor() { +reason_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +code_ = int64_t{0}; +} + +Error::~Error() { + // @@protoc_insertion_point(destructor:flwr.proto.Error) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void Error::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); + reason_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} + +void Error::ArenaDtor(void* object) { + Error* _this = reinterpret_cast< Error* >(object); + (void)_this; +} +void Error::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void Error::SetCachedSize(int size) 
const { + _cached_size_.Set(size); +} + +void Error::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.Error) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + reason_.ClearToEmpty(); + code_ = int64_t{0}; + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* Error::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // sint64 code = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) { + code_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // string reason = 2; + case 2: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { + auto str = _internal_mutable_reason(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.Error.reason")); + CHK_(ptr); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* Error::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // 
@@protoc_insertion_point(serialize_to_array_start:flwr.proto.Error) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // sint64 code = 1; + if (this->_internal_code() != 0) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteSInt64ToArray(1, this->_internal_code(), target); + } + + // string reason = 2; + if (!this->_internal_reason().empty()) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->_internal_reason().data(), static_cast(this->_internal_reason().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.Error.reason"); + target = stream->WriteStringMaybeAliased( + 2, this->_internal_reason(), target); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.Error) + return target; +} + +size_t Error::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.Error) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // string reason = 2; + if (!this->_internal_reason().empty()) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->_internal_reason()); + } + + // sint64 code = 1; + if (this->_internal_code() != 0) { + total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SInt64SizePlusOne(this->_internal_code()); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData Error::_class_data_ = { 
+ ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + Error::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*Error::GetClassData() const { return &_class_data_; } + +void Error::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void Error::MergeFrom(const Error& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.Error) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + if (!from._internal_reason().empty()) { + _internal_set_reason(from._internal_reason()); + } + if (from._internal_code() != 0) { + _internal_set_code(from._internal_code()); + } + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void Error::CopyFrom(const Error& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.Error) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool Error::IsInitialized() const { + return true; +} + +void Error::InternalSwap(Error* other) { + using std::swap; + auto* lhs_arena = GetArenaForAllocation(); + auto* rhs_arena = other->GetArenaForAllocation(); + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap( + &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + &reason_, lhs_arena, + &other->reason_, rhs_arena + ); + swap(code_, other->code_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata Error::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2ferror_2eproto_getter, &descriptor_table_flwr_2fproto_2ferror_2eproto_once, + file_level_metadata_flwr_2fproto_2ferror_2eproto[0]); +} + +// @@protoc_insertion_point(namespace_scope) +} // namespace proto +} // namespace flwr +PROTOBUF_NAMESPACE_OPEN +template<> 
PROTOBUF_NOINLINE ::flwr::proto::Error* Arena::CreateMaybeMessage< ::flwr::proto::Error >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::Error >(arena); +} +PROTOBUF_NAMESPACE_CLOSE + +// @@protoc_insertion_point(global_scope) +#include diff --git a/src/cc/flwr/include/flwr/proto/error.pb.h b/src/cc/flwr/include/flwr/proto/error.pb.h new file mode 100644 index 000000000000..483e5575d1ce --- /dev/null +++ b/src/cc/flwr/include/flwr/proto/error.pb.h @@ -0,0 +1,317 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: flwr/proto/error.proto + +#ifndef GOOGLE_PROTOBUF_INCLUDED_flwr_2fproto_2ferror_2eproto +#define GOOGLE_PROTOBUF_INCLUDED_flwr_2fproto_2ferror_2eproto + +#include +#include + +#include +#if PROTOBUF_VERSION < 3018000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. +#endif +#if 3018001 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#include +// @@protoc_insertion_point(includes) +#include +#define PROTOBUF_INTERNAL_EXPORT_flwr_2fproto_2ferror_2eproto +PROTOBUF_NAMESPACE_OPEN +namespace internal { +class AnyMetadata; +} // namespace internal +PROTOBUF_NAMESPACE_CLOSE + +// Internal implementation detail -- do not use these members. 
+struct TableStruct_flwr_2fproto_2ferror_2eproto { + static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTableField entries[] + PROTOBUF_SECTION_VARIABLE(protodesc_cold); + static const ::PROTOBUF_NAMESPACE_ID::internal::AuxiliaryParseTableField aux[] + PROTOBUF_SECTION_VARIABLE(protodesc_cold); + static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[1] + PROTOBUF_SECTION_VARIABLE(protodesc_cold); + static const ::PROTOBUF_NAMESPACE_ID::internal::FieldMetadata field_metadata[]; + static const ::PROTOBUF_NAMESPACE_ID::internal::SerializationTable serialization_table[]; + static const ::PROTOBUF_NAMESPACE_ID::uint32 offsets[]; +}; +extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_flwr_2fproto_2ferror_2eproto; +namespace flwr { +namespace proto { +class Error; +struct ErrorDefaultTypeInternal; +extern ErrorDefaultTypeInternal _Error_default_instance_; +} // namespace proto +} // namespace flwr +PROTOBUF_NAMESPACE_OPEN +template<> ::flwr::proto::Error* Arena::CreateMaybeMessage<::flwr::proto::Error>(Arena*); +PROTOBUF_NAMESPACE_CLOSE +namespace flwr { +namespace proto { + +// =================================================================== + +class Error final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.Error) */ { + public: + inline Error() : Error(nullptr) {} + ~Error() override; + explicit constexpr Error(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + Error(const Error& from); + Error(Error&& from) noexcept + : Error() { + *this = ::std::move(from); + } + + inline Error& operator=(const Error& from) { + CopyFrom(from); + return *this; + } + inline Error& operator=(Error&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return 
*this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const Error& default_instance() { + return *internal_default_instance(); + } + static inline const Error* internal_default_instance() { + return reinterpret_cast( + &_Error_default_instance_); + } + static constexpr int kIndexInFileMessages = + 0; + + friend void swap(Error& a, Error& b) { + a.Swap(&b); + } + inline void Swap(Error* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(Error* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline Error* New() const final { + return new Error(); + } + + Error* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const Error& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const Error& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* 
stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(Error* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.Error"; + } + protected: + explicit Error(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kReasonFieldNumber = 2, + kCodeFieldNumber = 1, + }; + // string reason = 2; + void clear_reason(); + const std::string& reason() const; + template + void set_reason(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_reason(); + PROTOBUF_MUST_USE_RESULT std::string* release_reason(); + void set_allocated_reason(std::string* reason); + private: + const std::string& _internal_reason() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_reason(const std::string& value); + std::string* _internal_mutable_reason(); + public: + + // sint64 code = 1; + void clear_code(); + ::PROTOBUF_NAMESPACE_ID::int64 code() const; + void set_code(::PROTOBUF_NAMESPACE_ID::int64 value); + private: + ::PROTOBUF_NAMESPACE_ID::int64 _internal_code() const; + void _internal_set_code(::PROTOBUF_NAMESPACE_ID::int64 value); + public: + + // @@protoc_insertion_point(class_scope:flwr.proto.Error) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr reason_; + ::PROTOBUF_NAMESPACE_ID::int64 code_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2ferror_2eproto; +}; +// =================================================================== + + +// =================================================================== + +#ifdef __GNUC__ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif // __GNUC__ +// Error + +// sint64 code = 1; +inline void Error::clear_code() { + code_ = int64_t{0}; +} +inline ::PROTOBUF_NAMESPACE_ID::int64 Error::_internal_code() const { + return code_; +} +inline ::PROTOBUF_NAMESPACE_ID::int64 Error::code() const { + // @@protoc_insertion_point(field_get:flwr.proto.Error.code) + return _internal_code(); +} +inline void Error::_internal_set_code(::PROTOBUF_NAMESPACE_ID::int64 value) { + + code_ = value; +} +inline void Error::set_code(::PROTOBUF_NAMESPACE_ID::int64 value) { + _internal_set_code(value); + // @@protoc_insertion_point(field_set:flwr.proto.Error.code) +} + +// 
string reason = 2; +inline void Error::clear_reason() { + reason_.ClearToEmpty(); +} +inline const std::string& Error::reason() const { + // @@protoc_insertion_point(field_get:flwr.proto.Error.reason) + return _internal_reason(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void Error::set_reason(ArgT0&& arg0, ArgT... args) { + + reason_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:flwr.proto.Error.reason) +} +inline std::string* Error::mutable_reason() { + std::string* _s = _internal_mutable_reason(); + // @@protoc_insertion_point(field_mutable:flwr.proto.Error.reason) + return _s; +} +inline const std::string& Error::_internal_reason() const { + return reason_.Get(); +} +inline void Error::_internal_set_reason(const std::string& value) { + + reason_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); +} +inline std::string* Error::_internal_mutable_reason() { + + return reason_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); +} +inline std::string* Error::release_reason() { + // @@protoc_insertion_point(field_release:flwr.proto.Error.reason) + return reason_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); +} +inline void Error::set_allocated_reason(std::string* reason) { + if (reason != nullptr) { + + } else { + + } + reason_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), reason, + GetArenaForAllocation()); + // @@protoc_insertion_point(field_set_allocated:flwr.proto.Error.reason) +} + +#ifdef __GNUC__ + #pragma GCC diagnostic pop +#endif // __GNUC__ + +// @@protoc_insertion_point(namespace_scope) + +} // namespace proto +} // namespace flwr + +// @@protoc_insertion_point(global_scope) + +#include +#endif // 
GOOGLE_PROTOBUF_INCLUDED_GOOGLE_PROTOBUF_INCLUDED_flwr_2fproto_2ferror_2eproto diff --git a/src/cc/flwr/include/flwr/proto/fleet.grpc.pb.cc b/src/cc/flwr/include/flwr/proto/fleet.grpc.pb.cc index c71a6a3e1c45..0e6c69ad14ac 100644 --- a/src/cc/flwr/include/flwr/proto/fleet.grpc.pb.cc +++ b/src/cc/flwr/include/flwr/proto/fleet.grpc.pb.cc @@ -25,8 +25,10 @@ namespace proto { static const char* Fleet_method_names[] = { "/flwr.proto.Fleet/CreateNode", "/flwr.proto.Fleet/DeleteNode", + "/flwr.proto.Fleet/Ping", "/flwr.proto.Fleet/PullTaskIns", "/flwr.proto.Fleet/PushTaskRes", + "/flwr.proto.Fleet/GetRun", }; std::unique_ptr< Fleet::Stub> Fleet::NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options) { @@ -38,8 +40,10 @@ std::unique_ptr< Fleet::Stub> Fleet::NewStub(const std::shared_ptr< ::grpc::Chan Fleet::Stub::Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options) : channel_(channel), rpcmethod_CreateNode_(Fleet_method_names[0], options.suffix_for_stats(),::grpc::internal::RpcMethod::NORMAL_RPC, channel) , rpcmethod_DeleteNode_(Fleet_method_names[1], options.suffix_for_stats(),::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_PullTaskIns_(Fleet_method_names[2], options.suffix_for_stats(),::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_PushTaskRes_(Fleet_method_names[3], options.suffix_for_stats(),::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_Ping_(Fleet_method_names[2], options.suffix_for_stats(),::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_PullTaskIns_(Fleet_method_names[3], options.suffix_for_stats(),::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_PushTaskRes_(Fleet_method_names[4], options.suffix_for_stats(),::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_GetRun_(Fleet_method_names[5], options.suffix_for_stats(),::grpc::internal::RpcMethod::NORMAL_RPC, channel) {} ::grpc::Status 
Fleet::Stub::CreateNode(::grpc::ClientContext* context, const ::flwr::proto::CreateNodeRequest& request, ::flwr::proto::CreateNodeResponse* response) { @@ -88,6 +92,29 @@ ::grpc::ClientAsyncResponseReader< ::flwr::proto::DeleteNodeResponse>* Fleet::St return result; } +::grpc::Status Fleet::Stub::Ping(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::flwr::proto::PingResponse* response) { + return ::grpc::internal::BlockingUnaryCall< ::flwr::proto::PingRequest, ::flwr::proto::PingResponse, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(channel_.get(), rpcmethod_Ping_, context, request, response); +} + +void Fleet::Stub::async::Ping(::grpc::ClientContext* context, const ::flwr::proto::PingRequest* request, ::flwr::proto::PingResponse* response, std::function f) { + ::grpc::internal::CallbackUnaryCall< ::flwr::proto::PingRequest, ::flwr::proto::PingResponse, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(stub_->channel_.get(), stub_->rpcmethod_Ping_, context, request, response, std::move(f)); +} + +void Fleet::Stub::async::Ping(::grpc::ClientContext* context, const ::flwr::proto::PingRequest* request, ::flwr::proto::PingResponse* response, ::grpc::ClientUnaryReactor* reactor) { + ::grpc::internal::ClientCallbackUnaryFactory::Create< ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(stub_->channel_.get(), stub_->rpcmethod_Ping_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::flwr::proto::PingResponse>* Fleet::Stub::PrepareAsyncPingRaw(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc::internal::ClientAsyncResponseReaderHelper::Create< ::flwr::proto::PingResponse, ::flwr::proto::PingRequest, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(channel_.get(), cq, rpcmethod_Ping_, context, request); +} + +::grpc::ClientAsyncResponseReader< ::flwr::proto::PingResponse>* 
Fleet::Stub::AsyncPingRaw(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::grpc::CompletionQueue* cq) { + auto* result = + this->PrepareAsyncPingRaw(context, request, cq); + result->StartCall(); + return result; +} + ::grpc::Status Fleet::Stub::PullTaskIns(::grpc::ClientContext* context, const ::flwr::proto::PullTaskInsRequest& request, ::flwr::proto::PullTaskInsResponse* response) { return ::grpc::internal::BlockingUnaryCall< ::flwr::proto::PullTaskInsRequest, ::flwr::proto::PullTaskInsResponse, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(channel_.get(), rpcmethod_PullTaskIns_, context, request, response); } @@ -134,6 +161,29 @@ ::grpc::ClientAsyncResponseReader< ::flwr::proto::PushTaskResResponse>* Fleet::S return result; } +::grpc::Status Fleet::Stub::GetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::flwr::proto::GetRunResponse* response) { + return ::grpc::internal::BlockingUnaryCall< ::flwr::proto::GetRunRequest, ::flwr::proto::GetRunResponse, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(channel_.get(), rpcmethod_GetRun_, context, request, response); +} + +void Fleet::Stub::async::GetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest* request, ::flwr::proto::GetRunResponse* response, std::function f) { + ::grpc::internal::CallbackUnaryCall< ::flwr::proto::GetRunRequest, ::flwr::proto::GetRunResponse, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(stub_->channel_.get(), stub_->rpcmethod_GetRun_, context, request, response, std::move(f)); +} + +void Fleet::Stub::async::GetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest* request, ::flwr::proto::GetRunResponse* response, ::grpc::ClientUnaryReactor* reactor) { + ::grpc::internal::ClientCallbackUnaryFactory::Create< ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(stub_->channel_.get(), stub_->rpcmethod_GetRun_, context, request, response, reactor); 
+} + +::grpc::ClientAsyncResponseReader< ::flwr::proto::GetRunResponse>* Fleet::Stub::PrepareAsyncGetRunRaw(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::grpc::CompletionQueue* cq) { + return ::grpc::internal::ClientAsyncResponseReaderHelper::Create< ::flwr::proto::GetRunResponse, ::flwr::proto::GetRunRequest, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(channel_.get(), cq, rpcmethod_GetRun_, context, request); +} + +::grpc::ClientAsyncResponseReader< ::flwr::proto::GetRunResponse>* Fleet::Stub::AsyncGetRunRaw(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::grpc::CompletionQueue* cq) { + auto* result = + this->PrepareAsyncGetRunRaw(context, request, cq); + result->StartCall(); + return result; +} + Fleet::Service::Service() { AddMethod(new ::grpc::internal::RpcServiceMethod( Fleet_method_names[0], @@ -158,6 +208,16 @@ Fleet::Service::Service() { AddMethod(new ::grpc::internal::RpcServiceMethod( Fleet_method_names[2], ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< Fleet::Service, ::flwr::proto::PingRequest, ::flwr::proto::PingResponse, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>( + [](Fleet::Service* service, + ::grpc::ServerContext* ctx, + const ::flwr::proto::PingRequest* req, + ::flwr::proto::PingResponse* resp) { + return service->Ping(ctx, req, resp); + }, this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + Fleet_method_names[3], + ::grpc::internal::RpcMethod::NORMAL_RPC, new ::grpc::internal::RpcMethodHandler< Fleet::Service, ::flwr::proto::PullTaskInsRequest, ::flwr::proto::PullTaskInsResponse, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>( [](Fleet::Service* service, ::grpc::ServerContext* ctx, @@ -166,7 +226,7 @@ Fleet::Service::Service() { return service->PullTaskIns(ctx, req, resp); }, this))); AddMethod(new ::grpc::internal::RpcServiceMethod( - Fleet_method_names[3], + Fleet_method_names[4], 
::grpc::internal::RpcMethod::NORMAL_RPC, new ::grpc::internal::RpcMethodHandler< Fleet::Service, ::flwr::proto::PushTaskResRequest, ::flwr::proto::PushTaskResResponse, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>( [](Fleet::Service* service, @@ -175,6 +235,16 @@ Fleet::Service::Service() { ::flwr::proto::PushTaskResResponse* resp) { return service->PushTaskRes(ctx, req, resp); }, this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + Fleet_method_names[5], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< Fleet::Service, ::flwr::proto::GetRunRequest, ::flwr::proto::GetRunResponse, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>( + [](Fleet::Service* service, + ::grpc::ServerContext* ctx, + const ::flwr::proto::GetRunRequest* req, + ::flwr::proto::GetRunResponse* resp) { + return service->GetRun(ctx, req, resp); + }, this))); } Fleet::Service::~Service() { @@ -194,6 +264,13 @@ ::grpc::Status Fleet::Service::DeleteNode(::grpc::ServerContext* context, const return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } +::grpc::Status Fleet::Service::Ping(::grpc::ServerContext* context, const ::flwr::proto::PingRequest* request, ::flwr::proto::PingResponse* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + ::grpc::Status Fleet::Service::PullTaskIns(::grpc::ServerContext* context, const ::flwr::proto::PullTaskInsRequest* request, ::flwr::proto::PullTaskInsResponse* response) { (void) context; (void) request; @@ -208,6 +285,13 @@ ::grpc::Status Fleet::Service::PushTaskRes(::grpc::ServerContext* context, const return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } +::grpc::Status Fleet::Service::GetRun(::grpc::ServerContext* context, const ::flwr::proto::GetRunRequest* request, ::flwr::proto::GetRunResponse* response) { + (void) context; + (void) request; + (void) response; + return 
::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + } // namespace flwr } // namespace proto diff --git a/src/cc/flwr/include/flwr/proto/fleet.grpc.pb.h b/src/cc/flwr/include/flwr/proto/fleet.grpc.pb.h index 03d445142c37..fb1e4bf7b6c4 100644 --- a/src/cc/flwr/include/flwr/proto/fleet.grpc.pb.h +++ b/src/cc/flwr/include/flwr/proto/fleet.grpc.pb.h @@ -66,6 +66,13 @@ class Fleet final { std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::DeleteNodeResponse>> PrepareAsyncDeleteNode(::grpc::ClientContext* context, const ::flwr::proto::DeleteNodeRequest& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::DeleteNodeResponse>>(PrepareAsyncDeleteNodeRaw(context, request, cq)); } + virtual ::grpc::Status Ping(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::flwr::proto::PingResponse* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PingResponse>> AsyncPing(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PingResponse>>(AsyncPingRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PingResponse>> PrepareAsyncPing(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PingResponse>>(PrepareAsyncPingRaw(context, request, cq)); + } // Retrieve one or more tasks, if possible // // HTTP API path: /api/v1/fleet/pull-task-ins @@ -86,6 +93,13 @@ class Fleet final { std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PushTaskResResponse>> PrepareAsyncPushTaskRes(::grpc::ClientContext* context, const ::flwr::proto::PushTaskResRequest& request, 
::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PushTaskResResponse>>(PrepareAsyncPushTaskResRaw(context, request, cq)); } + virtual ::grpc::Status GetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::flwr::proto::GetRunResponse* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::GetRunResponse>> AsyncGetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::GetRunResponse>>(AsyncGetRunRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::GetRunResponse>> PrepareAsyncGetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::GetRunResponse>>(PrepareAsyncGetRunRaw(context, request, cq)); + } class async_interface { public: virtual ~async_interface() {} @@ -93,6 +107,8 @@ class Fleet final { virtual void CreateNode(::grpc::ClientContext* context, const ::flwr::proto::CreateNodeRequest* request, ::flwr::proto::CreateNodeResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; virtual void DeleteNode(::grpc::ClientContext* context, const ::flwr::proto::DeleteNodeRequest* request, ::flwr::proto::DeleteNodeResponse* response, std::function) = 0; virtual void DeleteNode(::grpc::ClientContext* context, const ::flwr::proto::DeleteNodeRequest* request, ::flwr::proto::DeleteNodeResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + virtual void Ping(::grpc::ClientContext* context, const ::flwr::proto::PingRequest* request, ::flwr::proto::PingResponse* response, std::function) = 0; + virtual void Ping(::grpc::ClientContext* context, const ::flwr::proto::PingRequest* request, 
::flwr::proto::PingResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; // Retrieve one or more tasks, if possible // // HTTP API path: /api/v1/fleet/pull-task-ins @@ -103,6 +119,8 @@ class Fleet final { // HTTP API path: /api/v1/fleet/push-task-res virtual void PushTaskRes(::grpc::ClientContext* context, const ::flwr::proto::PushTaskResRequest* request, ::flwr::proto::PushTaskResResponse* response, std::function) = 0; virtual void PushTaskRes(::grpc::ClientContext* context, const ::flwr::proto::PushTaskResRequest* request, ::flwr::proto::PushTaskResResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; + virtual void GetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest* request, ::flwr::proto::GetRunResponse* response, std::function) = 0; + virtual void GetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest* request, ::flwr::proto::GetRunResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0; }; typedef class async_interface experimental_async_interface; virtual class async_interface* async() { return nullptr; } @@ -112,10 +130,14 @@ class Fleet final { virtual ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::CreateNodeResponse>* PrepareAsyncCreateNodeRaw(::grpc::ClientContext* context, const ::flwr::proto::CreateNodeRequest& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::DeleteNodeResponse>* AsyncDeleteNodeRaw(::grpc::ClientContext* context, const ::flwr::proto::DeleteNodeRequest& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::DeleteNodeResponse>* PrepareAsyncDeleteNodeRaw(::grpc::ClientContext* context, const ::flwr::proto::DeleteNodeRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PingResponse>* AsyncPingRaw(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, 
::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PingResponse>* PrepareAsyncPingRaw(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PullTaskInsResponse>* AsyncPullTaskInsRaw(::grpc::ClientContext* context, const ::flwr::proto::PullTaskInsRequest& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PullTaskInsResponse>* PrepareAsyncPullTaskInsRaw(::grpc::ClientContext* context, const ::flwr::proto::PullTaskInsRequest& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PushTaskResResponse>* AsyncPushTaskResRaw(::grpc::ClientContext* context, const ::flwr::proto::PushTaskResRequest& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::PushTaskResResponse>* PrepareAsyncPushTaskResRaw(::grpc::ClientContext* context, const ::flwr::proto::PushTaskResRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::GetRunResponse>* AsyncGetRunRaw(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::flwr::proto::GetRunResponse>* PrepareAsyncGetRunRaw(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::grpc::CompletionQueue* cq) = 0; }; class Stub final : public StubInterface { public: @@ -134,6 +156,13 @@ class Fleet final { std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::DeleteNodeResponse>> PrepareAsyncDeleteNode(::grpc::ClientContext* context, const ::flwr::proto::DeleteNodeRequest& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReader< 
::flwr::proto::DeleteNodeResponse>>(PrepareAsyncDeleteNodeRaw(context, request, cq)); } + ::grpc::Status Ping(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::flwr::proto::PingResponse* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::PingResponse>> AsyncPing(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::PingResponse>>(AsyncPingRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::PingResponse>> PrepareAsyncPing(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::PingResponse>>(PrepareAsyncPingRaw(context, request, cq)); + } ::grpc::Status PullTaskIns(::grpc::ClientContext* context, const ::flwr::proto::PullTaskInsRequest& request, ::flwr::proto::PullTaskInsResponse* response) override; std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::PullTaskInsResponse>> AsyncPullTaskIns(::grpc::ClientContext* context, const ::flwr::proto::PullTaskInsRequest& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::PullTaskInsResponse>>(AsyncPullTaskInsRaw(context, request, cq)); @@ -148,6 +177,13 @@ class Fleet final { std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::PushTaskResResponse>> PrepareAsyncPushTaskRes(::grpc::ClientContext* context, const ::flwr::proto::PushTaskResRequest& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::PushTaskResResponse>>(PrepareAsyncPushTaskResRaw(context, request, cq)); } + ::grpc::Status GetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::flwr::proto::GetRunResponse* response) 
override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::GetRunResponse>> AsyncGetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::GetRunResponse>>(AsyncGetRunRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::GetRunResponse>> PrepareAsyncGetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::flwr::proto::GetRunResponse>>(PrepareAsyncGetRunRaw(context, request, cq)); + } class async final : public StubInterface::async_interface { public: @@ -155,10 +191,14 @@ class Fleet final { void CreateNode(::grpc::ClientContext* context, const ::flwr::proto::CreateNodeRequest* request, ::flwr::proto::CreateNodeResponse* response, ::grpc::ClientUnaryReactor* reactor) override; void DeleteNode(::grpc::ClientContext* context, const ::flwr::proto::DeleteNodeRequest* request, ::flwr::proto::DeleteNodeResponse* response, std::function) override; void DeleteNode(::grpc::ClientContext* context, const ::flwr::proto::DeleteNodeRequest* request, ::flwr::proto::DeleteNodeResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + void Ping(::grpc::ClientContext* context, const ::flwr::proto::PingRequest* request, ::flwr::proto::PingResponse* response, std::function) override; + void Ping(::grpc::ClientContext* context, const ::flwr::proto::PingRequest* request, ::flwr::proto::PingResponse* response, ::grpc::ClientUnaryReactor* reactor) override; void PullTaskIns(::grpc::ClientContext* context, const ::flwr::proto::PullTaskInsRequest* request, ::flwr::proto::PullTaskInsResponse* response, std::function) override; void PullTaskIns(::grpc::ClientContext* context, const ::flwr::proto::PullTaskInsRequest* request, ::flwr::proto::PullTaskInsResponse* 
response, ::grpc::ClientUnaryReactor* reactor) override; void PushTaskRes(::grpc::ClientContext* context, const ::flwr::proto::PushTaskResRequest* request, ::flwr::proto::PushTaskResResponse* response, std::function) override; void PushTaskRes(::grpc::ClientContext* context, const ::flwr::proto::PushTaskResRequest* request, ::flwr::proto::PushTaskResResponse* response, ::grpc::ClientUnaryReactor* reactor) override; + void GetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest* request, ::flwr::proto::GetRunResponse* response, std::function) override; + void GetRun(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest* request, ::flwr::proto::GetRunResponse* response, ::grpc::ClientUnaryReactor* reactor) override; private: friend class Stub; explicit async(Stub* stub): stub_(stub) { } @@ -174,14 +214,20 @@ class Fleet final { ::grpc::ClientAsyncResponseReader< ::flwr::proto::CreateNodeResponse>* PrepareAsyncCreateNodeRaw(::grpc::ClientContext* context, const ::flwr::proto::CreateNodeRequest& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::flwr::proto::DeleteNodeResponse>* AsyncDeleteNodeRaw(::grpc::ClientContext* context, const ::flwr::proto::DeleteNodeRequest& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::flwr::proto::DeleteNodeResponse>* PrepareAsyncDeleteNodeRaw(::grpc::ClientContext* context, const ::flwr::proto::DeleteNodeRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::flwr::proto::PingResponse>* AsyncPingRaw(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::flwr::proto::PingResponse>* PrepareAsyncPingRaw(::grpc::ClientContext* context, const ::flwr::proto::PingRequest& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::flwr::proto::PullTaskInsResponse>* 
AsyncPullTaskInsRaw(::grpc::ClientContext* context, const ::flwr::proto::PullTaskInsRequest& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::flwr::proto::PullTaskInsResponse>* PrepareAsyncPullTaskInsRaw(::grpc::ClientContext* context, const ::flwr::proto::PullTaskInsRequest& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::flwr::proto::PushTaskResResponse>* AsyncPushTaskResRaw(::grpc::ClientContext* context, const ::flwr::proto::PushTaskResRequest& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::flwr::proto::PushTaskResResponse>* PrepareAsyncPushTaskResRaw(::grpc::ClientContext* context, const ::flwr::proto::PushTaskResRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::flwr::proto::GetRunResponse>* AsyncGetRunRaw(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::flwr::proto::GetRunResponse>* PrepareAsyncGetRunRaw(::grpc::ClientContext* context, const ::flwr::proto::GetRunRequest& request, ::grpc::CompletionQueue* cq) override; const ::grpc::internal::RpcMethod rpcmethod_CreateNode_; const ::grpc::internal::RpcMethod rpcmethod_DeleteNode_; + const ::grpc::internal::RpcMethod rpcmethod_Ping_; const ::grpc::internal::RpcMethod rpcmethod_PullTaskIns_; const ::grpc::internal::RpcMethod rpcmethod_PushTaskRes_; + const ::grpc::internal::RpcMethod rpcmethod_GetRun_; }; static std::unique_ptr NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options = ::grpc::StubOptions()); @@ -191,6 +237,7 @@ class Fleet final { virtual ~Service(); virtual ::grpc::Status CreateNode(::grpc::ServerContext* context, const ::flwr::proto::CreateNodeRequest* request, ::flwr::proto::CreateNodeResponse* response); virtual ::grpc::Status DeleteNode(::grpc::ServerContext* context, const 
::flwr::proto::DeleteNodeRequest* request, ::flwr::proto::DeleteNodeResponse* response); + virtual ::grpc::Status Ping(::grpc::ServerContext* context, const ::flwr::proto::PingRequest* request, ::flwr::proto::PingResponse* response); // Retrieve one or more tasks, if possible // // HTTP API path: /api/v1/fleet/pull-task-ins @@ -199,6 +246,7 @@ class Fleet final { // // HTTP API path: /api/v1/fleet/push-task-res virtual ::grpc::Status PushTaskRes(::grpc::ServerContext* context, const ::flwr::proto::PushTaskResRequest* request, ::flwr::proto::PushTaskResResponse* response); + virtual ::grpc::Status GetRun(::grpc::ServerContext* context, const ::flwr::proto::GetRunRequest* request, ::flwr::proto::GetRunResponse* response); }; template class WithAsyncMethod_CreateNode : public BaseClass { @@ -241,12 +289,32 @@ class Fleet final { } }; template + class WithAsyncMethod_Ping : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_Ping() { + ::grpc::Service::MarkMethodAsync(2); + } + ~WithAsyncMethod_Ping() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Ping(::grpc::ServerContext* /*context*/, const ::flwr::proto::PingRequest* /*request*/, ::flwr::proto::PingResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestPing(::grpc::ServerContext* context, ::flwr::proto::PingRequest* request, ::grpc::ServerAsyncResponseWriter< ::flwr::proto::PingResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(2, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template class WithAsyncMethod_PullTaskIns : public BaseClass { private: void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithAsyncMethod_PullTaskIns() { - 
::grpc::Service::MarkMethodAsync(2); + ::grpc::Service::MarkMethodAsync(3); } ~WithAsyncMethod_PullTaskIns() override { BaseClassMustBeDerivedFromService(this); @@ -257,7 +325,7 @@ class Fleet final { return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestPullTaskIns(::grpc::ServerContext* context, ::flwr::proto::PullTaskInsRequest* request, ::grpc::ServerAsyncResponseWriter< ::flwr::proto::PullTaskInsResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(2, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(3, context, request, response, new_call_cq, notification_cq, tag); } }; template @@ -266,7 +334,7 @@ class Fleet final { void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithAsyncMethod_PushTaskRes() { - ::grpc::Service::MarkMethodAsync(3); + ::grpc::Service::MarkMethodAsync(4); } ~WithAsyncMethod_PushTaskRes() override { BaseClassMustBeDerivedFromService(this); @@ -277,10 +345,30 @@ class Fleet final { return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestPushTaskRes(::grpc::ServerContext* context, ::flwr::proto::PushTaskResRequest* request, ::grpc::ServerAsyncResponseWriter< ::flwr::proto::PushTaskResResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(3, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(4, context, request, response, new_call_cq, notification_cq, tag); } }; - typedef WithAsyncMethod_CreateNode > > > AsyncService; + template + class WithAsyncMethod_GetRun : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_GetRun() { + ::grpc::Service::MarkMethodAsync(5); + } + ~WithAsyncMethod_GetRun() 
override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status GetRun(::grpc::ServerContext* /*context*/, const ::flwr::proto::GetRunRequest* /*request*/, ::flwr::proto::GetRunResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestGetRun(::grpc::ServerContext* context, ::flwr::proto::GetRunRequest* request, ::grpc::ServerAsyncResponseWriter< ::flwr::proto::GetRunResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(5, context, request, response, new_call_cq, notification_cq, tag); + } + }; + typedef WithAsyncMethod_CreateNode > > > > > AsyncService; template class WithCallbackMethod_CreateNode : public BaseClass { private: @@ -336,18 +424,45 @@ class Fleet final { ::grpc::CallbackServerContext* /*context*/, const ::flwr::proto::DeleteNodeRequest* /*request*/, ::flwr::proto::DeleteNodeResponse* /*response*/) { return nullptr; } }; template + class WithCallbackMethod_Ping : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithCallbackMethod_Ping() { + ::grpc::Service::MarkMethodCallback(2, + new ::grpc::internal::CallbackUnaryHandler< ::flwr::proto::PingRequest, ::flwr::proto::PingResponse>( + [this]( + ::grpc::CallbackServerContext* context, const ::flwr::proto::PingRequest* request, ::flwr::proto::PingResponse* response) { return this->Ping(context, request, response); }));} + void SetMessageAllocatorFor_Ping( + ::grpc::MessageAllocator< ::flwr::proto::PingRequest, ::flwr::proto::PingResponse>* allocator) { + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(2); + static_cast<::grpc::internal::CallbackUnaryHandler< ::flwr::proto::PingRequest, ::flwr::proto::PingResponse>*>(handler) + ->SetMessageAllocator(allocator); + } + 
~WithCallbackMethod_Ping() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Ping(::grpc::ServerContext* /*context*/, const ::flwr::proto::PingRequest* /*request*/, ::flwr::proto::PingResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual ::grpc::ServerUnaryReactor* Ping( + ::grpc::CallbackServerContext* /*context*/, const ::flwr::proto::PingRequest* /*request*/, ::flwr::proto::PingResponse* /*response*/) { return nullptr; } + }; + template class WithCallbackMethod_PullTaskIns : public BaseClass { private: void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithCallbackMethod_PullTaskIns() { - ::grpc::Service::MarkMethodCallback(2, + ::grpc::Service::MarkMethodCallback(3, new ::grpc::internal::CallbackUnaryHandler< ::flwr::proto::PullTaskInsRequest, ::flwr::proto::PullTaskInsResponse>( [this]( ::grpc::CallbackServerContext* context, const ::flwr::proto::PullTaskInsRequest* request, ::flwr::proto::PullTaskInsResponse* response) { return this->PullTaskIns(context, request, response); }));} void SetMessageAllocatorFor_PullTaskIns( ::grpc::MessageAllocator< ::flwr::proto::PullTaskInsRequest, ::flwr::proto::PullTaskInsResponse>* allocator) { - ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(2); + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(3); static_cast<::grpc::internal::CallbackUnaryHandler< ::flwr::proto::PullTaskInsRequest, ::flwr::proto::PullTaskInsResponse>*>(handler) ->SetMessageAllocator(allocator); } @@ -368,13 +483,13 @@ class Fleet final { void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithCallbackMethod_PushTaskRes() { - ::grpc::Service::MarkMethodCallback(3, + ::grpc::Service::MarkMethodCallback(4, new ::grpc::internal::CallbackUnaryHandler< ::flwr::proto::PushTaskResRequest, 
::flwr::proto::PushTaskResResponse>( [this]( ::grpc::CallbackServerContext* context, const ::flwr::proto::PushTaskResRequest* request, ::flwr::proto::PushTaskResResponse* response) { return this->PushTaskRes(context, request, response); }));} void SetMessageAllocatorFor_PushTaskRes( ::grpc::MessageAllocator< ::flwr::proto::PushTaskResRequest, ::flwr::proto::PushTaskResResponse>* allocator) { - ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(3); + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(4); static_cast<::grpc::internal::CallbackUnaryHandler< ::flwr::proto::PushTaskResRequest, ::flwr::proto::PushTaskResResponse>*>(handler) ->SetMessageAllocator(allocator); } @@ -389,7 +504,34 @@ class Fleet final { virtual ::grpc::ServerUnaryReactor* PushTaskRes( ::grpc::CallbackServerContext* /*context*/, const ::flwr::proto::PushTaskResRequest* /*request*/, ::flwr::proto::PushTaskResResponse* /*response*/) { return nullptr; } }; - typedef WithCallbackMethod_CreateNode > > > CallbackService; + template + class WithCallbackMethod_GetRun : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithCallbackMethod_GetRun() { + ::grpc::Service::MarkMethodCallback(5, + new ::grpc::internal::CallbackUnaryHandler< ::flwr::proto::GetRunRequest, ::flwr::proto::GetRunResponse>( + [this]( + ::grpc::CallbackServerContext* context, const ::flwr::proto::GetRunRequest* request, ::flwr::proto::GetRunResponse* response) { return this->GetRun(context, request, response); }));} + void SetMessageAllocatorFor_GetRun( + ::grpc::MessageAllocator< ::flwr::proto::GetRunRequest, ::flwr::proto::GetRunResponse>* allocator) { + ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(5); + static_cast<::grpc::internal::CallbackUnaryHandler< ::flwr::proto::GetRunRequest, ::flwr::proto::GetRunResponse>*>(handler) + ->SetMessageAllocator(allocator); + } + 
~WithCallbackMethod_GetRun() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status GetRun(::grpc::ServerContext* /*context*/, const ::flwr::proto::GetRunRequest* /*request*/, ::flwr::proto::GetRunResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual ::grpc::ServerUnaryReactor* GetRun( + ::grpc::CallbackServerContext* /*context*/, const ::flwr::proto::GetRunRequest* /*request*/, ::flwr::proto::GetRunResponse* /*response*/) { return nullptr; } + }; + typedef WithCallbackMethod_CreateNode > > > > > CallbackService; typedef CallbackService ExperimentalCallbackService; template class WithGenericMethod_CreateNode : public BaseClass { @@ -426,12 +568,29 @@ class Fleet final { } }; template + class WithGenericMethod_Ping : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_Ping() { + ::grpc::Service::MarkMethodGeneric(2); + } + ~WithGenericMethod_Ping() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Ping(::grpc::ServerContext* /*context*/, const ::flwr::proto::PingRequest* /*request*/, ::flwr::proto::PingResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template class WithGenericMethod_PullTaskIns : public BaseClass { private: void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithGenericMethod_PullTaskIns() { - ::grpc::Service::MarkMethodGeneric(2); + ::grpc::Service::MarkMethodGeneric(3); } ~WithGenericMethod_PullTaskIns() override { BaseClassMustBeDerivedFromService(this); @@ -448,7 +607,7 @@ class Fleet final { void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithGenericMethod_PushTaskRes() { - ::grpc::Service::MarkMethodGeneric(3); + 
::grpc::Service::MarkMethodGeneric(4); } ~WithGenericMethod_PushTaskRes() override { BaseClassMustBeDerivedFromService(this); @@ -460,6 +619,23 @@ class Fleet final { } }; template + class WithGenericMethod_GetRun : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_GetRun() { + ::grpc::Service::MarkMethodGeneric(5); + } + ~WithGenericMethod_GetRun() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status GetRun(::grpc::ServerContext* /*context*/, const ::flwr::proto::GetRunRequest* /*request*/, ::flwr::proto::GetRunResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template class WithRawMethod_CreateNode : public BaseClass { private: void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} @@ -500,12 +676,32 @@ class Fleet final { } }; template + class WithRawMethod_Ping : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_Ping() { + ::grpc::Service::MarkMethodRaw(2); + } + ~WithRawMethod_Ping() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Ping(::grpc::ServerContext* /*context*/, const ::flwr::proto::PingRequest* /*request*/, ::flwr::proto::PingResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestPing(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(2, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template class WithRawMethod_PullTaskIns : public BaseClass { private: void 
BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithRawMethod_PullTaskIns() { - ::grpc::Service::MarkMethodRaw(2); + ::grpc::Service::MarkMethodRaw(3); } ~WithRawMethod_PullTaskIns() override { BaseClassMustBeDerivedFromService(this); @@ -516,7 +712,7 @@ class Fleet final { return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestPullTaskIns(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(2, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(3, context, request, response, new_call_cq, notification_cq, tag); } }; template @@ -525,7 +721,7 @@ class Fleet final { void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithRawMethod_PushTaskRes() { - ::grpc::Service::MarkMethodRaw(3); + ::grpc::Service::MarkMethodRaw(4); } ~WithRawMethod_PushTaskRes() override { BaseClassMustBeDerivedFromService(this); @@ -536,7 +732,27 @@ class Fleet final { return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestPushTaskRes(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(3, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(4, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_GetRun : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_GetRun() { + ::grpc::Service::MarkMethodRaw(5); + } + ~WithRawMethod_GetRun() override { + 
BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status GetRun(::grpc::ServerContext* /*context*/, const ::flwr::proto::GetRunRequest* /*request*/, ::flwr::proto::GetRunResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestGetRun(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(5, context, request, response, new_call_cq, notification_cq, tag); } }; template @@ -584,12 +800,34 @@ class Fleet final { ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) { return nullptr; } }; template + class WithRawCallbackMethod_Ping : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawCallbackMethod_Ping() { + ::grpc::Service::MarkMethodRawCallback(2, + new ::grpc::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this]( + ::grpc::CallbackServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->Ping(context, request, response); })); + } + ~WithRawCallbackMethod_Ping() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Ping(::grpc::ServerContext* /*context*/, const ::flwr::proto::PingRequest* /*request*/, ::flwr::proto::PingResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual ::grpc::ServerUnaryReactor* Ping( + ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) { return nullptr; } + }; + template class WithRawCallbackMethod_PullTaskIns : 
public BaseClass { private: void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithRawCallbackMethod_PullTaskIns() { - ::grpc::Service::MarkMethodRawCallback(2, + ::grpc::Service::MarkMethodRawCallback(3, new ::grpc::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( [this]( ::grpc::CallbackServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->PullTaskIns(context, request, response); })); @@ -611,7 +849,7 @@ class Fleet final { void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithRawCallbackMethod_PushTaskRes() { - ::grpc::Service::MarkMethodRawCallback(3, + ::grpc::Service::MarkMethodRawCallback(4, new ::grpc::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( [this]( ::grpc::CallbackServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->PushTaskRes(context, request, response); })); @@ -628,6 +866,28 @@ class Fleet final { ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) { return nullptr; } }; template + class WithRawCallbackMethod_GetRun : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawCallbackMethod_GetRun() { + ::grpc::Service::MarkMethodRawCallback(5, + new ::grpc::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this]( + ::grpc::CallbackServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->GetRun(context, request, response); })); + } + ~WithRawCallbackMethod_GetRun() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status GetRun(::grpc::ServerContext* /*context*/, const ::flwr::proto::GetRunRequest* /*request*/, ::flwr::proto::GetRunResponse* /*response*/) override { + abort(); + 
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual ::grpc::ServerUnaryReactor* GetRun( + ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) { return nullptr; } + }; + template class WithStreamedUnaryMethod_CreateNode : public BaseClass { private: void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} @@ -682,12 +942,39 @@ class Fleet final { virtual ::grpc::Status StreamedDeleteNode(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::flwr::proto::DeleteNodeRequest,::flwr::proto::DeleteNodeResponse>* server_unary_streamer) = 0; }; template + class WithStreamedUnaryMethod_Ping : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_Ping() { + ::grpc::Service::MarkMethodStreamed(2, + new ::grpc::internal::StreamedUnaryHandler< + ::flwr::proto::PingRequest, ::flwr::proto::PingResponse>( + [this](::grpc::ServerContext* context, + ::grpc::ServerUnaryStreamer< + ::flwr::proto::PingRequest, ::flwr::proto::PingResponse>* streamer) { + return this->StreamedPing(context, + streamer); + })); + } + ~WithStreamedUnaryMethod_Ping() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status Ping(::grpc::ServerContext* /*context*/, const ::flwr::proto::PingRequest* /*request*/, ::flwr::proto::PingResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedPing(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::flwr::proto::PingRequest,::flwr::proto::PingResponse>* server_unary_streamer) = 0; + }; + template class WithStreamedUnaryMethod_PullTaskIns : public BaseClass { private: void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: 
WithStreamedUnaryMethod_PullTaskIns() { - ::grpc::Service::MarkMethodStreamed(2, + ::grpc::Service::MarkMethodStreamed(3, new ::grpc::internal::StreamedUnaryHandler< ::flwr::proto::PullTaskInsRequest, ::flwr::proto::PullTaskInsResponse>( [this](::grpc::ServerContext* context, @@ -714,7 +1001,7 @@ class Fleet final { void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithStreamedUnaryMethod_PushTaskRes() { - ::grpc::Service::MarkMethodStreamed(3, + ::grpc::Service::MarkMethodStreamed(4, new ::grpc::internal::StreamedUnaryHandler< ::flwr::proto::PushTaskResRequest, ::flwr::proto::PushTaskResResponse>( [this](::grpc::ServerContext* context, @@ -735,9 +1022,36 @@ class Fleet final { // replace default version of method with streamed unary virtual ::grpc::Status StreamedPushTaskRes(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::flwr::proto::PushTaskResRequest,::flwr::proto::PushTaskResResponse>* server_unary_streamer) = 0; }; - typedef WithStreamedUnaryMethod_CreateNode > > > StreamedUnaryService; + template + class WithStreamedUnaryMethod_GetRun : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_GetRun() { + ::grpc::Service::MarkMethodStreamed(5, + new ::grpc::internal::StreamedUnaryHandler< + ::flwr::proto::GetRunRequest, ::flwr::proto::GetRunResponse>( + [this](::grpc::ServerContext* context, + ::grpc::ServerUnaryStreamer< + ::flwr::proto::GetRunRequest, ::flwr::proto::GetRunResponse>* streamer) { + return this->StreamedGetRun(context, + streamer); + })); + } + ~WithStreamedUnaryMethod_GetRun() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status GetRun(::grpc::ServerContext* /*context*/, const ::flwr::proto::GetRunRequest* /*request*/, ::flwr::proto::GetRunResponse* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + 
// replace default version of method with streamed unary + virtual ::grpc::Status StreamedGetRun(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::flwr::proto::GetRunRequest,::flwr::proto::GetRunResponse>* server_unary_streamer) = 0; + }; + typedef WithStreamedUnaryMethod_CreateNode > > > > > StreamedUnaryService; typedef Service SplitStreamedService; - typedef WithStreamedUnaryMethod_CreateNode > > > StreamedService; + typedef WithStreamedUnaryMethod_CreateNode > > > > > StreamedService; }; } // namespace proto diff --git a/src/cc/flwr/include/flwr/proto/fleet.pb.cc b/src/cc/flwr/include/flwr/proto/fleet.pb.cc index 302331374db1..d221658623c3 100644 --- a/src/cc/flwr/include/flwr/proto/fleet.pb.cc +++ b/src/cc/flwr/include/flwr/proto/fleet.pb.cc @@ -19,7 +19,8 @@ PROTOBUF_PRAGMA_INIT_SEG namespace flwr { namespace proto { constexpr CreateNodeRequest::CreateNodeRequest( - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized){} + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : ping_interval_(0){} struct CreateNodeRequestDefaultTypeInternal { constexpr CreateNodeRequestDefaultTypeInternal() : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} @@ -64,6 +65,31 @@ struct DeleteNodeResponseDefaultTypeInternal { }; }; PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT DeleteNodeResponseDefaultTypeInternal _DeleteNodeResponse_default_instance_; +constexpr PingRequest::PingRequest( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : node_(nullptr) + , ping_interval_(0){} +struct PingRequestDefaultTypeInternal { + constexpr PingRequestDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~PingRequestDefaultTypeInternal() {} + union { + PingRequest _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PingRequestDefaultTypeInternal _PingRequest_default_instance_; +constexpr PingResponse::PingResponse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : 
success_(false){} +struct PingResponseDefaultTypeInternal { + constexpr PingResponseDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~PingResponseDefaultTypeInternal() {} + union { + PingResponse _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PingResponseDefaultTypeInternal _PingResponse_default_instance_; constexpr PullTaskInsRequest::PullTaskInsRequest( ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) : task_ids_() @@ -126,6 +152,44 @@ struct PushTaskResResponseDefaultTypeInternal { }; }; PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PushTaskResResponseDefaultTypeInternal _PushTaskResResponse_default_instance_; +constexpr Run::Run( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : fab_id_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) + , fab_version_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) + , run_id_(int64_t{0}){} +struct RunDefaultTypeInternal { + constexpr RunDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~RunDefaultTypeInternal() {} + union { + Run _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT RunDefaultTypeInternal _Run_default_instance_; +constexpr GetRunRequest::GetRunRequest( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : run_id_(int64_t{0}){} +struct GetRunRequestDefaultTypeInternal { + constexpr GetRunRequestDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~GetRunRequestDefaultTypeInternal() {} + union { + GetRunRequest _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT GetRunRequestDefaultTypeInternal _GetRunRequest_default_instance_; +constexpr GetRunResponse::GetRunResponse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : run_(nullptr){} +struct GetRunResponseDefaultTypeInternal { + constexpr GetRunResponseDefaultTypeInternal() + : 
_instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~GetRunResponseDefaultTypeInternal() {} + union { + GetRunResponse _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT GetRunResponseDefaultTypeInternal _GetRunResponse_default_instance_; constexpr Reconnect::Reconnect( ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) : reconnect_(uint64_t{0u}){} @@ -140,7 +204,7 @@ struct ReconnectDefaultTypeInternal { PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT ReconnectDefaultTypeInternal _Reconnect_default_instance_; } // namespace proto } // namespace flwr -static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_flwr_2fproto_2ffleet_2eproto[10]; +static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_flwr_2fproto_2ffleet_2eproto[15]; static constexpr ::PROTOBUF_NAMESPACE_ID::EnumDescriptor const** file_level_enum_descriptors_flwr_2fproto_2ffleet_2eproto = nullptr; static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** file_level_service_descriptors_flwr_2fproto_2ffleet_2eproto = nullptr; @@ -151,6 +215,7 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_flwr_2fproto_2ffleet_2eproto:: ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::CreateNodeRequest, ping_interval_), ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::flwr::proto::CreateNodeResponse, _internal_metadata_), ~0u, // no _extensions_ @@ -172,6 +237,21 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_flwr_2fproto_2ffleet_2eproto:: ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::PingRequest, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::PingRequest, node_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::PingRequest, ping_interval_), + ~0u, // no _has_bits_ + 
PROTOBUF_FIELD_OFFSET(::flwr::proto::PingResponse, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::PingResponse, success_), + ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::flwr::proto::PullTaskInsRequest, _internal_metadata_), ~0u, // no _extensions_ ~0u, // no _oneof_case_ @@ -213,6 +293,29 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_flwr_2fproto_2ffleet_2eproto:: PROTOBUF_FIELD_OFFSET(::flwr::proto::PushTaskResResponse, reconnect_), PROTOBUF_FIELD_OFFSET(::flwr::proto::PushTaskResResponse, results_), ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::Run, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::Run, run_id_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::Run, fab_id_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::Run, fab_version_), + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::GetRunRequest, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::GetRunRequest, run_id_), + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::GetRunResponse, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::GetRunResponse, run_), + ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::flwr::proto::Reconnect, _internal_metadata_), ~0u, // no _extensions_ ~0u, // no _oneof_case_ @@ -222,15 +325,20 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_flwr_2fproto_2ffleet_2eproto:: }; static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { { 0, -1, -1, 
sizeof(::flwr::proto::CreateNodeRequest)}, - { 6, -1, -1, sizeof(::flwr::proto::CreateNodeResponse)}, - { 13, -1, -1, sizeof(::flwr::proto::DeleteNodeRequest)}, - { 20, -1, -1, sizeof(::flwr::proto::DeleteNodeResponse)}, - { 26, -1, -1, sizeof(::flwr::proto::PullTaskInsRequest)}, - { 34, -1, -1, sizeof(::flwr::proto::PullTaskInsResponse)}, - { 42, -1, -1, sizeof(::flwr::proto::PushTaskResRequest)}, - { 49, 57, -1, sizeof(::flwr::proto::PushTaskResResponse_ResultsEntry_DoNotUse)}, - { 59, -1, -1, sizeof(::flwr::proto::PushTaskResResponse)}, - { 67, -1, -1, sizeof(::flwr::proto::Reconnect)}, + { 7, -1, -1, sizeof(::flwr::proto::CreateNodeResponse)}, + { 14, -1, -1, sizeof(::flwr::proto::DeleteNodeRequest)}, + { 21, -1, -1, sizeof(::flwr::proto::DeleteNodeResponse)}, + { 27, -1, -1, sizeof(::flwr::proto::PingRequest)}, + { 35, -1, -1, sizeof(::flwr::proto::PingResponse)}, + { 42, -1, -1, sizeof(::flwr::proto::PullTaskInsRequest)}, + { 50, -1, -1, sizeof(::flwr::proto::PullTaskInsResponse)}, + { 58, -1, -1, sizeof(::flwr::proto::PushTaskResRequest)}, + { 65, 73, -1, sizeof(::flwr::proto::PushTaskResResponse_ResultsEntry_DoNotUse)}, + { 75, -1, -1, sizeof(::flwr::proto::PushTaskResResponse)}, + { 83, -1, -1, sizeof(::flwr::proto::Run)}, + { 92, -1, -1, sizeof(::flwr::proto::GetRunRequest)}, + { 99, -1, -1, sizeof(::flwr::proto::GetRunResponse)}, + { 106, -1, -1, sizeof(::flwr::proto::Reconnect)}, }; static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = { @@ -238,41 +346,56 @@ static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = reinterpret_cast(&::flwr::proto::_CreateNodeResponse_default_instance_), reinterpret_cast(&::flwr::proto::_DeleteNodeRequest_default_instance_), reinterpret_cast(&::flwr::proto::_DeleteNodeResponse_default_instance_), + reinterpret_cast(&::flwr::proto::_PingRequest_default_instance_), + reinterpret_cast(&::flwr::proto::_PingResponse_default_instance_), 
reinterpret_cast(&::flwr::proto::_PullTaskInsRequest_default_instance_), reinterpret_cast(&::flwr::proto::_PullTaskInsResponse_default_instance_), reinterpret_cast(&::flwr::proto::_PushTaskResRequest_default_instance_), reinterpret_cast(&::flwr::proto::_PushTaskResResponse_ResultsEntry_DoNotUse_default_instance_), reinterpret_cast(&::flwr::proto::_PushTaskResResponse_default_instance_), + reinterpret_cast(&::flwr::proto::_Run_default_instance_), + reinterpret_cast(&::flwr::proto::_GetRunRequest_default_instance_), + reinterpret_cast(&::flwr::proto::_GetRunResponse_default_instance_), reinterpret_cast(&::flwr::proto::_Reconnect_default_instance_), }; const char descriptor_table_protodef_flwr_2fproto_2ffleet_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = "\n\026flwr/proto/fleet.proto\022\nflwr.proto\032\025fl" "wr/proto/node.proto\032\025flwr/proto/task.pro" - "to\"\023\n\021CreateNodeRequest\"4\n\022CreateNodeRes" - "ponse\022\036\n\004node\030\001 \001(\0132\020.flwr.proto.Node\"3\n" - "\021DeleteNodeRequest\022\036\n\004node\030\001 \001(\0132\020.flwr." - "proto.Node\"\024\n\022DeleteNodeResponse\"F\n\022Pull" - "TaskInsRequest\022\036\n\004node\030\001 \001(\0132\020.flwr.prot" - "o.Node\022\020\n\010task_ids\030\002 \003(\t\"k\n\023PullTaskInsR" - "esponse\022(\n\treconnect\030\001 \001(\0132\025.flwr.proto." - "Reconnect\022*\n\rtask_ins_list\030\002 \003(\0132\023.flwr." - "proto.TaskIns\"@\n\022PushTaskResRequest\022*\n\rt" - "ask_res_list\030\001 \003(\0132\023.flwr.proto.TaskRes\"" - "\256\001\n\023PushTaskResResponse\022(\n\treconnect\030\001 \001" - "(\0132\025.flwr.proto.Reconnect\022=\n\007results\030\002 \003" - "(\0132,.flwr.proto.PushTaskResResponse.Resu" - "ltsEntry\032.\n\014ResultsEntry\022\013\n\003key\030\001 \001(\t\022\r\n" - "\005value\030\002 \001(\r:\0028\001\"\036\n\tReconnect\022\021\n\treconne" - "ct\030\001 \001(\0042\311\002\n\005Fleet\022M\n\nCreateNode\022\035.flwr." 
- "proto.CreateNodeRequest\032\036.flwr.proto.Cre" - "ateNodeResponse\"\000\022M\n\nDeleteNode\022\035.flwr.p" - "roto.DeleteNodeRequest\032\036.flwr.proto.Dele" - "teNodeResponse\"\000\022P\n\013PullTaskIns\022\036.flwr.p" - "roto.PullTaskInsRequest\032\037.flwr.proto.Pul" - "lTaskInsResponse\"\000\022P\n\013PushTaskRes\022\036.flwr" - ".proto.PushTaskResRequest\032\037.flwr.proto.P" - "ushTaskResResponse\"\000b\006proto3" + "to\"*\n\021CreateNodeRequest\022\025\n\rping_interval" + "\030\001 \001(\001\"4\n\022CreateNodeResponse\022\036\n\004node\030\001 \001" + "(\0132\020.flwr.proto.Node\"3\n\021DeleteNodeReques" + "t\022\036\n\004node\030\001 \001(\0132\020.flwr.proto.Node\"\024\n\022Del" + "eteNodeResponse\"D\n\013PingRequest\022\036\n\004node\030\001" + " \001(\0132\020.flwr.proto.Node\022\025\n\rping_interval\030" + "\002 \001(\001\"\037\n\014PingResponse\022\017\n\007success\030\001 \001(\010\"F" + "\n\022PullTaskInsRequest\022\036\n\004node\030\001 \001(\0132\020.flw" + "r.proto.Node\022\020\n\010task_ids\030\002 \003(\t\"k\n\023PullTa" + "skInsResponse\022(\n\treconnect\030\001 \001(\0132\025.flwr." + "proto.Reconnect\022*\n\rtask_ins_list\030\002 \003(\0132\023" + ".flwr.proto.TaskIns\"@\n\022PushTaskResReques" + "t\022*\n\rtask_res_list\030\001 \003(\0132\023.flwr.proto.Ta" + "skRes\"\256\001\n\023PushTaskResResponse\022(\n\treconne" + "ct\030\001 \001(\0132\025.flwr.proto.Reconnect\022=\n\007resul" + "ts\030\002 \003(\0132,.flwr.proto.PushTaskResRespons" + "e.ResultsEntry\032.\n\014ResultsEntry\022\013\n\003key\030\001 " + "\001(\t\022\r\n\005value\030\002 \001(\r:\0028\001\":\n\003Run\022\016\n\006run_id\030" + "\001 \001(\022\022\016\n\006fab_id\030\002 \001(\t\022\023\n\013fab_version\030\003 \001" + "(\t\"\037\n\rGetRunRequest\022\016\n\006run_id\030\001 \001(\022\".\n\016G" + "etRunResponse\022\034\n\003run\030\001 \001(\0132\017.flwr.proto." 
+ "Run\"\036\n\tReconnect\022\021\n\treconnect\030\001 \001(\0042\311\003\n\005" + "Fleet\022M\n\nCreateNode\022\035.flwr.proto.CreateN" + "odeRequest\032\036.flwr.proto.CreateNodeRespon" + "se\"\000\022M\n\nDeleteNode\022\035.flwr.proto.DeleteNo" + "deRequest\032\036.flwr.proto.DeleteNodeRespons" + "e\"\000\022;\n\004Ping\022\027.flwr.proto.PingRequest\032\030.f" + "lwr.proto.PingResponse\"\000\022P\n\013PullTaskIns\022" + "\036.flwr.proto.PullTaskInsRequest\032\037.flwr.p" + "roto.PullTaskInsResponse\"\000\022P\n\013PushTaskRe" + "s\022\036.flwr.proto.PushTaskResRequest\032\037.flwr" + ".proto.PushTaskResResponse\"\000\022A\n\006GetRun\022\031" + ".flwr.proto.GetRunRequest\032\032.flwr.proto.G" + "etRunResponse\"\000b\006proto3" ; static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_flwr_2fproto_2ffleet_2eproto_deps[2] = { &::descriptor_table_flwr_2fproto_2fnode_2eproto, @@ -280,8 +403,8 @@ static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor }; static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_flwr_2fproto_2ffleet_2eproto_once; const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_flwr_2fproto_2ffleet_2eproto = { - false, false, 1028, descriptor_table_protodef_flwr_2fproto_2ffleet_2eproto, "flwr/proto/fleet.proto", - &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, descriptor_table_flwr_2fproto_2ffleet_2eproto_deps, 2, 10, + false, false, 1423, descriptor_table_protodef_flwr_2fproto_2ffleet_2eproto, "flwr/proto/fleet.proto", + &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, descriptor_table_flwr_2fproto_2ffleet_2eproto_deps, 2, 15, schemas, file_default_instances, TableStruct_flwr_2fproto_2ffleet_2eproto::offsets, file_level_metadata_flwr_2fproto_2ffleet_2eproto, file_level_enum_descriptors_flwr_2fproto_2ffleet_2eproto, file_level_service_descriptors_flwr_2fproto_2ffleet_2eproto, }; @@ -302,30 +425,169 @@ class CreateNodeRequest::_Internal { 
CreateNodeRequest::CreateNodeRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase(arena, is_message_owned) { + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } // @@protoc_insertion_point(arena_constructor:flwr.proto.CreateNodeRequest) } CreateNodeRequest::CreateNodeRequest(const CreateNodeRequest& from) - : ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase() { + : ::PROTOBUF_NAMESPACE_ID::Message() { _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + ping_interval_ = from.ping_interval_; // @@protoc_insertion_point(copy_constructor:flwr.proto.CreateNodeRequest) } +void CreateNodeRequest::SharedCtor() { +ping_interval_ = 0; +} + +CreateNodeRequest::~CreateNodeRequest() { + // @@protoc_insertion_point(destructor:flwr.proto.CreateNodeRequest) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void CreateNodeRequest::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void CreateNodeRequest::ArenaDtor(void* object) { + CreateNodeRequest* _this = reinterpret_cast< CreateNodeRequest* >(object); + (void)_this; +} +void CreateNodeRequest::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void CreateNodeRequest::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void CreateNodeRequest::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.CreateNodeRequest) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + ping_interval_ = 0; + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* CreateNodeRequest::_InternalParse(const char* ptr, 
::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // double ping_interval = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 9)) { + ping_interval_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr); + ptr += sizeof(double); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* CreateNodeRequest::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.CreateNodeRequest) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // double ping_interval = 1; + if (!(this->_internal_ping_interval() <= 0 && this->_internal_ping_interval() >= 0)) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteDoubleToArray(1, this->_internal_ping_interval(), target); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // 
@@protoc_insertion_point(serialize_to_array_end:flwr.proto.CreateNodeRequest) + return target; +} + +size_t CreateNodeRequest::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.CreateNodeRequest) + size_t total_size = 0; + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + // double ping_interval = 1; + if (!(this->_internal_ping_interval() <= 0 && this->_internal_ping_interval() >= 0)) { + total_size += 1 + 8; + } + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} const ::PROTOBUF_NAMESPACE_ID::Message::ClassData CreateNodeRequest::_class_data_ = { - ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyImpl, - ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeImpl, + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + CreateNodeRequest::MergeImpl }; const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*CreateNodeRequest::GetClassData() const { return &_class_data_; } +void CreateNodeRequest::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + +void CreateNodeRequest::MergeFrom(const CreateNodeRequest& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.CreateNodeRequest) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + if (!(from._internal_ping_interval() <= 0 && from._internal_ping_interval() >= 0)) { + _internal_set_ping_interval(from._internal_ping_interval()); + } + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} +void CreateNodeRequest::CopyFrom(const CreateNodeRequest& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.CreateNodeRequest) + if (&from == this) return; + Clear(); + MergeFrom(from); +} +bool 
CreateNodeRequest::IsInitialized() const { + return true; +} +void CreateNodeRequest::InternalSwap(CreateNodeRequest* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + swap(ping_interval_, other->ping_interval_); +} ::PROTOBUF_NAMESPACE_ID::Metadata CreateNodeRequest::GetMetadata() const { return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( @@ -776,84 +1038,86 @@ ::PROTOBUF_NAMESPACE_ID::Metadata DeleteNodeResponse::GetMetadata() const { // =================================================================== -class PullTaskInsRequest::_Internal { +class PingRequest::_Internal { public: - static const ::flwr::proto::Node& node(const PullTaskInsRequest* msg); + static const ::flwr::proto::Node& node(const PingRequest* msg); }; const ::flwr::proto::Node& -PullTaskInsRequest::_Internal::node(const PullTaskInsRequest* msg) { +PingRequest::_Internal::node(const PingRequest* msg) { return *msg->node_; } -void PullTaskInsRequest::clear_node() { +void PingRequest::clear_node() { if (GetArenaForAllocation() == nullptr && node_ != nullptr) { delete node_; } node_ = nullptr; } -PullTaskInsRequest::PullTaskInsRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, +PingRequest::PingRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), - task_ids_(arena) { + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { SharedCtor(); if (!is_message_owned) { RegisterArenaDtor(arena); } - // @@protoc_insertion_point(arena_constructor:flwr.proto.PullTaskInsRequest) + // @@protoc_insertion_point(arena_constructor:flwr.proto.PingRequest) } -PullTaskInsRequest::PullTaskInsRequest(const PullTaskInsRequest& from) - : ::PROTOBUF_NAMESPACE_ID::Message(), - task_ids_(from.task_ids_) { +PingRequest::PingRequest(const PingRequest& from) + : ::PROTOBUF_NAMESPACE_ID::Message() { 
_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); if (from._internal_has_node()) { node_ = new ::flwr::proto::Node(*from.node_); } else { node_ = nullptr; } - // @@protoc_insertion_point(copy_constructor:flwr.proto.PullTaskInsRequest) + ping_interval_ = from.ping_interval_; + // @@protoc_insertion_point(copy_constructor:flwr.proto.PingRequest) } -void PullTaskInsRequest::SharedCtor() { -node_ = nullptr; +void PingRequest::SharedCtor() { +::memset(reinterpret_cast(this) + static_cast( + reinterpret_cast(&node_) - reinterpret_cast(this)), + 0, static_cast(reinterpret_cast(&ping_interval_) - + reinterpret_cast(&node_)) + sizeof(ping_interval_)); } -PullTaskInsRequest::~PullTaskInsRequest() { - // @@protoc_insertion_point(destructor:flwr.proto.PullTaskInsRequest) +PingRequest::~PingRequest() { + // @@protoc_insertion_point(destructor:flwr.proto.PingRequest) if (GetArenaForAllocation() != nullptr) return; SharedDtor(); _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } -inline void PullTaskInsRequest::SharedDtor() { +inline void PingRequest::SharedDtor() { GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); if (this != internal_default_instance()) delete node_; } -void PullTaskInsRequest::ArenaDtor(void* object) { - PullTaskInsRequest* _this = reinterpret_cast< PullTaskInsRequest* >(object); +void PingRequest::ArenaDtor(void* object) { + PingRequest* _this = reinterpret_cast< PingRequest* >(object); (void)_this; } -void PullTaskInsRequest::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +void PingRequest::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { } -void PullTaskInsRequest::SetCachedSize(int size) const { +void PingRequest::SetCachedSize(int size) const { _cached_size_.Set(size); } -void PullTaskInsRequest::Clear() { -// @@protoc_insertion_point(message_clear_start:flwr.proto.PullTaskInsRequest) +void PingRequest::Clear() { +// 
@@protoc_insertion_point(message_clear_start:flwr.proto.PingRequest) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; - task_ids_.Clear(); if (GetArenaForAllocation() == nullptr && node_ != nullptr) { delete node_; } node_ = nullptr; + ping_interval_ = 0; _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } -const char* PullTaskInsRequest::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +const char* PingRequest::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure while (!ctx->Done(&ptr)) { ::PROTOBUF_NAMESPACE_ID::uint32 tag; @@ -867,18 +1131,11 @@ const char* PullTaskInsRequest::_InternalParse(const char* ptr, ::PROTOBUF_NAMES } else goto handle_unusual; continue; - // repeated string task_ids = 2; + // double ping_interval = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { - ptr -= 1; - do { - ptr += 1; - auto str = _internal_add_task_ids(); - ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); - CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.PullTaskInsRequest.task_ids")); - CHK_(ptr); - if (!ctx->DataAvailable(ptr)) break; - } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr)); + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 17)) { + ping_interval_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr); + ptr += sizeof(double); } else goto handle_unusual; continue; @@ -905,9 +1162,9 @@ const char* PullTaskInsRequest::_InternalParse(const char* ptr, ::PROTOBUF_NAMES #undef CHK_ } -::PROTOBUF_NAMESPACE_ID::uint8* PullTaskInsRequest::_InternalSerialize( +::PROTOBUF_NAMESPACE_ID::uint8* PingRequest::_InternalSerialize( ::PROTOBUF_NAMESPACE_ID::uint8* target, 
::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { - // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.PullTaskInsRequest) + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.PingRequest) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; (void) cached_has_bits; @@ -919,40 +1176,28 @@ ::PROTOBUF_NAMESPACE_ID::uint8* PullTaskInsRequest::_InternalSerialize( 1, _Internal::node(this), target, stream); } - // repeated string task_ids = 2; - for (int i = 0, n = this->_internal_task_ids_size(); i < n; i++) { - const auto& s = this->_internal_task_ids(i); - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( - s.data(), static_cast(s.length()), - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, - "flwr.proto.PullTaskInsRequest.task_ids"); - target = stream->WriteString(2, s, target); + // double ping_interval = 2; + if (!(this->_internal_ping_interval() <= 0 && this->_internal_ping_interval() >= 0)) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteDoubleToArray(2, this->_internal_ping_interval(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); } - // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.PullTaskInsRequest) + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.PingRequest) return target; } -size_t PullTaskInsRequest::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:flwr.proto.PullTaskInsRequest) +size_t PingRequest::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.PingRequest) size_t total_size = 0; ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; // Prevent 
compiler warnings about cached_has_bits being unused (void) cached_has_bits; - // repeated string task_ids = 2; - total_size += 1 * - ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(task_ids_.size()); - for (int i = 0, n = task_ids_.size(); i < n; i++) { - total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( - task_ids_.Get(i)); - } - // .flwr.proto.Node node = 1; if (this->_internal_has_node()) { total_size += 1 + @@ -960,54 +1205,65 @@ size_t PullTaskInsRequest::ByteSizeLong() const { *node_); } + // double ping_interval = 2; + if (!(this->_internal_ping_interval() <= 0 && this->_internal_ping_interval() >= 0)) { + total_size += 1 + 8; + } + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); } -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData PullTaskInsRequest::_class_data_ = { +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData PingRequest::_class_data_ = { ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, - PullTaskInsRequest::MergeImpl + PingRequest::MergeImpl }; -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*PullTaskInsRequest::GetClassData() const { return &_class_data_; } +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*PingRequest::GetClassData() const { return &_class_data_; } -void PullTaskInsRequest::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, +void PingRequest::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from) { - static_cast(to)->MergeFrom( - static_cast(from)); + static_cast(to)->MergeFrom( + static_cast(from)); } -void PullTaskInsRequest::MergeFrom(const PullTaskInsRequest& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.PullTaskInsRequest) +void PingRequest::MergeFrom(const PingRequest& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.PingRequest) GOOGLE_DCHECK_NE(&from, this); ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; (void) cached_has_bits; - task_ids_.MergeFrom(from.task_ids_); 
if (from._internal_has_node()) { _internal_mutable_node()->::flwr::proto::Node::MergeFrom(from._internal_node()); } + if (!(from._internal_ping_interval() <= 0 && from._internal_ping_interval() >= 0)) { + _internal_set_ping_interval(from._internal_ping_interval()); + } _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); } -void PullTaskInsRequest::CopyFrom(const PullTaskInsRequest& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.PullTaskInsRequest) +void PingRequest::CopyFrom(const PingRequest& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.PingRequest) if (&from == this) return; Clear(); MergeFrom(from); } -bool PullTaskInsRequest::IsInitialized() const { +bool PingRequest::IsInitialized() const { return true; } -void PullTaskInsRequest::InternalSwap(PullTaskInsRequest* other) { +void PingRequest::InternalSwap(PingRequest* other) { using std::swap; _internal_metadata_.InternalSwap(&other->_internal_metadata_); - task_ids_.InternalSwap(&other->task_ids_); - swap(node_, other->node_); + ::PROTOBUF_NAMESPACE_ID::internal::memswap< + PROTOBUF_FIELD_OFFSET(PingRequest, ping_interval_) + + sizeof(PingRequest::ping_interval_) + - PROTOBUF_FIELD_OFFSET(PingRequest, node_)>( + reinterpret_cast(&node_), + reinterpret_cast(&other->node_)); } -::PROTOBUF_NAMESPACE_ID::Metadata PullTaskInsRequest::GetMetadata() const { +::PROTOBUF_NAMESPACE_ID::Metadata PingRequest::GetMetadata() const { return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, file_level_metadata_flwr_2fproto_2ffleet_2eproto[4]); @@ -1015,101 +1271,284 @@ ::PROTOBUF_NAMESPACE_ID::Metadata PullTaskInsRequest::GetMetadata() const { // =================================================================== -class PullTaskInsResponse::_Internal { +class PingResponse::_Internal { public: - static 
const ::flwr::proto::Reconnect& reconnect(const PullTaskInsResponse* msg); }; -const ::flwr::proto::Reconnect& -PullTaskInsResponse::_Internal::reconnect(const PullTaskInsResponse* msg) { - return *msg->reconnect_; -} -void PullTaskInsResponse::clear_task_ins_list() { - task_ins_list_.Clear(); -} -PullTaskInsResponse::PullTaskInsResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, +PingResponse::PingResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), - task_ins_list_(arena) { + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { SharedCtor(); if (!is_message_owned) { RegisterArenaDtor(arena); } - // @@protoc_insertion_point(arena_constructor:flwr.proto.PullTaskInsResponse) + // @@protoc_insertion_point(arena_constructor:flwr.proto.PingResponse) } -PullTaskInsResponse::PullTaskInsResponse(const PullTaskInsResponse& from) - : ::PROTOBUF_NAMESPACE_ID::Message(), - task_ins_list_(from.task_ins_list_) { +PingResponse::PingResponse(const PingResponse& from) + : ::PROTOBUF_NAMESPACE_ID::Message() { _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - if (from._internal_has_reconnect()) { - reconnect_ = new ::flwr::proto::Reconnect(*from.reconnect_); + success_ = from.success_; + // @@protoc_insertion_point(copy_constructor:flwr.proto.PingResponse) +} + +void PingResponse::SharedCtor() { +success_ = false; +} + +PingResponse::~PingResponse() { + // @@protoc_insertion_point(destructor:flwr.proto.PingResponse) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void PingResponse::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void PingResponse::ArenaDtor(void* object) { + PingResponse* _this = reinterpret_cast< PingResponse* >(object); + (void)_this; +} +void 
PingResponse::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void PingResponse::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void PingResponse::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.PingResponse) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + success_ = false; + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* PingResponse::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // bool success = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) { + success_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); + CHK_(ptr); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* PingResponse::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.PingResponse) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // bool success = 1; + if (this->_internal_success() != 0) { + target = stream->EnsureSpace(target); + target = 
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(1, this->_internal_success(), target); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.PingResponse) + return target; +} + +size_t PingResponse::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.PingResponse) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // bool success = 1; + if (this->_internal_success() != 0) { + total_size += 1 + 1; + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData PingResponse::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + PingResponse::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*PingResponse::GetClassData() const { return &_class_data_; } + +void PingResponse::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void PingResponse::MergeFrom(const PingResponse& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.PingResponse) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + if (from._internal_success() != 0) { + _internal_set_success(from._internal_success()); + } + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void PingResponse::CopyFrom(const PingResponse& from) { +// 
@@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.PingResponse) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool PingResponse::IsInitialized() const { + return true; +} + +void PingResponse::InternalSwap(PingResponse* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + swap(success_, other->success_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata PingResponse::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, + file_level_metadata_flwr_2fproto_2ffleet_2eproto[5]); +} + +// =================================================================== + +class PullTaskInsRequest::_Internal { + public: + static const ::flwr::proto::Node& node(const PullTaskInsRequest* msg); +}; + +const ::flwr::proto::Node& +PullTaskInsRequest::_Internal::node(const PullTaskInsRequest* msg) { + return *msg->node_; +} +void PullTaskInsRequest::clear_node() { + if (GetArenaForAllocation() == nullptr && node_ != nullptr) { + delete node_; + } + node_ = nullptr; +} +PullTaskInsRequest::PullTaskInsRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + task_ids_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.PullTaskInsRequest) +} +PullTaskInsRequest::PullTaskInsRequest(const PullTaskInsRequest& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + task_ids_(from.task_ids_) { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + if (from._internal_has_node()) { + node_ = new ::flwr::proto::Node(*from.node_); } else { - reconnect_ = nullptr; + node_ = nullptr; } - // @@protoc_insertion_point(copy_constructor:flwr.proto.PullTaskInsResponse) + // 
@@protoc_insertion_point(copy_constructor:flwr.proto.PullTaskInsRequest) } -void PullTaskInsResponse::SharedCtor() { -reconnect_ = nullptr; +void PullTaskInsRequest::SharedCtor() { +node_ = nullptr; } -PullTaskInsResponse::~PullTaskInsResponse() { - // @@protoc_insertion_point(destructor:flwr.proto.PullTaskInsResponse) +PullTaskInsRequest::~PullTaskInsRequest() { + // @@protoc_insertion_point(destructor:flwr.proto.PullTaskInsRequest) if (GetArenaForAllocation() != nullptr) return; SharedDtor(); _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } -inline void PullTaskInsResponse::SharedDtor() { +inline void PullTaskInsRequest::SharedDtor() { GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); - if (this != internal_default_instance()) delete reconnect_; + if (this != internal_default_instance()) delete node_; } -void PullTaskInsResponse::ArenaDtor(void* object) { - PullTaskInsResponse* _this = reinterpret_cast< PullTaskInsResponse* >(object); +void PullTaskInsRequest::ArenaDtor(void* object) { + PullTaskInsRequest* _this = reinterpret_cast< PullTaskInsRequest* >(object); (void)_this; } -void PullTaskInsResponse::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +void PullTaskInsRequest::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { } -void PullTaskInsResponse::SetCachedSize(int size) const { +void PullTaskInsRequest::SetCachedSize(int size) const { _cached_size_.Set(size); } -void PullTaskInsResponse::Clear() { -// @@protoc_insertion_point(message_clear_start:flwr.proto.PullTaskInsResponse) +void PullTaskInsRequest::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.PullTaskInsRequest) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; - task_ins_list_.Clear(); - if (GetArenaForAllocation() == nullptr && reconnect_ != nullptr) { - delete reconnect_; + task_ids_.Clear(); + if (GetArenaForAllocation() == nullptr && node_ != 
nullptr) { + delete node_; } - reconnect_ = nullptr; + node_ = nullptr; _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } -const char* PullTaskInsResponse::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +const char* PullTaskInsRequest::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure while (!ctx->Done(&ptr)) { ::PROTOBUF_NAMESPACE_ID::uint32 tag; ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); switch (tag >> 3) { - // .flwr.proto.Reconnect reconnect = 1; + // .flwr.proto.Node node = 1; case 1: if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { - ptr = ctx->ParseMessage(_internal_mutable_reconnect(), ptr); + ptr = ctx->ParseMessage(_internal_mutable_node(), ptr); CHK_(ptr); } else goto handle_unusual; continue; - // repeated .flwr.proto.TaskIns task_ins_list = 2; + // repeated string task_ids = 2; case 2: if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { ptr -= 1; do { ptr += 1; - ptr = ctx->ParseMessage(_internal_add_task_ins_list(), ptr); + auto str = _internal_add_task_ids(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.PullTaskInsRequest.task_ids")); CHK_(ptr); if (!ctx->DataAvailable(ptr)) break; } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr)); @@ -1139,187 +1578,1149 @@ const char* PullTaskInsResponse::_InternalParse(const char* ptr, ::PROTOBUF_NAME #undef CHK_ } -::PROTOBUF_NAMESPACE_ID::uint8* PullTaskInsResponse::_InternalSerialize( +::PROTOBUF_NAMESPACE_ID::uint8* PullTaskInsRequest::_InternalSerialize( ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { - // 
@@protoc_insertion_point(serialize_to_array_start:flwr.proto.PullTaskInsResponse) + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.PullTaskInsRequest) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; (void) cached_has_bits; - // .flwr.proto.Reconnect reconnect = 1; - if (this->_internal_has_reconnect()) { + // .flwr.proto.Node node = 1; + if (this->_internal_has_node()) { target = stream->EnsureSpace(target); target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: InternalWriteMessage( - 1, _Internal::reconnect(this), target, stream); + 1, _Internal::node(this), target, stream); } - // repeated .flwr.proto.TaskIns task_ins_list = 2; - for (unsigned int i = 0, - n = static_cast(this->_internal_task_ins_list_size()); i < n; i++) { - target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage(2, this->_internal_task_ins_list(i), target, stream); + // repeated string task_ids = 2; + for (int i = 0, n = this->_internal_task_ids_size(); i < n; i++) { + const auto& s = this->_internal_task_ids(i); + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + s.data(), static_cast(s.length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.PullTaskInsRequest.task_ids"); + target = stream->WriteString(2, s, target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); } - // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.PullTaskInsResponse) + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.PullTaskInsRequest) return target; } -size_t PullTaskInsResponse::ByteSizeLong() const { -// 
@@protoc_insertion_point(message_byte_size_start:flwr.proto.PullTaskInsResponse) +size_t PullTaskInsRequest::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.PullTaskInsRequest) size_t total_size = 0; ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; - // repeated .flwr.proto.TaskIns task_ins_list = 2; - total_size += 1UL * this->_internal_task_ins_list_size(); - for (const auto& msg : this->task_ins_list_) { - total_size += - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg); + // repeated string task_ids = 2; + total_size += 1 * + ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(task_ids_.size()); + for (int i = 0, n = task_ids_.size(); i < n; i++) { + total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + task_ids_.Get(i)); + } + + // .flwr.proto.Node node = 1; + if (this->_internal_has_node()) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + *node_); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData PullTaskInsRequest::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + PullTaskInsRequest::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*PullTaskInsRequest::GetClassData() const { return &_class_data_; } + +void PullTaskInsRequest::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void PullTaskInsRequest::MergeFrom(const PullTaskInsRequest& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.PullTaskInsRequest) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + task_ids_.MergeFrom(from.task_ids_); + if (from._internal_has_node()) { + 
_internal_mutable_node()->::flwr::proto::Node::MergeFrom(from._internal_node()); + } + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void PullTaskInsRequest::CopyFrom(const PullTaskInsRequest& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.PullTaskInsRequest) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool PullTaskInsRequest::IsInitialized() const { + return true; +} + +void PullTaskInsRequest::InternalSwap(PullTaskInsRequest* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + task_ids_.InternalSwap(&other->task_ids_); + swap(node_, other->node_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata PullTaskInsRequest::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, + file_level_metadata_flwr_2fproto_2ffleet_2eproto[6]); +} + +// =================================================================== + +class PullTaskInsResponse::_Internal { + public: + static const ::flwr::proto::Reconnect& reconnect(const PullTaskInsResponse* msg); +}; + +const ::flwr::proto::Reconnect& +PullTaskInsResponse::_Internal::reconnect(const PullTaskInsResponse* msg) { + return *msg->reconnect_; +} +void PullTaskInsResponse::clear_task_ins_list() { + task_ins_list_.Clear(); +} +PullTaskInsResponse::PullTaskInsResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + task_ins_list_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.PullTaskInsResponse) +} +PullTaskInsResponse::PullTaskInsResponse(const PullTaskInsResponse& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + task_ins_list_(from.task_ins_list_) { + 
_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + if (from._internal_has_reconnect()) { + reconnect_ = new ::flwr::proto::Reconnect(*from.reconnect_); + } else { + reconnect_ = nullptr; + } + // @@protoc_insertion_point(copy_constructor:flwr.proto.PullTaskInsResponse) +} + +void PullTaskInsResponse::SharedCtor() { +reconnect_ = nullptr; +} + +PullTaskInsResponse::~PullTaskInsResponse() { + // @@protoc_insertion_point(destructor:flwr.proto.PullTaskInsResponse) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void PullTaskInsResponse::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); + if (this != internal_default_instance()) delete reconnect_; +} + +void PullTaskInsResponse::ArenaDtor(void* object) { + PullTaskInsResponse* _this = reinterpret_cast< PullTaskInsResponse* >(object); + (void)_this; +} +void PullTaskInsResponse::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void PullTaskInsResponse::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void PullTaskInsResponse::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.PullTaskInsResponse) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + task_ins_list_.Clear(); + if (GetArenaForAllocation() == nullptr && reconnect_ != nullptr) { + delete reconnect_; + } + reconnect_ = nullptr; + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* PullTaskInsResponse::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // 
.flwr.proto.Reconnect reconnect = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr = ctx->ParseMessage(_internal_mutable_reconnect(), ptr); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // repeated .flwr.proto.TaskIns task_ins_list = 2; + case 2: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { + ptr -= 1; + do { + ptr += 1; + ptr = ctx->ParseMessage(_internal_add_task_ins_list(), ptr); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr)); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* PullTaskInsResponse::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.PullTaskInsResponse) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // .flwr.proto.Reconnect reconnect = 1; + if (this->_internal_has_reconnect()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage( + 1, _Internal::reconnect(this), target, stream); + } + + // repeated .flwr.proto.TaskIns task_ins_list = 2; + for (unsigned int i = 0, + n = static_cast(this->_internal_task_ins_list_size()); i < n; i++) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage(2, 
this->_internal_task_ins_list(i), target, stream); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.PullTaskInsResponse) + return target; +} + +size_t PullTaskInsResponse::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.PullTaskInsResponse) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // repeated .flwr.proto.TaskIns task_ins_list = 2; + total_size += 1UL * this->_internal_task_ins_list_size(); + for (const auto& msg : this->task_ins_list_) { + total_size += + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg); + } + + // .flwr.proto.Reconnect reconnect = 1; + if (this->_internal_has_reconnect()) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + *reconnect_); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData PullTaskInsResponse::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + PullTaskInsResponse::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*PullTaskInsResponse::GetClassData() const { return &_class_data_; } + +void PullTaskInsResponse::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void PullTaskInsResponse::MergeFrom(const PullTaskInsResponse& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.PullTaskInsResponse) + 
GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + task_ins_list_.MergeFrom(from.task_ins_list_); + if (from._internal_has_reconnect()) { + _internal_mutable_reconnect()->::flwr::proto::Reconnect::MergeFrom(from._internal_reconnect()); + } + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void PullTaskInsResponse::CopyFrom(const PullTaskInsResponse& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.PullTaskInsResponse) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool PullTaskInsResponse::IsInitialized() const { + return true; +} + +void PullTaskInsResponse::InternalSwap(PullTaskInsResponse* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + task_ins_list_.InternalSwap(&other->task_ins_list_); + swap(reconnect_, other->reconnect_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata PullTaskInsResponse::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, + file_level_metadata_flwr_2fproto_2ffleet_2eproto[7]); +} + +// =================================================================== + +class PushTaskResRequest::_Internal { + public: +}; + +void PushTaskResRequest::clear_task_res_list() { + task_res_list_.Clear(); +} +PushTaskResRequest::PushTaskResRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + task_res_list_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.PushTaskResRequest) +} +PushTaskResRequest::PushTaskResRequest(const PushTaskResRequest& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + task_res_list_(from.task_res_list_) { + 
_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + // @@protoc_insertion_point(copy_constructor:flwr.proto.PushTaskResRequest) +} + +void PushTaskResRequest::SharedCtor() { +} + +PushTaskResRequest::~PushTaskResRequest() { + // @@protoc_insertion_point(destructor:flwr.proto.PushTaskResRequest) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void PushTaskResRequest::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void PushTaskResRequest::ArenaDtor(void* object) { + PushTaskResRequest* _this = reinterpret_cast< PushTaskResRequest* >(object); + (void)_this; +} +void PushTaskResRequest::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void PushTaskResRequest::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void PushTaskResRequest::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.PushTaskResRequest) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + task_res_list_.Clear(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* PushTaskResRequest::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // repeated .flwr.proto.TaskRes task_res_list = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr -= 1; + do { + ptr += 1; + ptr = ctx->ParseMessage(_internal_add_task_res_list(), ptr); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); + } else + 
goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* PushTaskResRequest::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.PushTaskResRequest) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // repeated .flwr.proto.TaskRes task_res_list = 1; + for (unsigned int i = 0, + n = static_cast(this->_internal_task_res_list_size()); i < n; i++) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage(1, this->_internal_task_res_list(i), target, stream); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.PushTaskResRequest) + return target; +} + +size_t PushTaskResRequest::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.PushTaskResRequest) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // repeated .flwr.proto.TaskRes task_res_list = 1; + total_size += 1UL * 
this->_internal_task_res_list_size(); + for (const auto& msg : this->task_res_list_) { + total_size += + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData PushTaskResRequest::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + PushTaskResRequest::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*PushTaskResRequest::GetClassData() const { return &_class_data_; } + +void PushTaskResRequest::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void PushTaskResRequest::MergeFrom(const PushTaskResRequest& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.PushTaskResRequest) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + task_res_list_.MergeFrom(from.task_res_list_); + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void PushTaskResRequest::CopyFrom(const PushTaskResRequest& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.PushTaskResRequest) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool PushTaskResRequest::IsInitialized() const { + return true; +} + +void PushTaskResRequest::InternalSwap(PushTaskResRequest* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + task_res_list_.InternalSwap(&other->task_res_list_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata PushTaskResRequest::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, + file_level_metadata_flwr_2fproto_2ffleet_2eproto[8]); +} + +// 
=================================================================== + +PushTaskResResponse_ResultsEntry_DoNotUse::PushTaskResResponse_ResultsEntry_DoNotUse() {} +PushTaskResResponse_ResultsEntry_DoNotUse::PushTaskResResponse_ResultsEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena) + : SuperType(arena) {} +void PushTaskResResponse_ResultsEntry_DoNotUse::MergeFrom(const PushTaskResResponse_ResultsEntry_DoNotUse& other) { + MergeFromInternal(other); +} +::PROTOBUF_NAMESPACE_ID::Metadata PushTaskResResponse_ResultsEntry_DoNotUse::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, + file_level_metadata_flwr_2fproto_2ffleet_2eproto[9]); +} + +// =================================================================== + +class PushTaskResResponse::_Internal { + public: + static const ::flwr::proto::Reconnect& reconnect(const PushTaskResResponse* msg); +}; + +const ::flwr::proto::Reconnect& +PushTaskResResponse::_Internal::reconnect(const PushTaskResResponse* msg) { + return *msg->reconnect_; +} +PushTaskResResponse::PushTaskResResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + results_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.PushTaskResResponse) +} +PushTaskResResponse::PushTaskResResponse(const PushTaskResResponse& from) + : ::PROTOBUF_NAMESPACE_ID::Message() { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + results_.MergeFrom(from.results_); + if (from._internal_has_reconnect()) { + reconnect_ = new ::flwr::proto::Reconnect(*from.reconnect_); + } else { + reconnect_ = nullptr; + } + // @@protoc_insertion_point(copy_constructor:flwr.proto.PushTaskResResponse) +} + +void 
PushTaskResResponse::SharedCtor() { +reconnect_ = nullptr; +} + +PushTaskResResponse::~PushTaskResResponse() { + // @@protoc_insertion_point(destructor:flwr.proto.PushTaskResResponse) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void PushTaskResResponse::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); + if (this != internal_default_instance()) delete reconnect_; +} + +void PushTaskResResponse::ArenaDtor(void* object) { + PushTaskResResponse* _this = reinterpret_cast< PushTaskResResponse* >(object); + (void)_this; + _this->results_. ~MapField(); +} +inline void PushTaskResResponse::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena) { + if (arena != nullptr) { + arena->OwnCustomDestructor(this, &PushTaskResResponse::ArenaDtor); + } +} +void PushTaskResResponse::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void PushTaskResResponse::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.PushTaskResResponse) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + results_.Clear(); + if (GetArenaForAllocation() == nullptr && reconnect_ != nullptr) { + delete reconnect_; + } + reconnect_ = nullptr; + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* PushTaskResResponse::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // .flwr.proto.Reconnect reconnect = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr = ctx->ParseMessage(_internal_mutable_reconnect(), ptr); + 
CHK_(ptr); + } else + goto handle_unusual; + continue; + // map results = 2; + case 2: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { + ptr -= 1; + do { + ptr += 1; + ptr = ctx->ParseMessage(&results_, ptr); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr)); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* PushTaskResResponse::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.PushTaskResResponse) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // .flwr.proto.Reconnect reconnect = 1; + if (this->_internal_has_reconnect()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage( + 1, _Internal::reconnect(this), target, stream); + } + + // map results = 2; + if (!this->_internal_results().empty()) { + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >::const_pointer + ConstPtr; + typedef ConstPtr SortItem; + typedef ::PROTOBUF_NAMESPACE_ID::internal::CompareByDerefFirst Less; + struct Utf8Check { + static void Check(ConstPtr p) { + (void)p; + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + p->first.data(), static_cast(p->first.length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + 
"flwr.proto.PushTaskResResponse.ResultsEntry.key"); + } + }; + + if (stream->IsSerializationDeterministic() && + this->_internal_results().size() > 1) { + ::std::unique_ptr items( + new SortItem[this->_internal_results().size()]); + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >::size_type size_type; + size_type n = 0; + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >::const_iterator + it = this->_internal_results().begin(); + it != this->_internal_results().end(); ++it, ++n) { + items[static_cast(n)] = SortItem(&*it); + } + ::std::sort(&items[0], &items[static_cast(n)], Less()); + for (size_type i = 0; i < n; i++) { + target = PushTaskResResponse_ResultsEntry_DoNotUse::Funcs::InternalSerialize(2, items[static_cast(i)]->first, items[static_cast(i)]->second, target, stream); + Utf8Check::Check(&(*items[static_cast(i)])); + } + } else { + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >::const_iterator + it = this->_internal_results().begin(); + it != this->_internal_results().end(); ++it) { + target = PushTaskResResponse_ResultsEntry_DoNotUse::Funcs::InternalSerialize(2, it->first, it->second, target, stream); + Utf8Check::Check(&(*it)); + } + } + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.PushTaskResResponse) + return target; +} + +size_t PushTaskResResponse::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.PushTaskResResponse) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused 
+ (void) cached_has_bits; + + // map results = 2; + total_size += 1 * + ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_results_size()); + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >::const_iterator + it = this->_internal_results().begin(); + it != this->_internal_results().end(); ++it) { + total_size += PushTaskResResponse_ResultsEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second); + } + + // .flwr.proto.Reconnect reconnect = 1; + if (this->_internal_has_reconnect()) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + *reconnect_); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData PushTaskResResponse::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + PushTaskResResponse::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*PushTaskResResponse::GetClassData() const { return &_class_data_; } + +void PushTaskResResponse::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void PushTaskResResponse::MergeFrom(const PushTaskResResponse& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.PushTaskResResponse) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + results_.MergeFrom(from.results_); + if (from._internal_has_reconnect()) { + _internal_mutable_reconnect()->::flwr::proto::Reconnect::MergeFrom(from._internal_reconnect()); + } + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void PushTaskResResponse::CopyFrom(const PushTaskResResponse& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.PushTaskResResponse) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + 
+bool PushTaskResResponse::IsInitialized() const { + return true; +} + +void PushTaskResResponse::InternalSwap(PushTaskResResponse* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + results_.InternalSwap(&other->results_); + swap(reconnect_, other->reconnect_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata PushTaskResResponse::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, + file_level_metadata_flwr_2fproto_2ffleet_2eproto[10]); +} + +// =================================================================== + +class Run::_Internal { + public: +}; + +Run::Run(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.Run) +} +Run::Run(const Run& from) + : ::PROTOBUF_NAMESPACE_ID::Message() { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + fab_id_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from._internal_fab_id().empty()) { + fab_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_fab_id(), + GetArenaForAllocation()); + } + fab_version_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from._internal_fab_version().empty()) { + fab_version_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_fab_version(), + GetArenaForAllocation()); + } + run_id_ = from.run_id_; + // @@protoc_insertion_point(copy_constructor:flwr.proto.Run) +} + +void Run::SharedCtor() { +fab_id_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); 
+fab_version_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +run_id_ = int64_t{0}; +} + +Run::~Run() { + // @@protoc_insertion_point(destructor:flwr.proto.Run) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void Run::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); + fab_id_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + fab_version_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} + +void Run::ArenaDtor(void* object) { + Run* _this = reinterpret_cast< Run* >(object); + (void)_this; +} +void Run::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void Run::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void Run::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.Run) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + fab_id_.ClearToEmpty(); + fab_version_.ClearToEmpty(); + run_id_ = int64_t{0}; + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* Run::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // sint64 run_id = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) { + run_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // string fab_id = 2; + case 2: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { + auto str = 
_internal_mutable_fab_id(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.Run.fab_id")); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // string fab_version = 3; + case 3: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) { + auto str = _internal_mutable_fab_version(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.Run.fab_version")); + CHK_(ptr); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* Run::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.Run) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // sint64 run_id = 1; + if (this->_internal_run_id() != 0) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteSInt64ToArray(1, this->_internal_run_id(), target); + } + + // string fab_id = 2; + if (!this->_internal_fab_id().empty()) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->_internal_fab_id().data(), static_cast(this->_internal_fab_id().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.Run.fab_id"); + target = stream->WriteStringMaybeAliased( 
+ 2, this->_internal_fab_id(), target); + } + + // string fab_version = 3; + if (!this->_internal_fab_version().empty()) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->_internal_fab_version().data(), static_cast(this->_internal_fab_version().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.Run.fab_version"); + target = stream->WriteStringMaybeAliased( + 3, this->_internal_fab_version(), target); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.Run) + return target; +} + +size_t Run::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.Run) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // string fab_id = 2; + if (!this->_internal_fab_id().empty()) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->_internal_fab_id()); } - // .flwr.proto.Reconnect reconnect = 1; - if (this->_internal_has_reconnect()) { + // string fab_version = 3; + if (!this->_internal_fab_version().empty()) { total_size += 1 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( - *reconnect_); + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->_internal_fab_version()); + } + + // sint64 run_id = 1; + if (this->_internal_run_id() != 0) { + total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SInt64SizePlusOne(this->_internal_run_id()); } return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); } -const 
::PROTOBUF_NAMESPACE_ID::Message::ClassData PullTaskInsResponse::_class_data_ = { +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData Run::_class_data_ = { ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, - PullTaskInsResponse::MergeImpl + Run::MergeImpl }; -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*PullTaskInsResponse::GetClassData() const { return &_class_data_; } +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*Run::GetClassData() const { return &_class_data_; } -void PullTaskInsResponse::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, +void Run::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from) { - static_cast(to)->MergeFrom( - static_cast(from)); + static_cast(to)->MergeFrom( + static_cast(from)); } -void PullTaskInsResponse::MergeFrom(const PullTaskInsResponse& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.PullTaskInsResponse) +void Run::MergeFrom(const Run& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.Run) GOOGLE_DCHECK_NE(&from, this); ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; (void) cached_has_bits; - task_ins_list_.MergeFrom(from.task_ins_list_); - if (from._internal_has_reconnect()) { - _internal_mutable_reconnect()->::flwr::proto::Reconnect::MergeFrom(from._internal_reconnect()); + if (!from._internal_fab_id().empty()) { + _internal_set_fab_id(from._internal_fab_id()); + } + if (!from._internal_fab_version().empty()) { + _internal_set_fab_version(from._internal_fab_version()); + } + if (from._internal_run_id() != 0) { + _internal_set_run_id(from._internal_run_id()); } _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); } -void PullTaskInsResponse::CopyFrom(const PullTaskInsResponse& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.PullTaskInsResponse) +void Run::CopyFrom(const Run& from) { +// 
@@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.Run) if (&from == this) return; Clear(); MergeFrom(from); } -bool PullTaskInsResponse::IsInitialized() const { +bool Run::IsInitialized() const { return true; } -void PullTaskInsResponse::InternalSwap(PullTaskInsResponse* other) { +void Run::InternalSwap(Run* other) { using std::swap; + auto* lhs_arena = GetArenaForAllocation(); + auto* rhs_arena = other->GetArenaForAllocation(); _internal_metadata_.InternalSwap(&other->_internal_metadata_); - task_ins_list_.InternalSwap(&other->task_ins_list_); - swap(reconnect_, other->reconnect_); -} - -::PROTOBUF_NAMESPACE_ID::Metadata PullTaskInsResponse::GetMetadata() const { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap( + &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + &fab_id_, lhs_arena, + &other->fab_id_, rhs_arena + ); + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap( + &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + &fab_version_, lhs_arena, + &other->fab_version_, rhs_arena + ); + swap(run_id_, other->run_id_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata Run::GetMetadata() const { return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, - file_level_metadata_flwr_2fproto_2ffleet_2eproto[5]); + file_level_metadata_flwr_2fproto_2ffleet_2eproto[11]); } // =================================================================== -class PushTaskResRequest::_Internal { +class GetRunRequest::_Internal { public: }; -void PushTaskResRequest::clear_task_res_list() { - task_res_list_.Clear(); -} -PushTaskResRequest::PushTaskResRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, +GetRunRequest::GetRunRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), - task_res_list_(arena) { + : 
::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { SharedCtor(); if (!is_message_owned) { RegisterArenaDtor(arena); } - // @@protoc_insertion_point(arena_constructor:flwr.proto.PushTaskResRequest) + // @@protoc_insertion_point(arena_constructor:flwr.proto.GetRunRequest) } -PushTaskResRequest::PushTaskResRequest(const PushTaskResRequest& from) - : ::PROTOBUF_NAMESPACE_ID::Message(), - task_res_list_(from.task_res_list_) { +GetRunRequest::GetRunRequest(const GetRunRequest& from) + : ::PROTOBUF_NAMESPACE_ID::Message() { _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - // @@protoc_insertion_point(copy_constructor:flwr.proto.PushTaskResRequest) + run_id_ = from.run_id_; + // @@protoc_insertion_point(copy_constructor:flwr.proto.GetRunRequest) } -void PushTaskResRequest::SharedCtor() { +void GetRunRequest::SharedCtor() { +run_id_ = int64_t{0}; } -PushTaskResRequest::~PushTaskResRequest() { - // @@protoc_insertion_point(destructor:flwr.proto.PushTaskResRequest) +GetRunRequest::~GetRunRequest() { + // @@protoc_insertion_point(destructor:flwr.proto.GetRunRequest) if (GetArenaForAllocation() != nullptr) return; SharedDtor(); _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } -inline void PushTaskResRequest::SharedDtor() { +inline void GetRunRequest::SharedDtor() { GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } -void PushTaskResRequest::ArenaDtor(void* object) { - PushTaskResRequest* _this = reinterpret_cast< PushTaskResRequest* >(object); +void GetRunRequest::ArenaDtor(void* object) { + GetRunRequest* _this = reinterpret_cast< GetRunRequest* >(object); (void)_this; } -void PushTaskResRequest::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +void GetRunRequest::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { } -void PushTaskResRequest::SetCachedSize(int size) const { +void GetRunRequest::SetCachedSize(int size) const { _cached_size_.Set(size); } -void PushTaskResRequest::Clear() 
{ -// @@protoc_insertion_point(message_clear_start:flwr.proto.PushTaskResRequest) +void GetRunRequest::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.GetRunRequest) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; - task_res_list_.Clear(); + run_id_ = int64_t{0}; _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } -const char* PushTaskResRequest::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +const char* GetRunRequest::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure while (!ctx->Done(&ptr)) { ::PROTOBUF_NAMESPACE_ID::uint32 tag; ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); switch (tag >> 3) { - // repeated .flwr.proto.TaskRes task_res_list = 1; + // sint64 run_id = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { - ptr -= 1; - do { - ptr += 1; - ptr = ctx->ParseMessage(_internal_add_task_res_list(), ptr); - CHK_(ptr); - if (!ctx->DataAvailable(ptr)) break; - } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) { + run_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr); + CHK_(ptr); } else goto handle_unusual; continue; @@ -1346,210 +2747,174 @@ const char* PushTaskResRequest::_InternalParse(const char* ptr, ::PROTOBUF_NAMES #undef CHK_ } -::PROTOBUF_NAMESPACE_ID::uint8* PushTaskResRequest::_InternalSerialize( +::PROTOBUF_NAMESPACE_ID::uint8* GetRunRequest::_InternalSerialize( ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { - // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.PushTaskResRequest) + // 
@@protoc_insertion_point(serialize_to_array_start:flwr.proto.GetRunRequest) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; (void) cached_has_bits; - // repeated .flwr.proto.TaskRes task_res_list = 1; - for (unsigned int i = 0, - n = static_cast(this->_internal_task_res_list_size()); i < n; i++) { + // sint64 run_id = 1; + if (this->_internal_run_id() != 0) { target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage(1, this->_internal_task_res_list(i), target, stream); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteSInt64ToArray(1, this->_internal_run_id(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); } - // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.PushTaskResRequest) + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.GetRunRequest) return target; } -size_t PushTaskResRequest::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:flwr.proto.PushTaskResRequest) +size_t GetRunRequest::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.GetRunRequest) size_t total_size = 0; ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; - // repeated .flwr.proto.TaskRes task_res_list = 1; - total_size += 1UL * this->_internal_task_res_list_size(); - for (const auto& msg : this->task_res_list_) { - total_size += - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg); + // sint64 run_id = 1; + if (this->_internal_run_id() != 0) { + total_size += 
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SInt64SizePlusOne(this->_internal_run_id()); } return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); } -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData PushTaskResRequest::_class_data_ = { +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData GetRunRequest::_class_data_ = { ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, - PushTaskResRequest::MergeImpl + GetRunRequest::MergeImpl }; -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*PushTaskResRequest::GetClassData() const { return &_class_data_; } +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetRunRequest::GetClassData() const { return &_class_data_; } -void PushTaskResRequest::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, +void GetRunRequest::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from) { - static_cast(to)->MergeFrom( - static_cast(from)); + static_cast(to)->MergeFrom( + static_cast(from)); } -void PushTaskResRequest::MergeFrom(const PushTaskResRequest& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.PushTaskResRequest) +void GetRunRequest::MergeFrom(const GetRunRequest& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.GetRunRequest) GOOGLE_DCHECK_NE(&from, this); ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; (void) cached_has_bits; - task_res_list_.MergeFrom(from.task_res_list_); + if (from._internal_run_id() != 0) { + _internal_set_run_id(from._internal_run_id()); + } _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); } -void PushTaskResRequest::CopyFrom(const PushTaskResRequest& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.PushTaskResRequest) +void GetRunRequest::CopyFrom(const GetRunRequest& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.GetRunRequest) if (&from == this) return; Clear(); MergeFrom(from); } 
-bool PushTaskResRequest::IsInitialized() const { +bool GetRunRequest::IsInitialized() const { return true; } -void PushTaskResRequest::InternalSwap(PushTaskResRequest* other) { +void GetRunRequest::InternalSwap(GetRunRequest* other) { using std::swap; _internal_metadata_.InternalSwap(&other->_internal_metadata_); - task_res_list_.InternalSwap(&other->task_res_list_); -} - -::PROTOBUF_NAMESPACE_ID::Metadata PushTaskResRequest::GetMetadata() const { - return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( - &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, - file_level_metadata_flwr_2fproto_2ffleet_2eproto[6]); + swap(run_id_, other->run_id_); } -// =================================================================== - -PushTaskResResponse_ResultsEntry_DoNotUse::PushTaskResResponse_ResultsEntry_DoNotUse() {} -PushTaskResResponse_ResultsEntry_DoNotUse::PushTaskResResponse_ResultsEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena) - : SuperType(arena) {} -void PushTaskResResponse_ResultsEntry_DoNotUse::MergeFrom(const PushTaskResResponse_ResultsEntry_DoNotUse& other) { - MergeFromInternal(other); -} -::PROTOBUF_NAMESPACE_ID::Metadata PushTaskResResponse_ResultsEntry_DoNotUse::GetMetadata() const { +::PROTOBUF_NAMESPACE_ID::Metadata GetRunRequest::GetMetadata() const { return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, - file_level_metadata_flwr_2fproto_2ffleet_2eproto[7]); + file_level_metadata_flwr_2fproto_2ffleet_2eproto[12]); } // =================================================================== -class PushTaskResResponse::_Internal { +class GetRunResponse::_Internal { public: - static const ::flwr::proto::Reconnect& reconnect(const PushTaskResResponse* msg); + static const ::flwr::proto::Run& run(const GetRunResponse* msg); }; -const ::flwr::proto::Reconnect& 
-PushTaskResResponse::_Internal::reconnect(const PushTaskResResponse* msg) { - return *msg->reconnect_; +const ::flwr::proto::Run& +GetRunResponse::_Internal::run(const GetRunResponse* msg) { + return *msg->run_; } -PushTaskResResponse::PushTaskResResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, +GetRunResponse::GetRunResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), - results_(arena) { + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { SharedCtor(); if (!is_message_owned) { RegisterArenaDtor(arena); } - // @@protoc_insertion_point(arena_constructor:flwr.proto.PushTaskResResponse) + // @@protoc_insertion_point(arena_constructor:flwr.proto.GetRunResponse) } -PushTaskResResponse::PushTaskResResponse(const PushTaskResResponse& from) +GetRunResponse::GetRunResponse(const GetRunResponse& from) : ::PROTOBUF_NAMESPACE_ID::Message() { _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - results_.MergeFrom(from.results_); - if (from._internal_has_reconnect()) { - reconnect_ = new ::flwr::proto::Reconnect(*from.reconnect_); + if (from._internal_has_run()) { + run_ = new ::flwr::proto::Run(*from.run_); } else { - reconnect_ = nullptr; + run_ = nullptr; } - // @@protoc_insertion_point(copy_constructor:flwr.proto.PushTaskResResponse) + // @@protoc_insertion_point(copy_constructor:flwr.proto.GetRunResponse) } -void PushTaskResResponse::SharedCtor() { -reconnect_ = nullptr; +void GetRunResponse::SharedCtor() { +run_ = nullptr; } -PushTaskResResponse::~PushTaskResResponse() { - // @@protoc_insertion_point(destructor:flwr.proto.PushTaskResResponse) +GetRunResponse::~GetRunResponse() { + // @@protoc_insertion_point(destructor:flwr.proto.GetRunResponse) if (GetArenaForAllocation() != nullptr) return; SharedDtor(); _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } -inline void PushTaskResResponse::SharedDtor() { +inline 
void GetRunResponse::SharedDtor() { GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); - if (this != internal_default_instance()) delete reconnect_; + if (this != internal_default_instance()) delete run_; } -void PushTaskResResponse::ArenaDtor(void* object) { - PushTaskResResponse* _this = reinterpret_cast< PushTaskResResponse* >(object); +void GetRunResponse::ArenaDtor(void* object) { + GetRunResponse* _this = reinterpret_cast< GetRunResponse* >(object); (void)_this; - _this->results_. ~MapField(); } -inline void PushTaskResResponse::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena) { - if (arena != nullptr) { - arena->OwnCustomDestructor(this, &PushTaskResResponse::ArenaDtor); - } +void GetRunResponse::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { } -void PushTaskResResponse::SetCachedSize(int size) const { +void GetRunResponse::SetCachedSize(int size) const { _cached_size_.Set(size); } -void PushTaskResResponse::Clear() { -// @@protoc_insertion_point(message_clear_start:flwr.proto.PushTaskResResponse) +void GetRunResponse::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.GetRunResponse) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; - results_.Clear(); - if (GetArenaForAllocation() == nullptr && reconnect_ != nullptr) { - delete reconnect_; + if (GetArenaForAllocation() == nullptr && run_ != nullptr) { + delete run_; } - reconnect_ = nullptr; + run_ = nullptr; _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } -const char* PushTaskResResponse::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +const char* GetRunResponse::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure while (!ctx->Done(&ptr)) { ::PROTOBUF_NAMESPACE_ID::uint32 tag; ptr = 
::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); switch (tag >> 3) { - // .flwr.proto.Reconnect reconnect = 1; + // .flwr.proto.Run run = 1; case 1: if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { - ptr = ctx->ParseMessage(_internal_mutable_reconnect(), ptr); + ptr = ctx->ParseMessage(_internal_mutable_run(), ptr); CHK_(ptr); } else goto handle_unusual; continue; - // map results = 2; - case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { - ptr -= 1; - do { - ptr += 1; - ptr = ctx->ParseMessage(&results_, ptr); - CHK_(ptr); - if (!ctx->DataAvailable(ptr)) break; - } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr)); - } else - goto handle_unusual; - continue; default: goto handle_unusual; } // switch @@ -1573,145 +2938,92 @@ const char* PushTaskResResponse::_InternalParse(const char* ptr, ::PROTOBUF_NAME #undef CHK_ } -::PROTOBUF_NAMESPACE_ID::uint8* PushTaskResResponse::_InternalSerialize( +::PROTOBUF_NAMESPACE_ID::uint8* GetRunResponse::_InternalSerialize( ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { - // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.PushTaskResResponse) + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.GetRunResponse) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; (void) cached_has_bits; - // .flwr.proto.Reconnect reconnect = 1; - if (this->_internal_has_reconnect()) { + // .flwr.proto.Run run = 1; + if (this->_internal_has_run()) { target = stream->EnsureSpace(target); target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: InternalWriteMessage( - 1, _Internal::reconnect(this), target, stream); - } - - // map results = 2; - if (!this->_internal_results().empty()) { - typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >::const_pointer - ConstPtr; - typedef ConstPtr SortItem; - typedef 
::PROTOBUF_NAMESPACE_ID::internal::CompareByDerefFirst Less; - struct Utf8Check { - static void Check(ConstPtr p) { - (void)p; - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( - p->first.data(), static_cast(p->first.length()), - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, - "flwr.proto.PushTaskResResponse.ResultsEntry.key"); - } - }; - - if (stream->IsSerializationDeterministic() && - this->_internal_results().size() > 1) { - ::std::unique_ptr items( - new SortItem[this->_internal_results().size()]); - typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >::size_type size_type; - size_type n = 0; - for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >::const_iterator - it = this->_internal_results().begin(); - it != this->_internal_results().end(); ++it, ++n) { - items[static_cast(n)] = SortItem(&*it); - } - ::std::sort(&items[0], &items[static_cast(n)], Less()); - for (size_type i = 0; i < n; i++) { - target = PushTaskResResponse_ResultsEntry_DoNotUse::Funcs::InternalSerialize(2, items[static_cast(i)]->first, items[static_cast(i)]->second, target, stream); - Utf8Check::Check(&(*items[static_cast(i)])); - } - } else { - for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >::const_iterator - it = this->_internal_results().begin(); - it != this->_internal_results().end(); ++it) { - target = PushTaskResResponse_ResultsEntry_DoNotUse::Funcs::InternalSerialize(2, it->first, it->second, target, stream); - Utf8Check::Check(&(*it)); - } - } + 1, _Internal::run(this), target, stream); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); } - // 
@@protoc_insertion_point(serialize_to_array_end:flwr.proto.PushTaskResResponse) + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.GetRunResponse) return target; } -size_t PushTaskResResponse::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:flwr.proto.PushTaskResResponse) +size_t GetRunResponse::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.GetRunResponse) size_t total_size = 0; ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; - // map results = 2; - total_size += 1 * - ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_results_size()); - for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >::const_iterator - it = this->_internal_results().begin(); - it != this->_internal_results().end(); ++it) { - total_size += PushTaskResResponse_ResultsEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second); - } - - // .flwr.proto.Reconnect reconnect = 1; - if (this->_internal_has_reconnect()) { + // .flwr.proto.Run run = 1; + if (this->_internal_has_run()) { total_size += 1 + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( - *reconnect_); + *run_); } return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); } -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData PushTaskResResponse::_class_data_ = { +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData GetRunResponse::_class_data_ = { ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, - PushTaskResResponse::MergeImpl + GetRunResponse::MergeImpl }; -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*PushTaskResResponse::GetClassData() const { return &_class_data_; } +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetRunResponse::GetClassData() const { return &_class_data_; } -void PushTaskResResponse::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, +void 
GetRunResponse::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from) { - static_cast(to)->MergeFrom( - static_cast(from)); + static_cast(to)->MergeFrom( + static_cast(from)); } -void PushTaskResResponse::MergeFrom(const PushTaskResResponse& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.PushTaskResResponse) +void GetRunResponse::MergeFrom(const GetRunResponse& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.GetRunResponse) GOOGLE_DCHECK_NE(&from, this); ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; (void) cached_has_bits; - results_.MergeFrom(from.results_); - if (from._internal_has_reconnect()) { - _internal_mutable_reconnect()->::flwr::proto::Reconnect::MergeFrom(from._internal_reconnect()); + if (from._internal_has_run()) { + _internal_mutable_run()->::flwr::proto::Run::MergeFrom(from._internal_run()); } _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); } -void PushTaskResResponse::CopyFrom(const PushTaskResResponse& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.PushTaskResResponse) +void GetRunResponse::CopyFrom(const GetRunResponse& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.GetRunResponse) if (&from == this) return; Clear(); MergeFrom(from); } -bool PushTaskResResponse::IsInitialized() const { +bool GetRunResponse::IsInitialized() const { return true; } -void PushTaskResResponse::InternalSwap(PushTaskResResponse* other) { +void GetRunResponse::InternalSwap(GetRunResponse* other) { using std::swap; _internal_metadata_.InternalSwap(&other->_internal_metadata_); - results_.InternalSwap(&other->results_); - swap(reconnect_, other->reconnect_); + swap(run_, other->run_); } -::PROTOBUF_NAMESPACE_ID::Metadata PushTaskResResponse::GetMetadata() const { +::PROTOBUF_NAMESPACE_ID::Metadata GetRunResponse::GetMetadata() const { return 
::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, - file_level_metadata_flwr_2fproto_2ffleet_2eproto[8]); + file_level_metadata_flwr_2fproto_2ffleet_2eproto[13]); } // =================================================================== @@ -1889,7 +3201,7 @@ void Reconnect::InternalSwap(Reconnect* other) { ::PROTOBUF_NAMESPACE_ID::Metadata Reconnect::GetMetadata() const { return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( &descriptor_table_flwr_2fproto_2ffleet_2eproto_getter, &descriptor_table_flwr_2fproto_2ffleet_2eproto_once, - file_level_metadata_flwr_2fproto_2ffleet_2eproto[9]); + file_level_metadata_flwr_2fproto_2ffleet_2eproto[14]); } // @@protoc_insertion_point(namespace_scope) @@ -1908,6 +3220,12 @@ template<> PROTOBUF_NOINLINE ::flwr::proto::DeleteNodeRequest* Arena::CreateMayb template<> PROTOBUF_NOINLINE ::flwr::proto::DeleteNodeResponse* Arena::CreateMaybeMessage< ::flwr::proto::DeleteNodeResponse >(Arena* arena) { return Arena::CreateMessageInternal< ::flwr::proto::DeleteNodeResponse >(arena); } +template<> PROTOBUF_NOINLINE ::flwr::proto::PingRequest* Arena::CreateMaybeMessage< ::flwr::proto::PingRequest >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::PingRequest >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::PingResponse* Arena::CreateMaybeMessage< ::flwr::proto::PingResponse >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::PingResponse >(arena); +} template<> PROTOBUF_NOINLINE ::flwr::proto::PullTaskInsRequest* Arena::CreateMaybeMessage< ::flwr::proto::PullTaskInsRequest >(Arena* arena) { return Arena::CreateMessageInternal< ::flwr::proto::PullTaskInsRequest >(arena); } @@ -1923,6 +3241,15 @@ template<> PROTOBUF_NOINLINE ::flwr::proto::PushTaskResResponse_ResultsEntry_DoN template<> PROTOBUF_NOINLINE ::flwr::proto::PushTaskResResponse* Arena::CreateMaybeMessage< 
::flwr::proto::PushTaskResResponse >(Arena* arena) { return Arena::CreateMessageInternal< ::flwr::proto::PushTaskResResponse >(arena); } +template<> PROTOBUF_NOINLINE ::flwr::proto::Run* Arena::CreateMaybeMessage< ::flwr::proto::Run >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::Run >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::GetRunRequest* Arena::CreateMaybeMessage< ::flwr::proto::GetRunRequest >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::GetRunRequest >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::GetRunResponse* Arena::CreateMaybeMessage< ::flwr::proto::GetRunResponse >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::GetRunResponse >(arena); +} template<> PROTOBUF_NOINLINE ::flwr::proto::Reconnect* Arena::CreateMaybeMessage< ::flwr::proto::Reconnect >(Arena* arena) { return Arena::CreateMessageInternal< ::flwr::proto::Reconnect >(arena); } diff --git a/src/cc/flwr/include/flwr/proto/fleet.pb.h b/src/cc/flwr/include/flwr/proto/fleet.pb.h index 842e800f5b1c..9ad30b5752f5 100644 --- a/src/cc/flwr/include/flwr/proto/fleet.pb.h +++ b/src/cc/flwr/include/flwr/proto/fleet.pb.h @@ -52,7 +52,7 @@ struct TableStruct_flwr_2fproto_2ffleet_2eproto { PROTOBUF_SECTION_VARIABLE(protodesc_cold); static const ::PROTOBUF_NAMESPACE_ID::internal::AuxiliaryParseTableField aux[] PROTOBUF_SECTION_VARIABLE(protodesc_cold); - static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[10] + static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[15] PROTOBUF_SECTION_VARIABLE(protodesc_cold); static const ::PROTOBUF_NAMESPACE_ID::internal::FieldMetadata field_metadata[]; static const ::PROTOBUF_NAMESPACE_ID::internal::SerializationTable serialization_table[]; @@ -73,6 +73,18 @@ extern DeleteNodeRequestDefaultTypeInternal _DeleteNodeRequest_default_instance_ class DeleteNodeResponse; struct DeleteNodeResponseDefaultTypeInternal; extern DeleteNodeResponseDefaultTypeInternal 
_DeleteNodeResponse_default_instance_; +class GetRunRequest; +struct GetRunRequestDefaultTypeInternal; +extern GetRunRequestDefaultTypeInternal _GetRunRequest_default_instance_; +class GetRunResponse; +struct GetRunResponseDefaultTypeInternal; +extern GetRunResponseDefaultTypeInternal _GetRunResponse_default_instance_; +class PingRequest; +struct PingRequestDefaultTypeInternal; +extern PingRequestDefaultTypeInternal _PingRequest_default_instance_; +class PingResponse; +struct PingResponseDefaultTypeInternal; +extern PingResponseDefaultTypeInternal _PingResponse_default_instance_; class PullTaskInsRequest; struct PullTaskInsRequestDefaultTypeInternal; extern PullTaskInsRequestDefaultTypeInternal _PullTaskInsRequest_default_instance_; @@ -91,6 +103,9 @@ extern PushTaskResResponse_ResultsEntry_DoNotUseDefaultTypeInternal _PushTaskRes class Reconnect; struct ReconnectDefaultTypeInternal; extern ReconnectDefaultTypeInternal _Reconnect_default_instance_; +class Run; +struct RunDefaultTypeInternal; +extern RunDefaultTypeInternal _Run_default_instance_; } // namespace proto } // namespace flwr PROTOBUF_NAMESPACE_OPEN @@ -98,12 +113,17 @@ template<> ::flwr::proto::CreateNodeRequest* Arena::CreateMaybeMessage<::flwr::p template<> ::flwr::proto::CreateNodeResponse* Arena::CreateMaybeMessage<::flwr::proto::CreateNodeResponse>(Arena*); template<> ::flwr::proto::DeleteNodeRequest* Arena::CreateMaybeMessage<::flwr::proto::DeleteNodeRequest>(Arena*); template<> ::flwr::proto::DeleteNodeResponse* Arena::CreateMaybeMessage<::flwr::proto::DeleteNodeResponse>(Arena*); +template<> ::flwr::proto::GetRunRequest* Arena::CreateMaybeMessage<::flwr::proto::GetRunRequest>(Arena*); +template<> ::flwr::proto::GetRunResponse* Arena::CreateMaybeMessage<::flwr::proto::GetRunResponse>(Arena*); +template<> ::flwr::proto::PingRequest* Arena::CreateMaybeMessage<::flwr::proto::PingRequest>(Arena*); +template<> ::flwr::proto::PingResponse* Arena::CreateMaybeMessage<::flwr::proto::PingResponse>(Arena*); 
template<> ::flwr::proto::PullTaskInsRequest* Arena::CreateMaybeMessage<::flwr::proto::PullTaskInsRequest>(Arena*); template<> ::flwr::proto::PullTaskInsResponse* Arena::CreateMaybeMessage<::flwr::proto::PullTaskInsResponse>(Arena*); template<> ::flwr::proto::PushTaskResRequest* Arena::CreateMaybeMessage<::flwr::proto::PushTaskResRequest>(Arena*); template<> ::flwr::proto::PushTaskResResponse* Arena::CreateMaybeMessage<::flwr::proto::PushTaskResResponse>(Arena*); template<> ::flwr::proto::PushTaskResResponse_ResultsEntry_DoNotUse* Arena::CreateMaybeMessage<::flwr::proto::PushTaskResResponse_ResultsEntry_DoNotUse>(Arena*); template<> ::flwr::proto::Reconnect* Arena::CreateMaybeMessage<::flwr::proto::Reconnect>(Arena*); +template<> ::flwr::proto::Run* Arena::CreateMaybeMessage<::flwr::proto::Run>(Arena*); PROTOBUF_NAMESPACE_CLOSE namespace flwr { namespace proto { @@ -111,9 +131,10 @@ namespace proto { // =================================================================== class CreateNodeRequest final : - public ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase /* @@protoc_insertion_point(class_definition:flwr.proto.CreateNodeRequest) */ { + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.CreateNodeRequest) */ { public: inline CreateNodeRequest() : CreateNodeRequest(nullptr) {} + ~CreateNodeRequest() override; explicit constexpr CreateNodeRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); CreateNodeRequest(const CreateNodeRequest& from); @@ -185,15 +206,27 @@ class CreateNodeRequest final : CreateNodeRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { return CreateMaybeMessage(arena); } - using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyFrom; - inline void CopyFrom(const CreateNodeRequest& from) { - ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyImpl(this, from); - } - using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeFrom; - void MergeFrom(const 
CreateNodeRequest& from) { - ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeImpl(this, from); - } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const CreateNodeRequest& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const CreateNodeRequest& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(CreateNodeRequest* other); friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "flwr.proto.CreateNodeRequest"; @@ -202,6 +235,8 @@ class CreateNodeRequest final : explicit CreateNodeRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); public: static const ClassData _class_data_; @@ -213,6 +248,18 @@ class CreateNodeRequest final : // accessors ------------------------------------------------------- + enum : int { + kPingIntervalFieldNumber = 1, + }; + // double ping_interval = 1; + void clear_ping_interval(); + double ping_interval() const; + void set_ping_interval(double value); + private: + double _internal_ping_interval() const; + void _internal_set_ping_interval(double value); + public: + // 
@@protoc_insertion_point(class_scope:flwr.proto.CreateNodeRequest) private: class _Internal; @@ -220,6 +267,7 @@ class CreateNodeRequest final : template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; typedef void InternalArenaConstructable_; typedef void DestructorSkippable_; + double ping_interval_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; friend struct ::TableStruct_flwr_2fproto_2ffleet_2eproto; }; @@ -644,24 +692,24 @@ class DeleteNodeResponse final : }; // ------------------------------------------------------------------- -class PullTaskInsRequest final : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.PullTaskInsRequest) */ { +class PingRequest final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.PingRequest) */ { public: - inline PullTaskInsRequest() : PullTaskInsRequest(nullptr) {} - ~PullTaskInsRequest() override; - explicit constexpr PullTaskInsRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + inline PingRequest() : PingRequest(nullptr) {} + ~PingRequest() override; + explicit constexpr PingRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - PullTaskInsRequest(const PullTaskInsRequest& from); - PullTaskInsRequest(PullTaskInsRequest&& from) noexcept - : PullTaskInsRequest() { + PingRequest(const PingRequest& from); + PingRequest(PingRequest&& from) noexcept + : PingRequest() { *this = ::std::move(from); } - inline PullTaskInsRequest& operator=(const PullTaskInsRequest& from) { + inline PingRequest& operator=(const PingRequest& from) { CopyFrom(from); return *this; } - inline PullTaskInsRequest& operator=(PullTaskInsRequest&& from) noexcept { + inline PingRequest& operator=(PingRequest&& from) noexcept { if (this == &from) return *this; if (GetOwningArena() == from.GetOwningArena() #ifdef PROTOBUF_FORCE_COPY_IN_MOVE @@ -684,20 +732,20 @@ class PullTaskInsRequest final : static const 
::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { return default_instance().GetMetadata().reflection; } - static const PullTaskInsRequest& default_instance() { + static const PingRequest& default_instance() { return *internal_default_instance(); } - static inline const PullTaskInsRequest* internal_default_instance() { - return reinterpret_cast( - &_PullTaskInsRequest_default_instance_); + static inline const PingRequest* internal_default_instance() { + return reinterpret_cast( + &_PingRequest_default_instance_); } static constexpr int kIndexInFileMessages = 4; - friend void swap(PullTaskInsRequest& a, PullTaskInsRequest& b) { + friend void swap(PingRequest& a, PingRequest& b) { a.Swap(&b); } - inline void Swap(PullTaskInsRequest* other) { + inline void Swap(PingRequest* other) { if (other == this) return; if (GetOwningArena() == other->GetOwningArena()) { InternalSwap(other); @@ -705,7 +753,7 @@ class PullTaskInsRequest final : ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); } } - void UnsafeArenaSwap(PullTaskInsRequest* other) { + void UnsafeArenaSwap(PingRequest* other) { if (other == this) return; GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); @@ -713,17 +761,17 @@ class PullTaskInsRequest final : // implements Message ---------------------------------------------- - inline PullTaskInsRequest* New() const final { - return new PullTaskInsRequest(); + inline PingRequest* New() const final { + return new PingRequest(); } - PullTaskInsRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage(arena); + PingRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); } using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; - void CopyFrom(const PullTaskInsRequest& from); + void CopyFrom(const PingRequest& from); using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - void MergeFrom(const PullTaskInsRequest& from); + void MergeFrom(const PingRequest& 
from); private: static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); public: @@ -740,13 +788,13 @@ class PullTaskInsRequest final : void SharedCtor(); void SharedDtor(); void SetCachedSize(int size) const final; - void InternalSwap(PullTaskInsRequest* other); + void InternalSwap(PingRequest* other); friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "flwr.proto.PullTaskInsRequest"; + return "flwr.proto.PingRequest"; } protected: - explicit PullTaskInsRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + explicit PingRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); private: static void ArenaDtor(void* object); @@ -763,33 +811,9 @@ class PullTaskInsRequest final : // accessors ------------------------------------------------------- enum : int { - kTaskIdsFieldNumber = 2, kNodeFieldNumber = 1, + kPingIntervalFieldNumber = 2, }; - // repeated string task_ids = 2; - int task_ids_size() const; - private: - int _internal_task_ids_size() const; - public: - void clear_task_ids(); - const std::string& task_ids(int index) const; - std::string* mutable_task_ids(int index); - void set_task_ids(int index, const std::string& value); - void set_task_ids(int index, std::string&& value); - void set_task_ids(int index, const char* value); - void set_task_ids(int index, const char* value, size_t size); - std::string* add_task_ids(); - void add_task_ids(const std::string& value); - void add_task_ids(std::string&& value); - void add_task_ids(const char* value); - void add_task_ids(const char* value, size_t size); - const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& task_ids() const; - ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* mutable_task_ids(); - private: - const std::string& _internal_task_ids(int index) const; - std::string* _internal_add_task_ids(); - public: - // .flwr.proto.Node node = 1; bool has_node() const; private: @@ 
-808,38 +832,47 @@ class PullTaskInsRequest final : ::flwr::proto::Node* node); ::flwr::proto::Node* unsafe_arena_release_node(); - // @@protoc_insertion_point(class_scope:flwr.proto.PullTaskInsRequest) + // double ping_interval = 2; + void clear_ping_interval(); + double ping_interval() const; + void set_ping_interval(double value); + private: + double _internal_ping_interval() const; + void _internal_set_ping_interval(double value); + public: + + // @@protoc_insertion_point(class_scope:flwr.proto.PingRequest) private: class _Internal; template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; typedef void InternalArenaConstructable_; typedef void DestructorSkippable_; - ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField task_ids_; ::flwr::proto::Node* node_; + double ping_interval_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; friend struct ::TableStruct_flwr_2fproto_2ffleet_2eproto; }; // ------------------------------------------------------------------- -class PullTaskInsResponse final : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.PullTaskInsResponse) */ { +class PingResponse final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.PingResponse) */ { public: - inline PullTaskInsResponse() : PullTaskInsResponse(nullptr) {} - ~PullTaskInsResponse() override; - explicit constexpr PullTaskInsResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + inline PingResponse() : PingResponse(nullptr) {} + ~PingResponse() override; + explicit constexpr PingResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - PullTaskInsResponse(const PullTaskInsResponse& from); - PullTaskInsResponse(PullTaskInsResponse&& from) noexcept - : PullTaskInsResponse() { + PingResponse(const PingResponse& from); + PingResponse(PingResponse&& from) noexcept + : PingResponse() { *this = ::std::move(from); } - inline PullTaskInsResponse& 
operator=(const PullTaskInsResponse& from) { + inline PingResponse& operator=(const PingResponse& from) { CopyFrom(from); return *this; } - inline PullTaskInsResponse& operator=(PullTaskInsResponse&& from) noexcept { + inline PingResponse& operator=(PingResponse&& from) noexcept { if (this == &from) return *this; if (GetOwningArena() == from.GetOwningArena() #ifdef PROTOBUF_FORCE_COPY_IN_MOVE @@ -862,20 +895,20 @@ class PullTaskInsResponse final : static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { return default_instance().GetMetadata().reflection; } - static const PullTaskInsResponse& default_instance() { + static const PingResponse& default_instance() { return *internal_default_instance(); } - static inline const PullTaskInsResponse* internal_default_instance() { - return reinterpret_cast( - &_PullTaskInsResponse_default_instance_); + static inline const PingResponse* internal_default_instance() { + return reinterpret_cast( + &_PingResponse_default_instance_); } static constexpr int kIndexInFileMessages = 5; - friend void swap(PullTaskInsResponse& a, PullTaskInsResponse& b) { + friend void swap(PingResponse& a, PingResponse& b) { a.Swap(&b); } - inline void Swap(PullTaskInsResponse* other) { + inline void Swap(PingResponse* other) { if (other == this) return; if (GetOwningArena() == other->GetOwningArena()) { InternalSwap(other); @@ -883,7 +916,7 @@ class PullTaskInsResponse final : ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); } } - void UnsafeArenaSwap(PullTaskInsResponse* other) { + void UnsafeArenaSwap(PingResponse* other) { if (other == this) return; GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); @@ -891,17 +924,17 @@ class PullTaskInsResponse final : // implements Message ---------------------------------------------- - inline PullTaskInsResponse* New() const final { - return new PullTaskInsResponse(); + inline PingResponse* New() const final { + return new PingResponse(); } - 
PullTaskInsResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage(arena); + PingResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); } using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; - void CopyFrom(const PullTaskInsResponse& from); + void CopyFrom(const PingResponse& from); using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - void MergeFrom(const PullTaskInsResponse& from); + void MergeFrom(const PingResponse& from); private: static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); public: @@ -918,13 +951,13 @@ class PullTaskInsResponse final : void SharedCtor(); void SharedDtor(); void SetCachedSize(int size) const final; - void InternalSwap(PullTaskInsResponse* other); + void InternalSwap(PingResponse* other); friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "flwr.proto.PullTaskInsResponse"; + return "flwr.proto.PingResponse"; } protected: - explicit PullTaskInsResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + explicit PingResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); private: static void ArenaDtor(void* object); @@ -941,77 +974,48 @@ class PullTaskInsResponse final : // accessors ------------------------------------------------------- enum : int { - kTaskInsListFieldNumber = 2, - kReconnectFieldNumber = 1, + kSuccessFieldNumber = 1, }; - // repeated .flwr.proto.TaskIns task_ins_list = 2; - int task_ins_list_size() const; - private: - int _internal_task_ins_list_size() const; - public: - void clear_task_ins_list(); - ::flwr::proto::TaskIns* mutable_task_ins_list(int index); - ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::TaskIns >* - mutable_task_ins_list(); - private: - const ::flwr::proto::TaskIns& _internal_task_ins_list(int index) const; - ::flwr::proto::TaskIns* 
_internal_add_task_ins_list(); - public: - const ::flwr::proto::TaskIns& task_ins_list(int index) const; - ::flwr::proto::TaskIns* add_task_ins_list(); - const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::TaskIns >& - task_ins_list() const; - - // .flwr.proto.Reconnect reconnect = 1; - bool has_reconnect() const; - private: - bool _internal_has_reconnect() const; - public: - void clear_reconnect(); - const ::flwr::proto::Reconnect& reconnect() const; - PROTOBUF_MUST_USE_RESULT ::flwr::proto::Reconnect* release_reconnect(); - ::flwr::proto::Reconnect* mutable_reconnect(); - void set_allocated_reconnect(::flwr::proto::Reconnect* reconnect); + // bool success = 1; + void clear_success(); + bool success() const; + void set_success(bool value); private: - const ::flwr::proto::Reconnect& _internal_reconnect() const; - ::flwr::proto::Reconnect* _internal_mutable_reconnect(); + bool _internal_success() const; + void _internal_set_success(bool value); public: - void unsafe_arena_set_allocated_reconnect( - ::flwr::proto::Reconnect* reconnect); - ::flwr::proto::Reconnect* unsafe_arena_release_reconnect(); - // @@protoc_insertion_point(class_scope:flwr.proto.PullTaskInsResponse) + // @@protoc_insertion_point(class_scope:flwr.proto.PingResponse) private: class _Internal; template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; typedef void InternalArenaConstructable_; typedef void DestructorSkippable_; - ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::TaskIns > task_ins_list_; - ::flwr::proto::Reconnect* reconnect_; + bool success_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; friend struct ::TableStruct_flwr_2fproto_2ffleet_2eproto; }; // ------------------------------------------------------------------- -class PushTaskResRequest final : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.PushTaskResRequest) */ { +class PullTaskInsRequest final : + public 
::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.PullTaskInsRequest) */ { public: - inline PushTaskResRequest() : PushTaskResRequest(nullptr) {} - ~PushTaskResRequest() override; - explicit constexpr PushTaskResRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + inline PullTaskInsRequest() : PullTaskInsRequest(nullptr) {} + ~PullTaskInsRequest() override; + explicit constexpr PullTaskInsRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - PushTaskResRequest(const PushTaskResRequest& from); - PushTaskResRequest(PushTaskResRequest&& from) noexcept - : PushTaskResRequest() { + PullTaskInsRequest(const PullTaskInsRequest& from); + PullTaskInsRequest(PullTaskInsRequest&& from) noexcept + : PullTaskInsRequest() { *this = ::std::move(from); } - inline PushTaskResRequest& operator=(const PushTaskResRequest& from) { + inline PullTaskInsRequest& operator=(const PullTaskInsRequest& from) { CopyFrom(from); return *this; } - inline PushTaskResRequest& operator=(PushTaskResRequest&& from) noexcept { + inline PullTaskInsRequest& operator=(PullTaskInsRequest&& from) noexcept { if (this == &from) return *this; if (GetOwningArena() == from.GetOwningArena() #ifdef PROTOBUF_FORCE_COPY_IN_MOVE @@ -1034,20 +1038,20 @@ class PushTaskResRequest final : static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { return default_instance().GetMetadata().reflection; } - static const PushTaskResRequest& default_instance() { + static const PullTaskInsRequest& default_instance() { return *internal_default_instance(); } - static inline const PushTaskResRequest* internal_default_instance() { - return reinterpret_cast( - &_PushTaskResRequest_default_instance_); + static inline const PullTaskInsRequest* internal_default_instance() { + return reinterpret_cast( + &_PullTaskInsRequest_default_instance_); } static constexpr int kIndexInFileMessages = 6; - friend void swap(PushTaskResRequest& a, PushTaskResRequest& b) { + friend 
void swap(PullTaskInsRequest& a, PullTaskInsRequest& b) { a.Swap(&b); } - inline void Swap(PushTaskResRequest* other) { + inline void Swap(PullTaskInsRequest* other) { if (other == this) return; if (GetOwningArena() == other->GetOwningArena()) { InternalSwap(other); @@ -1055,7 +1059,7 @@ class PushTaskResRequest final : ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); } } - void UnsafeArenaSwap(PushTaskResRequest* other) { + void UnsafeArenaSwap(PullTaskInsRequest* other) { if (other == this) return; GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); @@ -1063,17 +1067,17 @@ class PushTaskResRequest final : // implements Message ---------------------------------------------- - inline PushTaskResRequest* New() const final { - return new PushTaskResRequest(); + inline PullTaskInsRequest* New() const final { + return new PullTaskInsRequest(); } - PushTaskResRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage(arena); + PullTaskInsRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); } using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; - void CopyFrom(const PushTaskResRequest& from); + void CopyFrom(const PullTaskInsRequest& from); using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - void MergeFrom(const PushTaskResRequest& from); + void MergeFrom(const PullTaskInsRequest& from); private: static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); public: @@ -1090,13 +1094,13 @@ class PushTaskResRequest final : void SharedCtor(); void SharedDtor(); void SetCachedSize(int size) const final; - void InternalSwap(PushTaskResRequest* other); + void InternalSwap(PullTaskInsRequest* other); friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "flwr.proto.PushTaskResRequest"; + return "flwr.proto.PullTaskInsRequest"; } 
protected: - explicit PushTaskResRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + explicit PullTaskInsRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); private: static void ArenaDtor(void* object); @@ -1113,82 +1117,255 @@ class PushTaskResRequest final : // accessors ------------------------------------------------------- enum : int { - kTaskResListFieldNumber = 1, + kTaskIdsFieldNumber = 2, + kNodeFieldNumber = 1, }; - // repeated .flwr.proto.TaskRes task_res_list = 1; - int task_res_list_size() const; + // repeated string task_ids = 2; + int task_ids_size() const; private: - int _internal_task_res_list_size() const; + int _internal_task_ids_size() const; public: - void clear_task_res_list(); - ::flwr::proto::TaskRes* mutable_task_res_list(int index); - ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::TaskRes >* - mutable_task_res_list(); + void clear_task_ids(); + const std::string& task_ids(int index) const; + std::string* mutable_task_ids(int index); + void set_task_ids(int index, const std::string& value); + void set_task_ids(int index, std::string&& value); + void set_task_ids(int index, const char* value); + void set_task_ids(int index, const char* value, size_t size); + std::string* add_task_ids(); + void add_task_ids(const std::string& value); + void add_task_ids(std::string&& value); + void add_task_ids(const char* value); + void add_task_ids(const char* value, size_t size); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& task_ids() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* mutable_task_ids(); private: - const ::flwr::proto::TaskRes& _internal_task_res_list(int index) const; - ::flwr::proto::TaskRes* _internal_add_task_res_list(); + const std::string& _internal_task_ids(int index) const; + std::string* _internal_add_task_ids(); public: - const ::flwr::proto::TaskRes& task_res_list(int index) const; - ::flwr::proto::TaskRes* add_task_res_list(); - const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< 
::flwr::proto::TaskRes >& - task_res_list() const; - - // @@protoc_insertion_point(class_scope:flwr.proto.PushTaskResRequest) - private: - class _Internal; - - template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; - typedef void InternalArenaConstructable_; - typedef void DestructorSkippable_; - ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::TaskRes > task_res_list_; - mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; - friend struct ::TableStruct_flwr_2fproto_2ffleet_2eproto; -}; -// ------------------------------------------------------------------- -class PushTaskResResponse_ResultsEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntry { -public: - typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntry SuperType; - PushTaskResResponse_ResultsEntry_DoNotUse(); - explicit constexpr PushTaskResResponse_ResultsEntry_DoNotUse( - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - explicit PushTaskResResponse_ResultsEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena); - void MergeFrom(const PushTaskResResponse_ResultsEntry_DoNotUse& other); - static const PushTaskResResponse_ResultsEntry_DoNotUse* internal_default_instance() { return reinterpret_cast(&_PushTaskResResponse_ResultsEntry_DoNotUse_default_instance_); } - static bool ValidateKey(std::string* s) { - return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "flwr.proto.PushTaskResResponse.ResultsEntry.key"); - } - static bool ValidateValue(void*) { return true; } + // .flwr.proto.Node node = 1; + bool has_node() const; + private: + bool _internal_has_node() const; + public: + void clear_node(); + const ::flwr::proto::Node& node() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::Node* release_node(); + ::flwr::proto::Node* mutable_node(); + void set_allocated_node(::flwr::proto::Node* node); + private: + const ::flwr::proto::Node& 
_internal_node() const; + ::flwr::proto::Node* _internal_mutable_node(); + public: + void unsafe_arena_set_allocated_node( + ::flwr::proto::Node* node); + ::flwr::proto::Node* unsafe_arena_release_node(); + + // @@protoc_insertion_point(class_scope:flwr.proto.PullTaskInsRequest) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField task_ids_; + ::flwr::proto::Node* node_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2ffleet_2eproto; +}; +// ------------------------------------------------------------------- + +class PullTaskInsResponse final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.PullTaskInsResponse) */ { + public: + inline PullTaskInsResponse() : PullTaskInsResponse(nullptr) {} + ~PullTaskInsResponse() override; + explicit constexpr PullTaskInsResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + PullTaskInsResponse(const PullTaskInsResponse& from); + PullTaskInsResponse(PullTaskInsResponse&& from) noexcept + : PullTaskInsResponse() { + *this = ::std::move(from); + } + + inline PullTaskInsResponse& operator=(const PullTaskInsResponse& from) { + CopyFrom(from); + return *this; + } + inline PullTaskInsResponse& operator=(PullTaskInsResponse&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + 
} + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const PullTaskInsResponse& default_instance() { + return *internal_default_instance(); + } + static inline const PullTaskInsResponse* internal_default_instance() { + return reinterpret_cast( + &_PullTaskInsResponse_default_instance_); + } + static constexpr int kIndexInFileMessages = + 7; + + friend void swap(PullTaskInsResponse& a, PullTaskInsResponse& b) { + a.Swap(&b); + } + inline void Swap(PullTaskInsResponse* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(PullTaskInsResponse* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline PullTaskInsResponse* New() const final { + return new PullTaskInsResponse(); + } + + PullTaskInsResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const PullTaskInsResponse& from); using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const PullTaskInsResponse& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return 
_cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(PullTaskInsResponse* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.PullTaskInsResponse"; + } + protected: + explicit PullTaskInsResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; -}; + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kTaskInsListFieldNumber = 2, + kReconnectFieldNumber = 1, + }; + // repeated .flwr.proto.TaskIns task_ins_list = 2; + int task_ins_list_size() const; + private: + int _internal_task_ins_list_size() const; + public: + void clear_task_ins_list(); + ::flwr::proto::TaskIns* mutable_task_ins_list(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::TaskIns >* + mutable_task_ins_list(); + private: + const ::flwr::proto::TaskIns& _internal_task_ins_list(int index) const; + ::flwr::proto::TaskIns* _internal_add_task_ins_list(); + public: + const ::flwr::proto::TaskIns& task_ins_list(int index) const; + ::flwr::proto::TaskIns* add_task_ins_list(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::TaskIns >& + task_ins_list() const; + + // .flwr.proto.Reconnect reconnect = 1; + bool has_reconnect() const; + private: + bool _internal_has_reconnect() const; + public: + void clear_reconnect(); + const ::flwr::proto::Reconnect& reconnect() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::Reconnect* 
release_reconnect(); + ::flwr::proto::Reconnect* mutable_reconnect(); + void set_allocated_reconnect(::flwr::proto::Reconnect* reconnect); + private: + const ::flwr::proto::Reconnect& _internal_reconnect() const; + ::flwr::proto::Reconnect* _internal_mutable_reconnect(); + public: + void unsafe_arena_set_allocated_reconnect( + ::flwr::proto::Reconnect* reconnect); + ::flwr::proto::Reconnect* unsafe_arena_release_reconnect(); + + // @@protoc_insertion_point(class_scope:flwr.proto.PullTaskInsResponse) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::TaskIns > task_ins_list_; + ::flwr::proto::Reconnect* reconnect_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2ffleet_2eproto; +}; // ------------------------------------------------------------------- -class PushTaskResResponse final : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.PushTaskResResponse) */ { +class PushTaskResRequest final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.PushTaskResRequest) */ { public: - inline PushTaskResResponse() : PushTaskResResponse(nullptr) {} - ~PushTaskResResponse() override; - explicit constexpr PushTaskResResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + inline PushTaskResRequest() : PushTaskResRequest(nullptr) {} + ~PushTaskResRequest() override; + explicit constexpr PushTaskResRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - PushTaskResResponse(const PushTaskResResponse& from); - PushTaskResResponse(PushTaskResResponse&& from) noexcept - : PushTaskResResponse() { + PushTaskResRequest(const PushTaskResRequest& from); + PushTaskResRequest(PushTaskResRequest&& from) noexcept + : 
PushTaskResRequest() { *this = ::std::move(from); } - inline PushTaskResResponse& operator=(const PushTaskResResponse& from) { + inline PushTaskResRequest& operator=(const PushTaskResRequest& from) { CopyFrom(from); return *this; } - inline PushTaskResResponse& operator=(PushTaskResResponse&& from) noexcept { + inline PushTaskResRequest& operator=(PushTaskResRequest&& from) noexcept { if (this == &from) return *this; if (GetOwningArena() == from.GetOwningArena() #ifdef PROTOBUF_FORCE_COPY_IN_MOVE @@ -1211,20 +1388,20 @@ class PushTaskResResponse final : static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { return default_instance().GetMetadata().reflection; } - static const PushTaskResResponse& default_instance() { + static const PushTaskResRequest& default_instance() { return *internal_default_instance(); } - static inline const PushTaskResResponse* internal_default_instance() { - return reinterpret_cast( - &_PushTaskResResponse_default_instance_); + static inline const PushTaskResRequest* internal_default_instance() { + return reinterpret_cast( + &_PushTaskResRequest_default_instance_); } static constexpr int kIndexInFileMessages = 8; - friend void swap(PushTaskResResponse& a, PushTaskResResponse& b) { + friend void swap(PushTaskResRequest& a, PushTaskResRequest& b) { a.Swap(&b); } - inline void Swap(PushTaskResResponse* other) { + inline void Swap(PushTaskResRequest* other) { if (other == this) return; if (GetOwningArena() == other->GetOwningArena()) { InternalSwap(other); @@ -1232,7 +1409,7 @@ class PushTaskResResponse final : ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); } } - void UnsafeArenaSwap(PushTaskResResponse* other) { + void UnsafeArenaSwap(PushTaskResRequest* other) { if (other == this) return; GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); @@ -1240,17 +1417,688 @@ class PushTaskResResponse final : // implements Message ---------------------------------------------- - inline 
PushTaskResResponse* New() const final { - return new PushTaskResResponse(); + inline PushTaskResRequest* New() const final { + return new PushTaskResRequest(); } - PushTaskResResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage(arena); + PushTaskResRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const PushTaskResRequest& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const PushTaskResRequest& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(PushTaskResRequest* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.PushTaskResRequest"; + } + protected: + explicit PushTaskResRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types 
---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kTaskResListFieldNumber = 1, + }; + // repeated .flwr.proto.TaskRes task_res_list = 1; + int task_res_list_size() const; + private: + int _internal_task_res_list_size() const; + public: + void clear_task_res_list(); + ::flwr::proto::TaskRes* mutable_task_res_list(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::TaskRes >* + mutable_task_res_list(); + private: + const ::flwr::proto::TaskRes& _internal_task_res_list(int index) const; + ::flwr::proto::TaskRes* _internal_add_task_res_list(); + public: + const ::flwr::proto::TaskRes& task_res_list(int index) const; + ::flwr::proto::TaskRes* add_task_res_list(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::TaskRes >& + task_res_list() const; + + // @@protoc_insertion_point(class_scope:flwr.proto.PushTaskResRequest) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::TaskRes > task_res_list_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2ffleet_2eproto; +}; +// ------------------------------------------------------------------- + +class PushTaskResResponse_ResultsEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntry { +public: + typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntry SuperType; + PushTaskResResponse_ResultsEntry_DoNotUse(); + explicit constexpr PushTaskResResponse_ResultsEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + explicit PushTaskResResponse_ResultsEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void MergeFrom(const PushTaskResResponse_ResultsEntry_DoNotUse& other); + static const 
PushTaskResResponse_ResultsEntry_DoNotUse* internal_default_instance() { return reinterpret_cast(&_PushTaskResResponse_ResultsEntry_DoNotUse_default_instance_); } + static bool ValidateKey(std::string* s) { + return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "flwr.proto.PushTaskResResponse.ResultsEntry.key"); + } + static bool ValidateValue(void*) { return true; } + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; +}; + +// ------------------------------------------------------------------- + +class PushTaskResResponse final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.PushTaskResResponse) */ { + public: + inline PushTaskResResponse() : PushTaskResResponse(nullptr) {} + ~PushTaskResResponse() override; + explicit constexpr PushTaskResResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + PushTaskResResponse(const PushTaskResResponse& from); + PushTaskResResponse(PushTaskResResponse&& from) noexcept + : PushTaskResResponse() { + *this = ::std::move(from); + } + + inline PushTaskResResponse& operator=(const PushTaskResResponse& from) { + CopyFrom(from); + return *this; + } + inline PushTaskResResponse& operator=(PushTaskResResponse&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + 
return default_instance().GetMetadata().reflection; + } + static const PushTaskResResponse& default_instance() { + return *internal_default_instance(); + } + static inline const PushTaskResResponse* internal_default_instance() { + return reinterpret_cast( + &_PushTaskResResponse_default_instance_); + } + static constexpr int kIndexInFileMessages = + 10; + + friend void swap(PushTaskResResponse& a, PushTaskResResponse& b) { + a.Swap(&b); + } + inline void Swap(PushTaskResResponse* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(PushTaskResResponse* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline PushTaskResResponse* New() const final { + return new PushTaskResResponse(); + } + + PushTaskResResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const PushTaskResResponse& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const PushTaskResResponse& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); 
+ void SetCachedSize(int size) const final; + void InternalSwap(PushTaskResResponse* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.PushTaskResResponse"; + } + protected: + explicit PushTaskResResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + + // accessors ------------------------------------------------------- + + enum : int { + kResultsFieldNumber = 2, + kReconnectFieldNumber = 1, + }; + // map results = 2; + int results_size() const; + private: + int _internal_results_size() const; + public: + void clear_results(); + private: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >& + _internal_results() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >* + _internal_mutable_results(); + public: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >& + results() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >* + mutable_results(); + + // .flwr.proto.Reconnect reconnect = 1; + bool has_reconnect() const; + private: + bool _internal_has_reconnect() const; + public: + void clear_reconnect(); + const ::flwr::proto::Reconnect& reconnect() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::Reconnect* release_reconnect(); + ::flwr::proto::Reconnect* mutable_reconnect(); + void set_allocated_reconnect(::flwr::proto::Reconnect* reconnect); + private: + const ::flwr::proto::Reconnect& _internal_reconnect() const; + 
::flwr::proto::Reconnect* _internal_mutable_reconnect(); + public: + void unsafe_arena_set_allocated_reconnect( + ::flwr::proto::Reconnect* reconnect); + ::flwr::proto::Reconnect* unsafe_arena_release_reconnect(); + + // @@protoc_insertion_point(class_scope:flwr.proto.PushTaskResResponse) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::internal::MapField< + PushTaskResResponse_ResultsEntry_DoNotUse, + std::string, ::PROTOBUF_NAMESPACE_ID::uint32, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_UINT32> results_; + ::flwr::proto::Reconnect* reconnect_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2ffleet_2eproto; +}; +// ------------------------------------------------------------------- + +class Run final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.Run) */ { + public: + inline Run() : Run(nullptr) {} + ~Run() override; + explicit constexpr Run(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + Run(const Run& from); + Run(Run&& from) noexcept + : Run() { + *this = ::std::move(from); + } + + inline Run& operator=(const Run& from) { + CopyFrom(from); + return *this; + } + inline Run& operator=(Run&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return 
default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const Run& default_instance() { + return *internal_default_instance(); + } + static inline const Run* internal_default_instance() { + return reinterpret_cast( + &_Run_default_instance_); + } + static constexpr int kIndexInFileMessages = + 11; + + friend void swap(Run& a, Run& b) { + a.Swap(&b); + } + inline void Swap(Run* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(Run* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline Run* New() const final { + return new Run(); + } + + Run* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const Run& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const Run& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(Run* 
other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.Run"; + } + protected: + explicit Run(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kFabIdFieldNumber = 2, + kFabVersionFieldNumber = 3, + kRunIdFieldNumber = 1, + }; + // string fab_id = 2; + void clear_fab_id(); + const std::string& fab_id() const; + template + void set_fab_id(ArgT0&& arg0, ArgT... args); + std::string* mutable_fab_id(); + PROTOBUF_MUST_USE_RESULT std::string* release_fab_id(); + void set_allocated_fab_id(std::string* fab_id); + private: + const std::string& _internal_fab_id() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_fab_id(const std::string& value); + std::string* _internal_mutable_fab_id(); + public: + + // string fab_version = 3; + void clear_fab_version(); + const std::string& fab_version() const; + template + void set_fab_version(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_fab_version(); + PROTOBUF_MUST_USE_RESULT std::string* release_fab_version(); + void set_allocated_fab_version(std::string* fab_version); + private: + const std::string& _internal_fab_version() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_fab_version(const std::string& value); + std::string* _internal_mutable_fab_version(); + public: + + // sint64 run_id = 1; + void clear_run_id(); + ::PROTOBUF_NAMESPACE_ID::int64 run_id() const; + void set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value); + private: + ::PROTOBUF_NAMESPACE_ID::int64 _internal_run_id() const; + void _internal_set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value); + public: + + // @@protoc_insertion_point(class_scope:flwr.proto.Run) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr fab_id_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr fab_version_; + ::PROTOBUF_NAMESPACE_ID::int64 run_id_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2ffleet_2eproto; +}; +// ------------------------------------------------------------------- + +class GetRunRequest final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.GetRunRequest) */ { + public: + inline GetRunRequest() : GetRunRequest(nullptr) {} + ~GetRunRequest() override; + explicit constexpr GetRunRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + GetRunRequest(const GetRunRequest& from); + GetRunRequest(GetRunRequest&& from) noexcept + : GetRunRequest() { + *this = ::std::move(from); + } + + inline GetRunRequest& operator=(const GetRunRequest& from) { + CopyFrom(from); + return *this; + } + inline GetRunRequest& operator=(GetRunRequest&& from) noexcept { + if (this == &from) return *this; + if 
(GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const GetRunRequest& default_instance() { + return *internal_default_instance(); + } + static inline const GetRunRequest* internal_default_instance() { + return reinterpret_cast( + &_GetRunRequest_default_instance_); + } + static constexpr int kIndexInFileMessages = + 12; + + friend void swap(GetRunRequest& a, GetRunRequest& b) { + a.Swap(&b); + } + inline void Swap(GetRunRequest* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(GetRunRequest* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline GetRunRequest* New() const final { + return new GetRunRequest(); + } + + GetRunRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const GetRunRequest& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const GetRunRequest& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool 
IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(GetRunRequest* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.GetRunRequest"; + } + protected: + explicit GetRunRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kRunIdFieldNumber = 1, + }; + // sint64 run_id = 1; + void clear_run_id(); + ::PROTOBUF_NAMESPACE_ID::int64 run_id() const; + void set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value); + private: + ::PROTOBUF_NAMESPACE_ID::int64 _internal_run_id() const; + void _internal_set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value); + public: + + // @@protoc_insertion_point(class_scope:flwr.proto.GetRunRequest) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::int64 run_id_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize 
_cached_size_; + friend struct ::TableStruct_flwr_2fproto_2ffleet_2eproto; +}; +// ------------------------------------------------------------------- + +class GetRunResponse final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.GetRunResponse) */ { + public: + inline GetRunResponse() : GetRunResponse(nullptr) {} + ~GetRunResponse() override; + explicit constexpr GetRunResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + GetRunResponse(const GetRunResponse& from); + GetRunResponse(GetRunResponse&& from) noexcept + : GetRunResponse() { + *this = ::std::move(from); + } + + inline GetRunResponse& operator=(const GetRunResponse& from) { + CopyFrom(from); + return *this; + } + inline GetRunResponse& operator=(GetRunResponse&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const GetRunResponse& default_instance() { + return *internal_default_instance(); + } + static inline const GetRunResponse* internal_default_instance() { + return reinterpret_cast( + &_GetRunResponse_default_instance_); + } + static constexpr int kIndexInFileMessages = + 13; + + friend void swap(GetRunResponse& a, GetRunResponse& b) { + a.Swap(&b); + } + inline void Swap(GetRunResponse* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + 
::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(GetRunResponse* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline GetRunResponse* New() const final { + return new GetRunResponse(); + } + + GetRunResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); } using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; - void CopyFrom(const PushTaskResResponse& from); + void CopyFrom(const GetRunResponse& from); using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - void MergeFrom(const PushTaskResResponse& from); + void MergeFrom(const GetRunResponse& from); private: static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); public: @@ -1267,13 +2115,13 @@ class PushTaskResResponse final : void SharedCtor(); void SharedDtor(); void SetCachedSize(int size) const final; - void InternalSwap(PushTaskResResponse* other); + void InternalSwap(GetRunResponse* other); friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "flwr.proto.PushTaskResResponse"; + return "flwr.proto.GetRunResponse"; } protected: - explicit PushTaskResResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, + explicit GetRunResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); private: static void ArenaDtor(void* object); @@ -1287,61 +2135,37 @@ class PushTaskResResponse final : // nested types ---------------------------------------------------- - // accessors ------------------------------------------------------- enum : int { - kResultsFieldNumber = 2, - kReconnectFieldNumber = 1, + kRunFieldNumber = 1, }; - // map results = 2; - int results_size() const; - private: - int _internal_results_size() const; - public: - void 
clear_results(); - private: - const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >& - _internal_results() const; - ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >* - _internal_mutable_results(); - public: - const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >& - results() const; - ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::uint32 >* - mutable_results(); - - // .flwr.proto.Reconnect reconnect = 1; - bool has_reconnect() const; + // .flwr.proto.Run run = 1; + bool has_run() const; private: - bool _internal_has_reconnect() const; + bool _internal_has_run() const; public: - void clear_reconnect(); - const ::flwr::proto::Reconnect& reconnect() const; - PROTOBUF_MUST_USE_RESULT ::flwr::proto::Reconnect* release_reconnect(); - ::flwr::proto::Reconnect* mutable_reconnect(); - void set_allocated_reconnect(::flwr::proto::Reconnect* reconnect); + void clear_run(); + const ::flwr::proto::Run& run() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::Run* release_run(); + ::flwr::proto::Run* mutable_run(); + void set_allocated_run(::flwr::proto::Run* run); private: - const ::flwr::proto::Reconnect& _internal_reconnect() const; - ::flwr::proto::Reconnect* _internal_mutable_reconnect(); + const ::flwr::proto::Run& _internal_run() const; + ::flwr::proto::Run* _internal_mutable_run(); public: - void unsafe_arena_set_allocated_reconnect( - ::flwr::proto::Reconnect* reconnect); - ::flwr::proto::Reconnect* unsafe_arena_release_reconnect(); + void unsafe_arena_set_allocated_run( + ::flwr::proto::Run* run); + ::flwr::proto::Run* unsafe_arena_release_run(); - // @@protoc_insertion_point(class_scope:flwr.proto.PushTaskResResponse) + // @@protoc_insertion_point(class_scope:flwr.proto.GetRunResponse) private: class _Internal; template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; typedef void InternalArenaConstructable_; typedef void DestructorSkippable_; - 
::PROTOBUF_NAMESPACE_ID::internal::MapField< - PushTaskResResponse_ResultsEntry_DoNotUse, - std::string, ::PROTOBUF_NAMESPACE_ID::uint32, - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING, - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_UINT32> results_; - ::flwr::proto::Reconnect* reconnect_; + ::flwr::proto::Run* run_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; friend struct ::TableStruct_flwr_2fproto_2ffleet_2eproto; }; @@ -1395,7 +2219,7 @@ class Reconnect final : &_Reconnect_default_instance_); } static constexpr int kIndexInFileMessages = - 9; + 14; friend void swap(Reconnect& a, Reconnect& b) { a.Swap(&b); @@ -1499,6 +2323,26 @@ class Reconnect final : #endif // __GNUC__ // CreateNodeRequest +// double ping_interval = 1; +inline void CreateNodeRequest::clear_ping_interval() { + ping_interval_ = 0; +} +inline double CreateNodeRequest::_internal_ping_interval() const { + return ping_interval_; +} +inline double CreateNodeRequest::ping_interval() const { + // @@protoc_insertion_point(field_get:flwr.proto.CreateNodeRequest.ping_interval) + return _internal_ping_interval(); +} +inline void CreateNodeRequest::_internal_set_ping_interval(double value) { + + ping_interval_ = value; +} +inline void CreateNodeRequest::set_ping_interval(double value) { + _internal_set_ping_interval(value); + // @@protoc_insertion_point(field_set:flwr.proto.CreateNodeRequest.ping_interval) +} + // ------------------------------------------------------------------- // CreateNodeResponse @@ -1685,6 +2529,140 @@ inline void DeleteNodeRequest::set_allocated_node(::flwr::proto::Node* node) { // ------------------------------------------------------------------- +// PingRequest + +// .flwr.proto.Node node = 1; +inline bool PingRequest::_internal_has_node() const { + return this != internal_default_instance() && node_ != nullptr; +} +inline bool PingRequest::has_node() const { + return _internal_has_node(); +} +inline const 
::flwr::proto::Node& PingRequest::_internal_node() const { + const ::flwr::proto::Node* p = node_; + return p != nullptr ? *p : reinterpret_cast( + ::flwr::proto::_Node_default_instance_); +} +inline const ::flwr::proto::Node& PingRequest::node() const { + // @@protoc_insertion_point(field_get:flwr.proto.PingRequest.node) + return _internal_node(); +} +inline void PingRequest::unsafe_arena_set_allocated_node( + ::flwr::proto::Node* node) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(node_); + } + node_ = node; + if (node) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.PingRequest.node) +} +inline ::flwr::proto::Node* PingRequest::release_node() { + + ::flwr::proto::Node* temp = node_; + node_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::flwr::proto::Node* PingRequest::unsafe_arena_release_node() { + // @@protoc_insertion_point(field_release:flwr.proto.PingRequest.node) + + ::flwr::proto::Node* temp = node_; + node_ = nullptr; + return temp; +} +inline ::flwr::proto::Node* PingRequest::_internal_mutable_node() { + + if (node_ == nullptr) { + auto* p = CreateMaybeMessage<::flwr::proto::Node>(GetArenaForAllocation()); + node_ = p; + } + return node_; +} +inline ::flwr::proto::Node* PingRequest::mutable_node() { + ::flwr::proto::Node* _msg = _internal_mutable_node(); + // @@protoc_insertion_point(field_mutable:flwr.proto.PingRequest.node) + return _msg; +} +inline void PingRequest::set_allocated_node(::flwr::proto::Node* node) 
{ + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(node_); + } + if (node) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper< + ::PROTOBUF_NAMESPACE_ID::MessageLite>::GetOwningArena( + reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(node)); + if (message_arena != submessage_arena) { + node = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, node, submessage_arena); + } + + } else { + + } + node_ = node; + // @@protoc_insertion_point(field_set_allocated:flwr.proto.PingRequest.node) +} + +// double ping_interval = 2; +inline void PingRequest::clear_ping_interval() { + ping_interval_ = 0; +} +inline double PingRequest::_internal_ping_interval() const { + return ping_interval_; +} +inline double PingRequest::ping_interval() const { + // @@protoc_insertion_point(field_get:flwr.proto.PingRequest.ping_interval) + return _internal_ping_interval(); +} +inline void PingRequest::_internal_set_ping_interval(double value) { + + ping_interval_ = value; +} +inline void PingRequest::set_ping_interval(double value) { + _internal_set_ping_interval(value); + // @@protoc_insertion_point(field_set:flwr.proto.PingRequest.ping_interval) +} + +// ------------------------------------------------------------------- + +// PingResponse + +// bool success = 1; +inline void PingResponse::clear_success() { + success_ = false; +} +inline bool PingResponse::_internal_success() const { + return success_; +} +inline bool PingResponse::success() const { + // @@protoc_insertion_point(field_get:flwr.proto.PingResponse.success) + return _internal_success(); +} +inline void PingResponse::_internal_set_success(bool value) { + + success_ = value; +} +inline void PingResponse::set_success(bool value) { + _internal_set_success(value); + // 
@@protoc_insertion_point(field_set:flwr.proto.PingResponse.success) +} + +// ------------------------------------------------------------------- + // PullTaskInsRequest // .flwr.proto.Node node = 1; @@ -2147,6 +3125,240 @@ PushTaskResResponse::mutable_results() { // ------------------------------------------------------------------- +// Run + +// sint64 run_id = 1; +inline void Run::clear_run_id() { + run_id_ = int64_t{0}; +} +inline ::PROTOBUF_NAMESPACE_ID::int64 Run::_internal_run_id() const { + return run_id_; +} +inline ::PROTOBUF_NAMESPACE_ID::int64 Run::run_id() const { + // @@protoc_insertion_point(field_get:flwr.proto.Run.run_id) + return _internal_run_id(); +} +inline void Run::_internal_set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value) { + + run_id_ = value; +} +inline void Run::set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value) { + _internal_set_run_id(value); + // @@protoc_insertion_point(field_set:flwr.proto.Run.run_id) +} + +// string fab_id = 2; +inline void Run::clear_fab_id() { + fab_id_.ClearToEmpty(); +} +inline const std::string& Run::fab_id() const { + // @@protoc_insertion_point(field_get:flwr.proto.Run.fab_id) + return _internal_fab_id(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void Run::set_fab_id(ArgT0&& arg0, ArgT... 
args) { + + fab_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:flwr.proto.Run.fab_id) +} +inline std::string* Run::mutable_fab_id() { + std::string* _s = _internal_mutable_fab_id(); + // @@protoc_insertion_point(field_mutable:flwr.proto.Run.fab_id) + return _s; +} +inline const std::string& Run::_internal_fab_id() const { + return fab_id_.Get(); +} +inline void Run::_internal_set_fab_id(const std::string& value) { + + fab_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); +} +inline std::string* Run::_internal_mutable_fab_id() { + + return fab_id_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); +} +inline std::string* Run::release_fab_id() { + // @@protoc_insertion_point(field_release:flwr.proto.Run.fab_id) + return fab_id_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); +} +inline void Run::set_allocated_fab_id(std::string* fab_id) { + if (fab_id != nullptr) { + + } else { + + } + fab_id_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), fab_id, + GetArenaForAllocation()); + // @@protoc_insertion_point(field_set_allocated:flwr.proto.Run.fab_id) +} + +// string fab_version = 3; +inline void Run::clear_fab_version() { + fab_version_.ClearToEmpty(); +} +inline const std::string& Run::fab_version() const { + // @@protoc_insertion_point(field_get:flwr.proto.Run.fab_version) + return _internal_fab_version(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void Run::set_fab_version(ArgT0&& arg0, ArgT... 
args) { + + fab_version_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:flwr.proto.Run.fab_version) +} +inline std::string* Run::mutable_fab_version() { + std::string* _s = _internal_mutable_fab_version(); + // @@protoc_insertion_point(field_mutable:flwr.proto.Run.fab_version) + return _s; +} +inline const std::string& Run::_internal_fab_version() const { + return fab_version_.Get(); +} +inline void Run::_internal_set_fab_version(const std::string& value) { + + fab_version_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); +} +inline std::string* Run::_internal_mutable_fab_version() { + + return fab_version_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); +} +inline std::string* Run::release_fab_version() { + // @@protoc_insertion_point(field_release:flwr.proto.Run.fab_version) + return fab_version_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); +} +inline void Run::set_allocated_fab_version(std::string* fab_version) { + if (fab_version != nullptr) { + + } else { + + } + fab_version_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), fab_version, + GetArenaForAllocation()); + // @@protoc_insertion_point(field_set_allocated:flwr.proto.Run.fab_version) +} + +// ------------------------------------------------------------------- + +// GetRunRequest + +// sint64 run_id = 1; +inline void GetRunRequest::clear_run_id() { + run_id_ = int64_t{0}; +} +inline ::PROTOBUF_NAMESPACE_ID::int64 GetRunRequest::_internal_run_id() const { + return run_id_; +} +inline ::PROTOBUF_NAMESPACE_ID::int64 GetRunRequest::run_id() const { + // @@protoc_insertion_point(field_get:flwr.proto.GetRunRequest.run_id) + return _internal_run_id(); +} +inline void 
GetRunRequest::_internal_set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value) { + + run_id_ = value; +} +inline void GetRunRequest::set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value) { + _internal_set_run_id(value); + // @@protoc_insertion_point(field_set:flwr.proto.GetRunRequest.run_id) +} + +// ------------------------------------------------------------------- + +// GetRunResponse + +// .flwr.proto.Run run = 1; +inline bool GetRunResponse::_internal_has_run() const { + return this != internal_default_instance() && run_ != nullptr; +} +inline bool GetRunResponse::has_run() const { + return _internal_has_run(); +} +inline void GetRunResponse::clear_run() { + if (GetArenaForAllocation() == nullptr && run_ != nullptr) { + delete run_; + } + run_ = nullptr; +} +inline const ::flwr::proto::Run& GetRunResponse::_internal_run() const { + const ::flwr::proto::Run* p = run_; + return p != nullptr ? *p : reinterpret_cast( + ::flwr::proto::_Run_default_instance_); +} +inline const ::flwr::proto::Run& GetRunResponse::run() const { + // @@protoc_insertion_point(field_get:flwr.proto.GetRunResponse.run) + return _internal_run(); +} +inline void GetRunResponse::unsafe_arena_set_allocated_run( + ::flwr::proto::Run* run) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(run_); + } + run_ = run; + if (run) { + + } else { + + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.GetRunResponse.run) +} +inline ::flwr::proto::Run* GetRunResponse::release_run() { + + ::flwr::proto::Run* temp = run_; + run_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + 
} +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::flwr::proto::Run* GetRunResponse::unsafe_arena_release_run() { + // @@protoc_insertion_point(field_release:flwr.proto.GetRunResponse.run) + + ::flwr::proto::Run* temp = run_; + run_ = nullptr; + return temp; +} +inline ::flwr::proto::Run* GetRunResponse::_internal_mutable_run() { + + if (run_ == nullptr) { + auto* p = CreateMaybeMessage<::flwr::proto::Run>(GetArenaForAllocation()); + run_ = p; + } + return run_; +} +inline ::flwr::proto::Run* GetRunResponse::mutable_run() { + ::flwr::proto::Run* _msg = _internal_mutable_run(); + // @@protoc_insertion_point(field_mutable:flwr.proto.GetRunResponse.run) + return _msg; +} +inline void GetRunResponse::set_allocated_run(::flwr::proto::Run* run) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete run_; + } + if (run) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Run>::GetOwningArena(run); + if (message_arena != submessage_arena) { + run = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, run, submessage_arena); + } + + } else { + + } + run_ = run; + // @@protoc_insertion_point(field_set_allocated:flwr.proto.GetRunResponse.run) +} + +// ------------------------------------------------------------------- + // Reconnect // uint64 reconnect = 1; @@ -2190,6 +3402,16 @@ inline void Reconnect::set_reconnect(::PROTOBUF_NAMESPACE_ID::uint64 value) { // ------------------------------------------------------------------- +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + // 
@@protoc_insertion_point(namespace_scope) diff --git a/src/cc/flwr/include/flwr/proto/recordset.grpc.pb.cc b/src/cc/flwr/include/flwr/proto/recordset.grpc.pb.cc new file mode 100644 index 000000000000..4fb909308dc2 --- /dev/null +++ b/src/cc/flwr/include/flwr/proto/recordset.grpc.pb.cc @@ -0,0 +1,27 @@ +// Generated by the gRPC C++ plugin. +// If you make any local change, they will be lost. +// source: flwr/proto/recordset.proto + +#include "flwr/proto/recordset.pb.h" +#include "flwr/proto/recordset.grpc.pb.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +namespace flwr { +namespace proto { + +} // namespace flwr +} // namespace proto + diff --git a/src/cc/flwr/include/flwr/proto/recordset.grpc.pb.h b/src/cc/flwr/include/flwr/proto/recordset.grpc.pb.h new file mode 100644 index 000000000000..0aeae1ab16a6 --- /dev/null +++ b/src/cc/flwr/include/flwr/proto/recordset.grpc.pb.h @@ -0,0 +1,51 @@ +// Generated by the gRPC C++ plugin. +// If you make any local change, they will be lost. +// source: flwr/proto/recordset.proto +// Original file comments: +// Copyright 2024 Flower Labs GmbH. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// ============================================================================== +// +#ifndef GRPC_flwr_2fproto_2frecordset_2eproto__INCLUDED +#define GRPC_flwr_2fproto_2frecordset_2eproto__INCLUDED + +#include "flwr/proto/recordset.pb.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace flwr { +namespace proto { + +} // namespace proto +} // namespace flwr + + +#endif // GRPC_flwr_2fproto_2frecordset_2eproto__INCLUDED diff --git a/src/cc/flwr/include/flwr/proto/recordset.pb.cc b/src/cc/flwr/include/flwr/proto/recordset.pb.cc new file mode 100644 index 000000000000..a7cf72084d7a --- /dev/null +++ b/src/cc/flwr/include/flwr/proto/recordset.pb.cc @@ -0,0 +1,3907 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: flwr/proto/recordset.proto + +#include "flwr/proto/recordset.pb.h" + +#include + +#include +#include +#include +#include +#include +#include +#include +// @@protoc_insertion_point(includes) +#include + +PROTOBUF_PRAGMA_INIT_SEG +namespace flwr { +namespace proto { +constexpr DoubleList::DoubleList( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : vals_(){} +struct DoubleListDefaultTypeInternal { + constexpr DoubleListDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~DoubleListDefaultTypeInternal() {} + union { + DoubleList _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT DoubleListDefaultTypeInternal _DoubleList_default_instance_; +constexpr Sint64List::Sint64List( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : vals_() + , _vals_cached_byte_size_(0){} +struct Sint64ListDefaultTypeInternal { + constexpr Sint64ListDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~Sint64ListDefaultTypeInternal() {} + union { + Sint64List _instance; + }; +}; 
+PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT Sint64ListDefaultTypeInternal _Sint64List_default_instance_; +constexpr BoolList::BoolList( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : vals_(){} +struct BoolListDefaultTypeInternal { + constexpr BoolListDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~BoolListDefaultTypeInternal() {} + union { + BoolList _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT BoolListDefaultTypeInternal _BoolList_default_instance_; +constexpr StringList::StringList( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : vals_(){} +struct StringListDefaultTypeInternal { + constexpr StringListDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~StringListDefaultTypeInternal() {} + union { + StringList _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT StringListDefaultTypeInternal _StringList_default_instance_; +constexpr BytesList::BytesList( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : vals_(){} +struct BytesListDefaultTypeInternal { + constexpr BytesListDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~BytesListDefaultTypeInternal() {} + union { + BytesList _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT BytesListDefaultTypeInternal _BytesList_default_instance_; +constexpr Array::Array( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : shape_() + , _shape_cached_byte_size_(0) + , dtype_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) + , stype_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) + , data_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string){} +struct ArrayDefaultTypeInternal { + constexpr ArrayDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~ArrayDefaultTypeInternal() {} + 
union { + Array _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT ArrayDefaultTypeInternal _Array_default_instance_; +constexpr MetricsRecordValue::MetricsRecordValue( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : _oneof_case_{}{} +struct MetricsRecordValueDefaultTypeInternal { + constexpr MetricsRecordValueDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~MetricsRecordValueDefaultTypeInternal() {} + union { + MetricsRecordValue _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT MetricsRecordValueDefaultTypeInternal _MetricsRecordValue_default_instance_; +constexpr ConfigsRecordValue::ConfigsRecordValue( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : _oneof_case_{}{} +struct ConfigsRecordValueDefaultTypeInternal { + constexpr ConfigsRecordValueDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~ConfigsRecordValueDefaultTypeInternal() {} + union { + ConfigsRecordValue _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT ConfigsRecordValueDefaultTypeInternal _ConfigsRecordValue_default_instance_; +constexpr ParametersRecord::ParametersRecord( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : data_keys_() + , data_values_(){} +struct ParametersRecordDefaultTypeInternal { + constexpr ParametersRecordDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~ParametersRecordDefaultTypeInternal() {} + union { + ParametersRecord _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT ParametersRecordDefaultTypeInternal _ParametersRecord_default_instance_; +constexpr MetricsRecord_DataEntry_DoNotUse::MetricsRecord_DataEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized){} +struct MetricsRecord_DataEntry_DoNotUseDefaultTypeInternal { + constexpr MetricsRecord_DataEntry_DoNotUseDefaultTypeInternal() + : 
_instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~MetricsRecord_DataEntry_DoNotUseDefaultTypeInternal() {} + union { + MetricsRecord_DataEntry_DoNotUse _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT MetricsRecord_DataEntry_DoNotUseDefaultTypeInternal _MetricsRecord_DataEntry_DoNotUse_default_instance_; +constexpr MetricsRecord::MetricsRecord( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : data_(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}){} +struct MetricsRecordDefaultTypeInternal { + constexpr MetricsRecordDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~MetricsRecordDefaultTypeInternal() {} + union { + MetricsRecord _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT MetricsRecordDefaultTypeInternal _MetricsRecord_default_instance_; +constexpr ConfigsRecord_DataEntry_DoNotUse::ConfigsRecord_DataEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized){} +struct ConfigsRecord_DataEntry_DoNotUseDefaultTypeInternal { + constexpr ConfigsRecord_DataEntry_DoNotUseDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~ConfigsRecord_DataEntry_DoNotUseDefaultTypeInternal() {} + union { + ConfigsRecord_DataEntry_DoNotUse _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT ConfigsRecord_DataEntry_DoNotUseDefaultTypeInternal _ConfigsRecord_DataEntry_DoNotUse_default_instance_; +constexpr ConfigsRecord::ConfigsRecord( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : data_(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}){} +struct ConfigsRecordDefaultTypeInternal { + constexpr ConfigsRecordDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~ConfigsRecordDefaultTypeInternal() {} + union { + ConfigsRecord _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT 
ConfigsRecordDefaultTypeInternal _ConfigsRecord_default_instance_; +constexpr RecordSet_ParametersEntry_DoNotUse::RecordSet_ParametersEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized){} +struct RecordSet_ParametersEntry_DoNotUseDefaultTypeInternal { + constexpr RecordSet_ParametersEntry_DoNotUseDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~RecordSet_ParametersEntry_DoNotUseDefaultTypeInternal() {} + union { + RecordSet_ParametersEntry_DoNotUse _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT RecordSet_ParametersEntry_DoNotUseDefaultTypeInternal _RecordSet_ParametersEntry_DoNotUse_default_instance_; +constexpr RecordSet_MetricsEntry_DoNotUse::RecordSet_MetricsEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized){} +struct RecordSet_MetricsEntry_DoNotUseDefaultTypeInternal { + constexpr RecordSet_MetricsEntry_DoNotUseDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~RecordSet_MetricsEntry_DoNotUseDefaultTypeInternal() {} + union { + RecordSet_MetricsEntry_DoNotUse _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT RecordSet_MetricsEntry_DoNotUseDefaultTypeInternal _RecordSet_MetricsEntry_DoNotUse_default_instance_; +constexpr RecordSet_ConfigsEntry_DoNotUse::RecordSet_ConfigsEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized){} +struct RecordSet_ConfigsEntry_DoNotUseDefaultTypeInternal { + constexpr RecordSet_ConfigsEntry_DoNotUseDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~RecordSet_ConfigsEntry_DoNotUseDefaultTypeInternal() {} + union { + RecordSet_ConfigsEntry_DoNotUse _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT RecordSet_ConfigsEntry_DoNotUseDefaultTypeInternal _RecordSet_ConfigsEntry_DoNotUse_default_instance_; +constexpr RecordSet::RecordSet( + 
::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) + : parameters_(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) + , metrics_(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) + , configs_(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}){} +struct RecordSetDefaultTypeInternal { + constexpr RecordSetDefaultTypeInternal() + : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} + ~RecordSetDefaultTypeInternal() {} + union { + RecordSet _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT RecordSetDefaultTypeInternal _RecordSet_default_instance_; +} // namespace proto +} // namespace flwr +static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_flwr_2fproto_2frecordset_2eproto[17]; +static constexpr ::PROTOBUF_NAMESPACE_ID::EnumDescriptor const** file_level_enum_descriptors_flwr_2fproto_2frecordset_2eproto = nullptr; +static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** file_level_service_descriptors_flwr_2fproto_2frecordset_2eproto = nullptr; + +const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_flwr_2fproto_2frecordset_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::DoubleList, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::DoubleList, vals_), + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::Sint64List, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::Sint64List, vals_), + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::BoolList, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + 
PROTOBUF_FIELD_OFFSET(::flwr::proto::BoolList, vals_), + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::StringList, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::StringList, vals_), + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::BytesList, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::BytesList, vals_), + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::Array, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::Array, dtype_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::Array, shape_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::Array, stype_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::Array, data_), + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::MetricsRecordValue, _internal_metadata_), + ~0u, // no _extensions_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::MetricsRecordValue, _oneof_case_[0]), + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + PROTOBUF_FIELD_OFFSET(::flwr::proto::MetricsRecordValue, value_), + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::ConfigsRecordValue, _internal_metadata_), + ~0u, // no _extensions_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::ConfigsRecordValue, _oneof_case_[0]), + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + 
::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, + PROTOBUF_FIELD_OFFSET(::flwr::proto::ConfigsRecordValue, value_), + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::ParametersRecord, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::ParametersRecord, data_keys_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::ParametersRecord, data_values_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::MetricsRecord_DataEntry_DoNotUse, _has_bits_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::MetricsRecord_DataEntry_DoNotUse, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::MetricsRecord_DataEntry_DoNotUse, key_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::MetricsRecord_DataEntry_DoNotUse, value_), + 0, + 1, + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::MetricsRecord, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::MetricsRecord, data_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::ConfigsRecord_DataEntry_DoNotUse, _has_bits_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::ConfigsRecord_DataEntry_DoNotUse, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + 
~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::ConfigsRecord_DataEntry_DoNotUse, key_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::ConfigsRecord_DataEntry_DoNotUse, value_), + 0, + 1, + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::ConfigsRecord, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::ConfigsRecord, data_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_ParametersEntry_DoNotUse, _has_bits_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_ParametersEntry_DoNotUse, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_ParametersEntry_DoNotUse, key_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_ParametersEntry_DoNotUse, value_), + 0, + 1, + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_MetricsEntry_DoNotUse, _has_bits_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_MetricsEntry_DoNotUse, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_MetricsEntry_DoNotUse, key_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_MetricsEntry_DoNotUse, value_), + 0, + 1, + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_ConfigsEntry_DoNotUse, _has_bits_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_ConfigsEntry_DoNotUse, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_ConfigsEntry_DoNotUse, key_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet_ConfigsEntry_DoNotUse, value_), + 0, + 1, + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet, 
_internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet, parameters_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet, metrics_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::RecordSet, configs_), +}; +static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { + { 0, -1, -1, sizeof(::flwr::proto::DoubleList)}, + { 7, -1, -1, sizeof(::flwr::proto::Sint64List)}, + { 14, -1, -1, sizeof(::flwr::proto::BoolList)}, + { 21, -1, -1, sizeof(::flwr::proto::StringList)}, + { 28, -1, -1, sizeof(::flwr::proto::BytesList)}, + { 35, -1, -1, sizeof(::flwr::proto::Array)}, + { 45, -1, -1, sizeof(::flwr::proto::MetricsRecordValue)}, + { 56, -1, -1, sizeof(::flwr::proto::ConfigsRecordValue)}, + { 73, -1, -1, sizeof(::flwr::proto::ParametersRecord)}, + { 81, 89, -1, sizeof(::flwr::proto::MetricsRecord_DataEntry_DoNotUse)}, + { 91, -1, -1, sizeof(::flwr::proto::MetricsRecord)}, + { 98, 106, -1, sizeof(::flwr::proto::ConfigsRecord_DataEntry_DoNotUse)}, + { 108, -1, -1, sizeof(::flwr::proto::ConfigsRecord)}, + { 115, 123, -1, sizeof(::flwr::proto::RecordSet_ParametersEntry_DoNotUse)}, + { 125, 133, -1, sizeof(::flwr::proto::RecordSet_MetricsEntry_DoNotUse)}, + { 135, 143, -1, sizeof(::flwr::proto::RecordSet_ConfigsEntry_DoNotUse)}, + { 145, -1, -1, sizeof(::flwr::proto::RecordSet)}, +}; + +static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = { + reinterpret_cast(&::flwr::proto::_DoubleList_default_instance_), + reinterpret_cast(&::flwr::proto::_Sint64List_default_instance_), + reinterpret_cast(&::flwr::proto::_BoolList_default_instance_), + reinterpret_cast(&::flwr::proto::_StringList_default_instance_), + reinterpret_cast(&::flwr::proto::_BytesList_default_instance_), + reinterpret_cast(&::flwr::proto::_Array_default_instance_), + 
reinterpret_cast(&::flwr::proto::_MetricsRecordValue_default_instance_), + reinterpret_cast(&::flwr::proto::_ConfigsRecordValue_default_instance_), + reinterpret_cast(&::flwr::proto::_ParametersRecord_default_instance_), + reinterpret_cast(&::flwr::proto::_MetricsRecord_DataEntry_DoNotUse_default_instance_), + reinterpret_cast(&::flwr::proto::_MetricsRecord_default_instance_), + reinterpret_cast(&::flwr::proto::_ConfigsRecord_DataEntry_DoNotUse_default_instance_), + reinterpret_cast(&::flwr::proto::_ConfigsRecord_default_instance_), + reinterpret_cast(&::flwr::proto::_RecordSet_ParametersEntry_DoNotUse_default_instance_), + reinterpret_cast(&::flwr::proto::_RecordSet_MetricsEntry_DoNotUse_default_instance_), + reinterpret_cast(&::flwr::proto::_RecordSet_ConfigsEntry_DoNotUse_default_instance_), + reinterpret_cast(&::flwr::proto::_RecordSet_default_instance_), +}; + +const char descriptor_table_protodef_flwr_2fproto_2frecordset_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = + "\n\032flwr/proto/recordset.proto\022\nflwr.proto" + "\"\032\n\nDoubleList\022\014\n\004vals\030\001 \003(\001\"\032\n\nSint64Li" + "st\022\014\n\004vals\030\001 \003(\022\"\030\n\010BoolList\022\014\n\004vals\030\001 \003" + "(\010\"\032\n\nStringList\022\014\n\004vals\030\001 \003(\t\"\031\n\tBytesL" + "ist\022\014\n\004vals\030\001 \003(\014\"B\n\005Array\022\r\n\005dtype\030\001 \001(" + "\t\022\r\n\005shape\030\002 \003(\005\022\r\n\005stype\030\003 \001(\t\022\014\n\004data\030" + "\004 \001(\014\"\237\001\n\022MetricsRecordValue\022\020\n\006double\030\001" + " \001(\001H\000\022\020\n\006sint64\030\002 \001(\022H\000\022-\n\013double_list\030" + "\025 \001(\0132\026.flwr.proto.DoubleListH\000\022-\n\013sint6" + "4_list\030\026 \001(\0132\026.flwr.proto.Sint64ListH\000B\007" + "\n\005value\"\331\002\n\022ConfigsRecordValue\022\020\n\006double" + "\030\001 \001(\001H\000\022\020\n\006sint64\030\002 \001(\022H\000\022\016\n\004bool\030\003 \001(\010" + 
"H\000\022\020\n\006string\030\004 \001(\tH\000\022\017\n\005bytes\030\005 \001(\014H\000\022-\n" + "\013double_list\030\025 \001(\0132\026.flwr.proto.DoubleLi" + "stH\000\022-\n\013sint64_list\030\026 \001(\0132\026.flwr.proto.S" + "int64ListH\000\022)\n\tbool_list\030\027 \001(\0132\024.flwr.pr" + "oto.BoolListH\000\022-\n\013string_list\030\030 \001(\0132\026.fl" + "wr.proto.StringListH\000\022+\n\nbytes_list\030\031 \001(" + "\0132\025.flwr.proto.BytesListH\000B\007\n\005value\"M\n\020P" + "arametersRecord\022\021\n\tdata_keys\030\001 \003(\t\022&\n\013da" + "ta_values\030\002 \003(\0132\021.flwr.proto.Array\"\217\001\n\rM" + "etricsRecord\0221\n\004data\030\001 \003(\0132#.flwr.proto." + "MetricsRecord.DataEntry\032K\n\tDataEntry\022\013\n\003" + "key\030\001 \001(\t\022-\n\005value\030\002 \001(\0132\036.flwr.proto.Me" + "tricsRecordValue:\0028\001\"\217\001\n\rConfigsRecord\0221" + "\n\004data\030\001 \003(\0132#.flwr.proto.ConfigsRecord." + "DataEntry\032K\n\tDataEntry\022\013\n\003key\030\001 \001(\t\022-\n\005v" + "alue\030\002 \001(\0132\036.flwr.proto.ConfigsRecordVal" + "ue:\0028\001\"\227\003\n\tRecordSet\0229\n\nparameters\030\001 \003(\013" + "2%.flwr.proto.RecordSet.ParametersEntry\022" + "3\n\007metrics\030\002 \003(\0132\".flwr.proto.RecordSet." + "MetricsEntry\0223\n\007configs\030\003 \003(\0132\".flwr.pro" + "to.RecordSet.ConfigsEntry\032O\n\017ParametersE" + "ntry\022\013\n\003key\030\001 \001(\t\022+\n\005value\030\002 \001(\0132\034.flwr." 
+ "proto.ParametersRecord:\0028\001\032I\n\014MetricsEnt" + "ry\022\013\n\003key\030\001 \001(\t\022(\n\005value\030\002 \001(\0132\031.flwr.pr" + "oto.MetricsRecord:\0028\001\032I\n\014ConfigsEntry\022\013\n" + "\003key\030\001 \001(\t\022(\n\005value\030\002 \001(\0132\031.flwr.proto.C" + "onfigsRecord:\0028\001b\006proto3" + ; +static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_flwr_2fproto_2frecordset_2eproto_once; +const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_flwr_2fproto_2frecordset_2eproto = { + false, false, 1544, descriptor_table_protodef_flwr_2fproto_2frecordset_2eproto, "flwr/proto/recordset.proto", + &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, nullptr, 0, 17, + schemas, file_default_instances, TableStruct_flwr_2fproto_2frecordset_2eproto::offsets, + file_level_metadata_flwr_2fproto_2frecordset_2eproto, file_level_enum_descriptors_flwr_2fproto_2frecordset_2eproto, file_level_service_descriptors_flwr_2fproto_2frecordset_2eproto, +}; +PROTOBUF_ATTRIBUTE_WEAK const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable* descriptor_table_flwr_2fproto_2frecordset_2eproto_getter() { + return &descriptor_table_flwr_2fproto_2frecordset_2eproto; +} + +// Force running AddDescriptors() at dynamic initialization time. 
+PROTOBUF_ATTRIBUTE_INIT_PRIORITY static ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptorsRunner dynamic_init_dummy_flwr_2fproto_2frecordset_2eproto(&descriptor_table_flwr_2fproto_2frecordset_2eproto); +namespace flwr { +namespace proto { + +// =================================================================== + +class DoubleList::_Internal { + public: +}; + +DoubleList::DoubleList(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + vals_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.DoubleList) +} +DoubleList::DoubleList(const DoubleList& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + vals_(from.vals_) { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + // @@protoc_insertion_point(copy_constructor:flwr.proto.DoubleList) +} + +void DoubleList::SharedCtor() { +} + +DoubleList::~DoubleList() { + // @@protoc_insertion_point(destructor:flwr.proto.DoubleList) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void DoubleList::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void DoubleList::ArenaDtor(void* object) { + DoubleList* _this = reinterpret_cast< DoubleList* >(object); + (void)_this; +} +void DoubleList::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void DoubleList::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void DoubleList::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.DoubleList) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + vals_.Clear(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* 
DoubleList::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // repeated double vals = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedDoubleParser(_internal_mutable_vals(), ptr, ctx); + CHK_(ptr); + } else if (static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 9) { + _internal_add_vals(::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr)); + ptr += sizeof(double); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* DoubleList::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.DoubleList) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // repeated double vals = 1; + if (this->_internal_vals_size() > 0) { + target = stream->WriteFixedPacked(1, _internal_vals(), target); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), 
target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.DoubleList) + return target; +} + +size_t DoubleList::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.DoubleList) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // repeated double vals = 1; + { + unsigned int count = static_cast(this->_internal_vals_size()); + size_t data_size = 8UL * count; + if (data_size > 0) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size( + static_cast<::PROTOBUF_NAMESPACE_ID::int32>(data_size)); + } + total_size += data_size; + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData DoubleList::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + DoubleList::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*DoubleList::GetClassData() const { return &_class_data_; } + +void DoubleList::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void DoubleList::MergeFrom(const DoubleList& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.DoubleList) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + vals_.MergeFrom(from.vals_); + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void DoubleList::CopyFrom(const DoubleList& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.DoubleList) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool DoubleList::IsInitialized() const { + return true; +} + +void DoubleList::InternalSwap(DoubleList* other) { + using std::swap; + 
_internal_metadata_.InternalSwap(&other->_internal_metadata_); + vals_.InternalSwap(&other->vals_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata DoubleList::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[0]); +} + +// =================================================================== + +class Sint64List::_Internal { + public: +}; + +Sint64List::Sint64List(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + vals_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.Sint64List) +} +Sint64List::Sint64List(const Sint64List& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + vals_(from.vals_) { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + // @@protoc_insertion_point(copy_constructor:flwr.proto.Sint64List) +} + +void Sint64List::SharedCtor() { +} + +Sint64List::~Sint64List() { + // @@protoc_insertion_point(destructor:flwr.proto.Sint64List) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void Sint64List::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void Sint64List::ArenaDtor(void* object) { + Sint64List* _this = reinterpret_cast< Sint64List* >(object); + (void)_this; +} +void Sint64List::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void Sint64List::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void Sint64List::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.Sint64List) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about 
cached_has_bits being unused + (void) cached_has_bits; + + vals_.Clear(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* Sint64List::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // repeated sint64 vals = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedSInt64Parser(_internal_mutable_vals(), ptr, ctx); + CHK_(ptr); + } else if (static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8) { + _internal_add_vals(::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr)); + CHK_(ptr); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* Sint64List::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.Sint64List) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // repeated sint64 vals = 1; + { + int byte_size = _vals_cached_byte_size_.load(std::memory_order_relaxed); + if (byte_size > 0) { + target = stream->WriteSInt64Packed( + 1, _internal_vals(), byte_size, target); + } + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) 
{ + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.Sint64List) + return target; +} + +size_t Sint64List::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.Sint64List) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // repeated sint64 vals = 1; + { + size_t data_size = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + SInt64Size(this->vals_); + if (data_size > 0) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size( + static_cast<::PROTOBUF_NAMESPACE_ID::int32>(data_size)); + } + int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(data_size); + _vals_cached_byte_size_.store(cached_size, + std::memory_order_relaxed); + total_size += data_size; + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData Sint64List::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + Sint64List::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*Sint64List::GetClassData() const { return &_class_data_; } + +void Sint64List::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void Sint64List::MergeFrom(const Sint64List& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.Sint64List) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + vals_.MergeFrom(from.vals_); + 
_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void Sint64List::CopyFrom(const Sint64List& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.Sint64List) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool Sint64List::IsInitialized() const { + return true; +} + +void Sint64List::InternalSwap(Sint64List* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + vals_.InternalSwap(&other->vals_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata Sint64List::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[1]); +} + +// =================================================================== + +class BoolList::_Internal { + public: +}; + +BoolList::BoolList(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + vals_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.BoolList) +} +BoolList::BoolList(const BoolList& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + vals_(from.vals_) { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + // @@protoc_insertion_point(copy_constructor:flwr.proto.BoolList) +} + +void BoolList::SharedCtor() { +} + +BoolList::~BoolList() { + // @@protoc_insertion_point(destructor:flwr.proto.BoolList) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void BoolList::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void BoolList::ArenaDtor(void* object) { + BoolList* _this = 
reinterpret_cast< BoolList* >(object); + (void)_this; +} +void BoolList::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void BoolList::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void BoolList::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.BoolList) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + vals_.Clear(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* BoolList::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // repeated bool vals = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedBoolParser(_internal_mutable_vals(), ptr, ctx); + CHK_(ptr); + } else if (static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8) { + _internal_add_vals(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr)); + CHK_(ptr); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* BoolList::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // 
@@protoc_insertion_point(serialize_to_array_start:flwr.proto.BoolList) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // repeated bool vals = 1; + if (this->_internal_vals_size() > 0) { + target = stream->WriteFixedPacked(1, _internal_vals(), target); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.BoolList) + return target; +} + +size_t BoolList::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.BoolList) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // repeated bool vals = 1; + { + unsigned int count = static_cast(this->_internal_vals_size()); + size_t data_size = 1UL * count; + if (data_size > 0) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size( + static_cast<::PROTOBUF_NAMESPACE_ID::int32>(data_size)); + } + total_size += data_size; + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData BoolList::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + BoolList::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*BoolList::GetClassData() const { return &_class_data_; } + +void BoolList::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void BoolList::MergeFrom(const BoolList& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.BoolList) + 
GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + vals_.MergeFrom(from.vals_); + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void BoolList::CopyFrom(const BoolList& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.BoolList) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool BoolList::IsInitialized() const { + return true; +} + +void BoolList::InternalSwap(BoolList* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + vals_.InternalSwap(&other->vals_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata BoolList::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[2]); +} + +// =================================================================== + +class StringList::_Internal { + public: +}; + +StringList::StringList(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + vals_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.StringList) +} +StringList::StringList(const StringList& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + vals_(from.vals_) { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + // @@protoc_insertion_point(copy_constructor:flwr.proto.StringList) +} + +void StringList::SharedCtor() { +} + +StringList::~StringList() { + // @@protoc_insertion_point(destructor:flwr.proto.StringList) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline 
void StringList::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void StringList::ArenaDtor(void* object) { + StringList* _this = reinterpret_cast< StringList* >(object); + (void)_this; +} +void StringList::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void StringList::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void StringList::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.StringList) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + vals_.Clear(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* StringList::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // repeated string vals = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr -= 1; + do { + ptr += 1; + auto str = _internal_add_vals(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.StringList.vals")); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; 
+#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* StringList::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.StringList) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // repeated string vals = 1; + for (int i = 0, n = this->_internal_vals_size(); i < n; i++) { + const auto& s = this->_internal_vals(i); + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + s.data(), static_cast(s.length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.StringList.vals"); + target = stream->WriteString(1, s, target); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.StringList) + return target; +} + +size_t StringList::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.StringList) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // repeated string vals = 1; + total_size += 1 * + ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(vals_.size()); + for (int i = 0, n = vals_.size(); i < n; i++) { + total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + vals_.Get(i)); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData StringList::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + StringList::MergeImpl +}; +const 
::PROTOBUF_NAMESPACE_ID::Message::ClassData*StringList::GetClassData() const { return &_class_data_; } + +void StringList::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void StringList::MergeFrom(const StringList& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.StringList) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + vals_.MergeFrom(from.vals_); + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void StringList::CopyFrom(const StringList& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.StringList) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool StringList::IsInitialized() const { + return true; +} + +void StringList::InternalSwap(StringList* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + vals_.InternalSwap(&other->vals_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata StringList::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[3]); +} + +// =================================================================== + +class BytesList::_Internal { + public: +}; + +BytesList::BytesList(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + vals_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.BytesList) +} +BytesList::BytesList(const BytesList& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + vals_(from.vals_) { + 
_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + // @@protoc_insertion_point(copy_constructor:flwr.proto.BytesList) +} + +void BytesList::SharedCtor() { +} + +BytesList::~BytesList() { + // @@protoc_insertion_point(destructor:flwr.proto.BytesList) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void BytesList::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void BytesList::ArenaDtor(void* object) { + BytesList* _this = reinterpret_cast< BytesList* >(object); + (void)_this; +} +void BytesList::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void BytesList::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void BytesList::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.BytesList) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + vals_.Clear(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* BytesList::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // repeated bytes vals = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr -= 1; + do { + ptr += 1; + auto str = _internal_add_vals(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if 
((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* BytesList::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.BytesList) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // repeated bytes vals = 1; + for (int i = 0, n = this->_internal_vals_size(); i < n; i++) { + const auto& s = this->_internal_vals(i); + target = stream->WriteBytes(1, s, target); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.BytesList) + return target; +} + +size_t BytesList::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.BytesList) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // repeated bytes vals = 1; + total_size += 1 * + ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(vals_.size()); + for (int i = 0, n = vals_.size(); i < n; i++) { + total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::BytesSize( + vals_.Get(i)); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData 
BytesList::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + BytesList::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*BytesList::GetClassData() const { return &_class_data_; } + +void BytesList::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void BytesList::MergeFrom(const BytesList& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.BytesList) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + vals_.MergeFrom(from.vals_); + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void BytesList::CopyFrom(const BytesList& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.BytesList) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool BytesList::IsInitialized() const { + return true; +} + +void BytesList::InternalSwap(BytesList* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + vals_.InternalSwap(&other->vals_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata BytesList::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[4]); +} + +// =================================================================== + +class Array::_Internal { + public: +}; + +Array::Array(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + shape_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.Array) +} +Array::Array(const Array& from) + : 
::PROTOBUF_NAMESPACE_ID::Message(), + shape_(from.shape_) { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + dtype_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from._internal_dtype().empty()) { + dtype_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_dtype(), + GetArenaForAllocation()); + } + stype_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from._internal_stype().empty()) { + stype_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_stype(), + GetArenaForAllocation()); + } + data_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from._internal_data().empty()) { + data_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_data(), + GetArenaForAllocation()); + } + // @@protoc_insertion_point(copy_constructor:flwr.proto.Array) +} + +void Array::SharedCtor() { +dtype_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +stype_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +data_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} + +Array::~Array() { + // @@protoc_insertion_point(destructor:flwr.proto.Array) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void Array::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); + dtype_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + stype_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + data_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} + +void Array::ArenaDtor(void* object) { + Array* _this = 
reinterpret_cast< Array* >(object); + (void)_this; +} +void Array::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void Array::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void Array::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.Array) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + shape_.Clear(); + dtype_.ClearToEmpty(); + stype_.ClearToEmpty(); + data_.ClearToEmpty(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* Array::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // string dtype = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + auto str = _internal_mutable_dtype(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.Array.dtype")); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // repeated int32 shape = 2; + case 2: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { + ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedInt32Parser(_internal_mutable_shape(), ptr, ctx); + CHK_(ptr); + } else if (static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 16) { + _internal_add_shape(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr)); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // string stype = 3; + case 3: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) { + auto str = _internal_mutable_stype(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, 
ptr, ctx); + CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.Array.stype")); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // bytes data = 4; + case 4: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 34)) { + auto str = _internal_mutable_data(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(ptr); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* Array::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.Array) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // string dtype = 1; + if (!this->_internal_dtype().empty()) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->_internal_dtype().data(), static_cast(this->_internal_dtype().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.Array.dtype"); + target = stream->WriteStringMaybeAliased( + 1, this->_internal_dtype(), target); + } + + // repeated int32 shape = 2; + { + int byte_size = _shape_cached_byte_size_.load(std::memory_order_relaxed); + if (byte_size > 0) { + target = stream->WriteInt32Packed( + 2, _internal_shape(), byte_size, target); + } + } + + // string stype = 3; + if (!this->_internal_stype().empty()) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + 
this->_internal_stype().data(), static_cast(this->_internal_stype().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.Array.stype"); + target = stream->WriteStringMaybeAliased( + 3, this->_internal_stype(), target); + } + + // bytes data = 4; + if (!this->_internal_data().empty()) { + target = stream->WriteBytesMaybeAliased( + 4, this->_internal_data(), target); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.Array) + return target; +} + +size_t Array::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.Array) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // repeated int32 shape = 2; + { + size_t data_size = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + Int32Size(this->shape_); + if (data_size > 0) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size( + static_cast<::PROTOBUF_NAMESPACE_ID::int32>(data_size)); + } + int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(data_size); + _shape_cached_byte_size_.store(cached_size, + std::memory_order_relaxed); + total_size += data_size; + } + + // string dtype = 1; + if (!this->_internal_dtype().empty()) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->_internal_dtype()); + } + + // string stype = 3; + if (!this->_internal_stype().empty()) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->_internal_stype()); + } + + // bytes data 
= 4; + if (!this->_internal_data().empty()) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::BytesSize( + this->_internal_data()); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData Array::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + Array::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*Array::GetClassData() const { return &_class_data_; } + +void Array::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void Array::MergeFrom(const Array& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.Array) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + shape_.MergeFrom(from.shape_); + if (!from._internal_dtype().empty()) { + _internal_set_dtype(from._internal_dtype()); + } + if (!from._internal_stype().empty()) { + _internal_set_stype(from._internal_stype()); + } + if (!from._internal_data().empty()) { + _internal_set_data(from._internal_data()); + } + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void Array::CopyFrom(const Array& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.Array) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool Array::IsInitialized() const { + return true; +} + +void Array::InternalSwap(Array* other) { + using std::swap; + auto* lhs_arena = GetArenaForAllocation(); + auto* rhs_arena = other->GetArenaForAllocation(); + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + shape_.InternalSwap(&other->shape_); + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap( + &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + &dtype_, lhs_arena, + &other->dtype_, 
rhs_arena + ); + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap( + &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + &stype_, lhs_arena, + &other->stype_, rhs_arena + ); + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap( + &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + &data_, lhs_arena, + &other->data_, rhs_arena + ); +} + +::PROTOBUF_NAMESPACE_ID::Metadata Array::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[5]); +} + +// =================================================================== + +class MetricsRecordValue::_Internal { + public: + static const ::flwr::proto::DoubleList& double_list(const MetricsRecordValue* msg); + static const ::flwr::proto::Sint64List& sint64_list(const MetricsRecordValue* msg); +}; + +const ::flwr::proto::DoubleList& +MetricsRecordValue::_Internal::double_list(const MetricsRecordValue* msg) { + return *msg->value_.double_list_; +} +const ::flwr::proto::Sint64List& +MetricsRecordValue::_Internal::sint64_list(const MetricsRecordValue* msg) { + return *msg->value_.sint64_list_; +} +void MetricsRecordValue::set_allocated_double_list(::flwr::proto::DoubleList* double_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + clear_value(); + if (double_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::DoubleList>::GetOwningArena(double_list); + if (message_arena != submessage_arena) { + double_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, double_list, submessage_arena); + } + set_has_double_list(); + value_.double_list_ = double_list; + } + // @@protoc_insertion_point(field_set_allocated:flwr.proto.MetricsRecordValue.double_list) 
+} +void MetricsRecordValue::set_allocated_sint64_list(::flwr::proto::Sint64List* sint64_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + clear_value(); + if (sint64_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Sint64List>::GetOwningArena(sint64_list); + if (message_arena != submessage_arena) { + sint64_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, sint64_list, submessage_arena); + } + set_has_sint64_list(); + value_.sint64_list_ = sint64_list; + } + // @@protoc_insertion_point(field_set_allocated:flwr.proto.MetricsRecordValue.sint64_list) +} +MetricsRecordValue::MetricsRecordValue(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.MetricsRecordValue) +} +MetricsRecordValue::MetricsRecordValue(const MetricsRecordValue& from) + : ::PROTOBUF_NAMESPACE_ID::Message() { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + clear_has_value(); + switch (from.value_case()) { + case kDouble: { + _internal_set_double_(from._internal_double_()); + break; + } + case kSint64: { + _internal_set_sint64(from._internal_sint64()); + break; + } + case kDoubleList: { + _internal_mutable_double_list()->::flwr::proto::DoubleList::MergeFrom(from._internal_double_list()); + break; + } + case kSint64List: { + _internal_mutable_sint64_list()->::flwr::proto::Sint64List::MergeFrom(from._internal_sint64_list()); + break; + } + case VALUE_NOT_SET: { + break; + } + } + // @@protoc_insertion_point(copy_constructor:flwr.proto.MetricsRecordValue) +} + +void MetricsRecordValue::SharedCtor() { +clear_has_value(); +} + +MetricsRecordValue::~MetricsRecordValue() { + // 
@@protoc_insertion_point(destructor:flwr.proto.MetricsRecordValue) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void MetricsRecordValue::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); + if (has_value()) { + clear_value(); + } +} + +void MetricsRecordValue::ArenaDtor(void* object) { + MetricsRecordValue* _this = reinterpret_cast< MetricsRecordValue* >(object); + (void)_this; +} +void MetricsRecordValue::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void MetricsRecordValue::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void MetricsRecordValue::clear_value() { +// @@protoc_insertion_point(one_of_clear_start:flwr.proto.MetricsRecordValue) + switch (value_case()) { + case kDouble: { + // No need to clear + break; + } + case kSint64: { + // No need to clear + break; + } + case kDoubleList: { + if (GetArenaForAllocation() == nullptr) { + delete value_.double_list_; + } + break; + } + case kSint64List: { + if (GetArenaForAllocation() == nullptr) { + delete value_.sint64_list_; + } + break; + } + case VALUE_NOT_SET: { + break; + } + } + _oneof_case_[0] = VALUE_NOT_SET; +} + + +void MetricsRecordValue::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.MetricsRecordValue) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + clear_value(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* MetricsRecordValue::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // double double = 1; + case 1: + if 
(PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 9)) { + _internal_set_double_(::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr)); + ptr += sizeof(double); + } else + goto handle_unusual; + continue; + // sint64 sint64 = 2; + case 2: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 16)) { + _internal_set_sint64(::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr)); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // .flwr.proto.DoubleList double_list = 21; + case 21: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 170)) { + ptr = ctx->ParseMessage(_internal_mutable_double_list(), ptr); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // .flwr.proto.Sint64List sint64_list = 22; + case 22: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 178)) { + ptr = ctx->ParseMessage(_internal_mutable_sint64_list(), ptr); + CHK_(ptr); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* MetricsRecordValue::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.MetricsRecordValue) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // double double = 1; + if (_internal_has_double_()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteDoubleToArray(1, 
this->_internal_double_(), target); + } + + // sint64 sint64 = 2; + if (_internal_has_sint64()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteSInt64ToArray(2, this->_internal_sint64(), target); + } + + // .flwr.proto.DoubleList double_list = 21; + if (_internal_has_double_list()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage( + 21, _Internal::double_list(this), target, stream); + } + + // .flwr.proto.Sint64List sint64_list = 22; + if (_internal_has_sint64_list()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage( + 22, _Internal::sint64_list(this), target, stream); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.MetricsRecordValue) + return target; +} + +size_t MetricsRecordValue::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.MetricsRecordValue) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + switch (value_case()) { + // double double = 1; + case kDouble: { + total_size += 1 + 8; + break; + } + // sint64 sint64 = 2; + case kSint64: { + total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SInt64SizePlusOne(this->_internal_sint64()); + break; + } + // .flwr.proto.DoubleList double_list = 21; + case kDoubleList: { + total_size += 2 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + 
*value_.double_list_); + break; + } + // .flwr.proto.Sint64List sint64_list = 22; + case kSint64List: { + total_size += 2 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + *value_.sint64_list_); + break; + } + case VALUE_NOT_SET: { + break; + } + } + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData MetricsRecordValue::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + MetricsRecordValue::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*MetricsRecordValue::GetClassData() const { return &_class_data_; } + +void MetricsRecordValue::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void MetricsRecordValue::MergeFrom(const MetricsRecordValue& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.MetricsRecordValue) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + switch (from.value_case()) { + case kDouble: { + _internal_set_double_(from._internal_double_()); + break; + } + case kSint64: { + _internal_set_sint64(from._internal_sint64()); + break; + } + case kDoubleList: { + _internal_mutable_double_list()->::flwr::proto::DoubleList::MergeFrom(from._internal_double_list()); + break; + } + case kSint64List: { + _internal_mutable_sint64_list()->::flwr::proto::Sint64List::MergeFrom(from._internal_sint64_list()); + break; + } + case VALUE_NOT_SET: { + break; + } + } + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void MetricsRecordValue::CopyFrom(const MetricsRecordValue& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.MetricsRecordValue) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool MetricsRecordValue::IsInitialized() const { + 
return true; +} + +void MetricsRecordValue::InternalSwap(MetricsRecordValue* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + swap(value_, other->value_); + swap(_oneof_case_[0], other->_oneof_case_[0]); +} + +::PROTOBUF_NAMESPACE_ID::Metadata MetricsRecordValue::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[6]); +} + +// =================================================================== + +class ConfigsRecordValue::_Internal { + public: + static const ::flwr::proto::DoubleList& double_list(const ConfigsRecordValue* msg); + static const ::flwr::proto::Sint64List& sint64_list(const ConfigsRecordValue* msg); + static const ::flwr::proto::BoolList& bool_list(const ConfigsRecordValue* msg); + static const ::flwr::proto::StringList& string_list(const ConfigsRecordValue* msg); + static const ::flwr::proto::BytesList& bytes_list(const ConfigsRecordValue* msg); +}; + +const ::flwr::proto::DoubleList& +ConfigsRecordValue::_Internal::double_list(const ConfigsRecordValue* msg) { + return *msg->value_.double_list_; +} +const ::flwr::proto::Sint64List& +ConfigsRecordValue::_Internal::sint64_list(const ConfigsRecordValue* msg) { + return *msg->value_.sint64_list_; +} +const ::flwr::proto::BoolList& +ConfigsRecordValue::_Internal::bool_list(const ConfigsRecordValue* msg) { + return *msg->value_.bool_list_; +} +const ::flwr::proto::StringList& +ConfigsRecordValue::_Internal::string_list(const ConfigsRecordValue* msg) { + return *msg->value_.string_list_; +} +const ::flwr::proto::BytesList& +ConfigsRecordValue::_Internal::bytes_list(const ConfigsRecordValue* msg) { + return *msg->value_.bytes_list_; +} +void ConfigsRecordValue::set_allocated_double_list(::flwr::proto::DoubleList* double_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* 
message_arena = GetArenaForAllocation(); + clear_value(); + if (double_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::DoubleList>::GetOwningArena(double_list); + if (message_arena != submessage_arena) { + double_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, double_list, submessage_arena); + } + set_has_double_list(); + value_.double_list_ = double_list; + } + // @@protoc_insertion_point(field_set_allocated:flwr.proto.ConfigsRecordValue.double_list) +} +void ConfigsRecordValue::set_allocated_sint64_list(::flwr::proto::Sint64List* sint64_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + clear_value(); + if (sint64_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Sint64List>::GetOwningArena(sint64_list); + if (message_arena != submessage_arena) { + sint64_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, sint64_list, submessage_arena); + } + set_has_sint64_list(); + value_.sint64_list_ = sint64_list; + } + // @@protoc_insertion_point(field_set_allocated:flwr.proto.ConfigsRecordValue.sint64_list) +} +void ConfigsRecordValue::set_allocated_bool_list(::flwr::proto::BoolList* bool_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + clear_value(); + if (bool_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::BoolList>::GetOwningArena(bool_list); + if (message_arena != submessage_arena) { + bool_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, bool_list, submessage_arena); + } + set_has_bool_list(); + value_.bool_list_ = bool_list; + } + // @@protoc_insertion_point(field_set_allocated:flwr.proto.ConfigsRecordValue.bool_list) +} +void 
ConfigsRecordValue::set_allocated_string_list(::flwr::proto::StringList* string_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + clear_value(); + if (string_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::StringList>::GetOwningArena(string_list); + if (message_arena != submessage_arena) { + string_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, string_list, submessage_arena); + } + set_has_string_list(); + value_.string_list_ = string_list; + } + // @@protoc_insertion_point(field_set_allocated:flwr.proto.ConfigsRecordValue.string_list) +} +void ConfigsRecordValue::set_allocated_bytes_list(::flwr::proto::BytesList* bytes_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + clear_value(); + if (bytes_list) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::BytesList>::GetOwningArena(bytes_list); + if (message_arena != submessage_arena) { + bytes_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, bytes_list, submessage_arena); + } + set_has_bytes_list(); + value_.bytes_list_ = bytes_list; + } + // @@protoc_insertion_point(field_set_allocated:flwr.proto.ConfigsRecordValue.bytes_list) +} +ConfigsRecordValue::ConfigsRecordValue(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.ConfigsRecordValue) +} +ConfigsRecordValue::ConfigsRecordValue(const ConfigsRecordValue& from) + : ::PROTOBUF_NAMESPACE_ID::Message() { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + clear_has_value(); + switch (from.value_case()) { + case kDouble: { + 
_internal_set_double_(from._internal_double_()); + break; + } + case kSint64: { + _internal_set_sint64(from._internal_sint64()); + break; + } + case kBool: { + _internal_set_bool_(from._internal_bool_()); + break; + } + case kString: { + _internal_set_string(from._internal_string()); + break; + } + case kBytes: { + _internal_set_bytes(from._internal_bytes()); + break; + } + case kDoubleList: { + _internal_mutable_double_list()->::flwr::proto::DoubleList::MergeFrom(from._internal_double_list()); + break; + } + case kSint64List: { + _internal_mutable_sint64_list()->::flwr::proto::Sint64List::MergeFrom(from._internal_sint64_list()); + break; + } + case kBoolList: { + _internal_mutable_bool_list()->::flwr::proto::BoolList::MergeFrom(from._internal_bool_list()); + break; + } + case kStringList: { + _internal_mutable_string_list()->::flwr::proto::StringList::MergeFrom(from._internal_string_list()); + break; + } + case kBytesList: { + _internal_mutable_bytes_list()->::flwr::proto::BytesList::MergeFrom(from._internal_bytes_list()); + break; + } + case VALUE_NOT_SET: { + break; + } + } + // @@protoc_insertion_point(copy_constructor:flwr.proto.ConfigsRecordValue) +} + +void ConfigsRecordValue::SharedCtor() { +clear_has_value(); +} + +ConfigsRecordValue::~ConfigsRecordValue() { + // @@protoc_insertion_point(destructor:flwr.proto.ConfigsRecordValue) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void ConfigsRecordValue::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); + if (has_value()) { + clear_value(); + } +} + +void ConfigsRecordValue::ArenaDtor(void* object) { + ConfigsRecordValue* _this = reinterpret_cast< ConfigsRecordValue* >(object); + (void)_this; +} +void ConfigsRecordValue::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void ConfigsRecordValue::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void 
ConfigsRecordValue::clear_value() { +// @@protoc_insertion_point(one_of_clear_start:flwr.proto.ConfigsRecordValue) + switch (value_case()) { + case kDouble: { + // No need to clear + break; + } + case kSint64: { + // No need to clear + break; + } + case kBool: { + // No need to clear + break; + } + case kString: { + value_.string_.Destroy(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); + break; + } + case kBytes: { + value_.bytes_.Destroy(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); + break; + } + case kDoubleList: { + if (GetArenaForAllocation() == nullptr) { + delete value_.double_list_; + } + break; + } + case kSint64List: { + if (GetArenaForAllocation() == nullptr) { + delete value_.sint64_list_; + } + break; + } + case kBoolList: { + if (GetArenaForAllocation() == nullptr) { + delete value_.bool_list_; + } + break; + } + case kStringList: { + if (GetArenaForAllocation() == nullptr) { + delete value_.string_list_; + } + break; + } + case kBytesList: { + if (GetArenaForAllocation() == nullptr) { + delete value_.bytes_list_; + } + break; + } + case VALUE_NOT_SET: { + break; + } + } + _oneof_case_[0] = VALUE_NOT_SET; +} + + +void ConfigsRecordValue::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.ConfigsRecordValue) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + clear_value(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* ConfigsRecordValue::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // double double = 1; + case 1: + if 
(PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 9)) { + _internal_set_double_(::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr)); + ptr += sizeof(double); + } else + goto handle_unusual; + continue; + // sint64 sint64 = 2; + case 2: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 16)) { + _internal_set_sint64(::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr)); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // bool bool = 3; + case 3: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) { + _internal_set_bool_(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr)); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // string string = 4; + case 4: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 34)) { + auto str = _internal_mutable_string(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.ConfigsRecordValue.string")); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // bytes bytes = 5; + case 5: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 42)) { + auto str = _internal_mutable_bytes(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // .flwr.proto.DoubleList double_list = 21; + case 21: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 170)) { + ptr = ctx->ParseMessage(_internal_mutable_double_list(), ptr); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // .flwr.proto.Sint64List sint64_list = 22; + case 22: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 178)) { + ptr = ctx->ParseMessage(_internal_mutable_sint64_list(), ptr); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // 
.flwr.proto.BoolList bool_list = 23; + case 23: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 186)) { + ptr = ctx->ParseMessage(_internal_mutable_bool_list(), ptr); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // .flwr.proto.StringList string_list = 24; + case 24: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 194)) { + ptr = ctx->ParseMessage(_internal_mutable_string_list(), ptr); + CHK_(ptr); + } else + goto handle_unusual; + continue; + // .flwr.proto.BytesList bytes_list = 25; + case 25: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 202)) { + ptr = ctx->ParseMessage(_internal_mutable_bytes_list(), ptr); + CHK_(ptr); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* ConfigsRecordValue::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.ConfigsRecordValue) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // double double = 1; + if (_internal_has_double_()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteDoubleToArray(1, this->_internal_double_(), target); + } + + // sint64 sint64 = 2; + if (_internal_has_sint64()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteSInt64ToArray(2, 
this->_internal_sint64(), target); + } + + // bool bool = 3; + if (_internal_has_bool_()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(3, this->_internal_bool_(), target); + } + + // string string = 4; + if (_internal_has_string()) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->_internal_string().data(), static_cast(this->_internal_string().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.ConfigsRecordValue.string"); + target = stream->WriteStringMaybeAliased( + 4, this->_internal_string(), target); + } + + // bytes bytes = 5; + if (_internal_has_bytes()) { + target = stream->WriteBytesMaybeAliased( + 5, this->_internal_bytes(), target); + } + + // .flwr.proto.DoubleList double_list = 21; + if (_internal_has_double_list()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage( + 21, _Internal::double_list(this), target, stream); + } + + // .flwr.proto.Sint64List sint64_list = 22; + if (_internal_has_sint64_list()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage( + 22, _Internal::sint64_list(this), target, stream); + } + + // .flwr.proto.BoolList bool_list = 23; + if (_internal_has_bool_list()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage( + 23, _Internal::bool_list(this), target, stream); + } + + // .flwr.proto.StringList string_list = 24; + if (_internal_has_string_list()) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage( + 24, _Internal::string_list(this), target, stream); + } + + // .flwr.proto.BytesList bytes_list = 25; + if (_internal_has_bytes_list()) { + target = stream->EnsureSpace(target); + target 
= ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage( + 25, _Internal::bytes_list(this), target, stream); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.ConfigsRecordValue) + return target; +} + +size_t ConfigsRecordValue::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.ConfigsRecordValue) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + switch (value_case()) { + // double double = 1; + case kDouble: { + total_size += 1 + 8; + break; + } + // sint64 sint64 = 2; + case kSint64: { + total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SInt64SizePlusOne(this->_internal_sint64()); + break; + } + // bool bool = 3; + case kBool: { + total_size += 1 + 1; + break; + } + // string string = 4; + case kString: { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->_internal_string()); + break; + } + // bytes bytes = 5; + case kBytes: { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::BytesSize( + this->_internal_bytes()); + break; + } + // .flwr.proto.DoubleList double_list = 21; + case kDoubleList: { + total_size += 2 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + *value_.double_list_); + break; + } + // .flwr.proto.Sint64List sint64_list = 22; + case kSint64List: { + total_size += 2 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + *value_.sint64_list_); + break; + } + // .flwr.proto.BoolList bool_list = 23; + case 
kBoolList: { + total_size += 2 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + *value_.bool_list_); + break; + } + // .flwr.proto.StringList string_list = 24; + case kStringList: { + total_size += 2 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + *value_.string_list_); + break; + } + // .flwr.proto.BytesList bytes_list = 25; + case kBytesList: { + total_size += 2 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + *value_.bytes_list_); + break; + } + case VALUE_NOT_SET: { + break; + } + } + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData ConfigsRecordValue::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + ConfigsRecordValue::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*ConfigsRecordValue::GetClassData() const { return &_class_data_; } + +void ConfigsRecordValue::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void ConfigsRecordValue::MergeFrom(const ConfigsRecordValue& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.ConfigsRecordValue) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + switch (from.value_case()) { + case kDouble: { + _internal_set_double_(from._internal_double_()); + break; + } + case kSint64: { + _internal_set_sint64(from._internal_sint64()); + break; + } + case kBool: { + _internal_set_bool_(from._internal_bool_()); + break; + } + case kString: { + _internal_set_string(from._internal_string()); + break; + } + case kBytes: { + _internal_set_bytes(from._internal_bytes()); + break; + } + case kDoubleList: { + _internal_mutable_double_list()->::flwr::proto::DoubleList::MergeFrom(from._internal_double_list()); + break; + } + case kSint64List: { + 
_internal_mutable_sint64_list()->::flwr::proto::Sint64List::MergeFrom(from._internal_sint64_list()); + break; + } + case kBoolList: { + _internal_mutable_bool_list()->::flwr::proto::BoolList::MergeFrom(from._internal_bool_list()); + break; + } + case kStringList: { + _internal_mutable_string_list()->::flwr::proto::StringList::MergeFrom(from._internal_string_list()); + break; + } + case kBytesList: { + _internal_mutable_bytes_list()->::flwr::proto::BytesList::MergeFrom(from._internal_bytes_list()); + break; + } + case VALUE_NOT_SET: { + break; + } + } + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void ConfigsRecordValue::CopyFrom(const ConfigsRecordValue& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.ConfigsRecordValue) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool ConfigsRecordValue::IsInitialized() const { + return true; +} + +void ConfigsRecordValue::InternalSwap(ConfigsRecordValue* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + swap(value_, other->value_); + swap(_oneof_case_[0], other->_oneof_case_[0]); +} + +::PROTOBUF_NAMESPACE_ID::Metadata ConfigsRecordValue::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[7]); +} + +// =================================================================== + +class ParametersRecord::_Internal { + public: +}; + +ParametersRecord::ParametersRecord(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + data_keys_(arena), + data_values_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // 
@@protoc_insertion_point(arena_constructor:flwr.proto.ParametersRecord) +} +ParametersRecord::ParametersRecord(const ParametersRecord& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + data_keys_(from.data_keys_), + data_values_(from.data_values_) { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + // @@protoc_insertion_point(copy_constructor:flwr.proto.ParametersRecord) +} + +void ParametersRecord::SharedCtor() { +} + +ParametersRecord::~ParametersRecord() { + // @@protoc_insertion_point(destructor:flwr.proto.ParametersRecord) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void ParametersRecord::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void ParametersRecord::ArenaDtor(void* object) { + ParametersRecord* _this = reinterpret_cast< ParametersRecord* >(object); + (void)_this; +} +void ParametersRecord::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { +} +void ParametersRecord::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void ParametersRecord::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.ParametersRecord) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + data_keys_.Clear(); + data_values_.Clear(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* ParametersRecord::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // repeated string data_keys = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { 
+ ptr -= 1; + do { + ptr += 1; + auto str = _internal_add_data_keys(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.ParametersRecord.data_keys")); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); + } else + goto handle_unusual; + continue; + // repeated .flwr.proto.Array data_values = 2; + case 2: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { + ptr -= 1; + do { + ptr += 1; + ptr = ctx->ParseMessage(_internal_add_data_values(), ptr); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr)); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* ParametersRecord::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.ParametersRecord) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // repeated string data_keys = 1; + for (int i = 0, n = this->_internal_data_keys_size(); i < n; i++) { + const auto& s = this->_internal_data_keys(i); + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + s.data(), static_cast(s.length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.ParametersRecord.data_keys"); + target = 
stream->WriteString(1, s, target); + } + + // repeated .flwr.proto.Array data_values = 2; + for (unsigned int i = 0, + n = static_cast(this->_internal_data_values_size()); i < n; i++) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage(2, this->_internal_data_values(i), target, stream); + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.ParametersRecord) + return target; +} + +size_t ParametersRecord::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.ParametersRecord) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // repeated string data_keys = 1; + total_size += 1 * + ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(data_keys_.size()); + for (int i = 0, n = data_keys_.size(); i < n; i++) { + total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + data_keys_.Get(i)); + } + + // repeated .flwr.proto.Array data_values = 2; + total_size += 1UL * this->_internal_data_values_size(); + for (const auto& msg : this->data_values_) { + total_size += + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData ParametersRecord::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + ParametersRecord::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*ParametersRecord::GetClassData() const { return 
&_class_data_; } + +void ParametersRecord::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void ParametersRecord::MergeFrom(const ParametersRecord& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.ParametersRecord) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + data_keys_.MergeFrom(from.data_keys_); + data_values_.MergeFrom(from.data_values_); + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void ParametersRecord::CopyFrom(const ParametersRecord& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.ParametersRecord) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool ParametersRecord::IsInitialized() const { + return true; +} + +void ParametersRecord::InternalSwap(ParametersRecord* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + data_keys_.InternalSwap(&other->data_keys_); + data_values_.InternalSwap(&other->data_values_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata ParametersRecord::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[8]); +} + +// =================================================================== + +MetricsRecord_DataEntry_DoNotUse::MetricsRecord_DataEntry_DoNotUse() {} +MetricsRecord_DataEntry_DoNotUse::MetricsRecord_DataEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena) + : SuperType(arena) {} +void MetricsRecord_DataEntry_DoNotUse::MergeFrom(const MetricsRecord_DataEntry_DoNotUse& other) { + MergeFromInternal(other); +} +::PROTOBUF_NAMESPACE_ID::Metadata 
MetricsRecord_DataEntry_DoNotUse::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[9]); +} + +// =================================================================== + +class MetricsRecord::_Internal { + public: +}; + +MetricsRecord::MetricsRecord(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + data_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.MetricsRecord) +} +MetricsRecord::MetricsRecord(const MetricsRecord& from) + : ::PROTOBUF_NAMESPACE_ID::Message() { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + data_.MergeFrom(from.data_); + // @@protoc_insertion_point(copy_constructor:flwr.proto.MetricsRecord) +} + +void MetricsRecord::SharedCtor() { +} + +MetricsRecord::~MetricsRecord() { + // @@protoc_insertion_point(destructor:flwr.proto.MetricsRecord) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void MetricsRecord::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void MetricsRecord::ArenaDtor(void* object) { + MetricsRecord* _this = reinterpret_cast< MetricsRecord* >(object); + (void)_this; + _this->data_. 
~MapField(); +} +inline void MetricsRecord::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena) { + if (arena != nullptr) { + arena->OwnCustomDestructor(this, &MetricsRecord::ArenaDtor); + } +} +void MetricsRecord::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void MetricsRecord::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.MetricsRecord) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + data_.Clear(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* MetricsRecord::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // map data = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr -= 1; + do { + ptr += 1; + ptr = ctx->ParseMessage(&data_, ptr); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* MetricsRecord::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // 
@@protoc_insertion_point(serialize_to_array_start:flwr.proto.MetricsRecord) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // map data = 1; + if (!this->_internal_data().empty()) { + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >::const_pointer + ConstPtr; + typedef ConstPtr SortItem; + typedef ::PROTOBUF_NAMESPACE_ID::internal::CompareByDerefFirst Less; + struct Utf8Check { + static void Check(ConstPtr p) { + (void)p; + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + p->first.data(), static_cast(p->first.length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.MetricsRecord.DataEntry.key"); + } + }; + + if (stream->IsSerializationDeterministic() && + this->_internal_data().size() > 1) { + ::std::unique_ptr items( + new SortItem[this->_internal_data().size()]); + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >::size_type size_type; + size_type n = 0; + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >::const_iterator + it = this->_internal_data().begin(); + it != this->_internal_data().end(); ++it, ++n) { + items[static_cast(n)] = SortItem(&*it); + } + ::std::sort(&items[0], &items[static_cast(n)], Less()); + for (size_type i = 0; i < n; i++) { + target = MetricsRecord_DataEntry_DoNotUse::Funcs::InternalSerialize(1, items[static_cast(i)]->first, items[static_cast(i)]->second, target, stream); + Utf8Check::Check(&(*items[static_cast(i)])); + } + } else { + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >::const_iterator + it = this->_internal_data().begin(); + it != this->_internal_data().end(); ++it) { + target = MetricsRecord_DataEntry_DoNotUse::Funcs::InternalSerialize(1, it->first, it->second, target, stream); + Utf8Check::Check(&(*it)); + } + } + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { 
+ target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.MetricsRecord) + return target; +} + +size_t MetricsRecord::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.MetricsRecord) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // map data = 1; + total_size += 1 * + ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_data_size()); + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >::const_iterator + it = this->_internal_data().begin(); + it != this->_internal_data().end(); ++it) { + total_size += MetricsRecord_DataEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData MetricsRecord::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + MetricsRecord::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*MetricsRecord::GetClassData() const { return &_class_data_; } + +void MetricsRecord::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void MetricsRecord::MergeFrom(const MetricsRecord& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.MetricsRecord) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + data_.MergeFrom(from.data_); + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + 
+void MetricsRecord::CopyFrom(const MetricsRecord& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.MetricsRecord) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool MetricsRecord::IsInitialized() const { + return true; +} + +void MetricsRecord::InternalSwap(MetricsRecord* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + data_.InternalSwap(&other->data_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata MetricsRecord::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[10]); +} + +// =================================================================== + +ConfigsRecord_DataEntry_DoNotUse::ConfigsRecord_DataEntry_DoNotUse() {} +ConfigsRecord_DataEntry_DoNotUse::ConfigsRecord_DataEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena) + : SuperType(arena) {} +void ConfigsRecord_DataEntry_DoNotUse::MergeFrom(const ConfigsRecord_DataEntry_DoNotUse& other) { + MergeFromInternal(other); +} +::PROTOBUF_NAMESPACE_ID::Metadata ConfigsRecord_DataEntry_DoNotUse::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[11]); +} + +// =================================================================== + +class ConfigsRecord::_Internal { + public: +}; + +ConfigsRecord::ConfigsRecord(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + data_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.ConfigsRecord) +} 
+ConfigsRecord::ConfigsRecord(const ConfigsRecord& from) + : ::PROTOBUF_NAMESPACE_ID::Message() { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + data_.MergeFrom(from.data_); + // @@protoc_insertion_point(copy_constructor:flwr.proto.ConfigsRecord) +} + +void ConfigsRecord::SharedCtor() { +} + +ConfigsRecord::~ConfigsRecord() { + // @@protoc_insertion_point(destructor:flwr.proto.ConfigsRecord) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void ConfigsRecord::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void ConfigsRecord::ArenaDtor(void* object) { + ConfigsRecord* _this = reinterpret_cast< ConfigsRecord* >(object); + (void)_this; + _this->data_. ~MapField(); +} +inline void ConfigsRecord::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena) { + if (arena != nullptr) { + arena->OwnCustomDestructor(this, &ConfigsRecord::ArenaDtor); + } +} +void ConfigsRecord::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void ConfigsRecord::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.ConfigsRecord) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + data_.Clear(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* ConfigsRecord::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // map data = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr -= 1; + do { + ptr += 1; + ptr = ctx->ParseMessage(&data_, ptr); + 
CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* ConfigsRecord::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.ConfigsRecord) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // map data = 1; + if (!this->_internal_data().empty()) { + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >::const_pointer + ConstPtr; + typedef ConstPtr SortItem; + typedef ::PROTOBUF_NAMESPACE_ID::internal::CompareByDerefFirst Less; + struct Utf8Check { + static void Check(ConstPtr p) { + (void)p; + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + p->first.data(), static_cast(p->first.length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.ConfigsRecord.DataEntry.key"); + } + }; + + if (stream->IsSerializationDeterministic() && + this->_internal_data().size() > 1) { + ::std::unique_ptr items( + new SortItem[this->_internal_data().size()]); + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >::size_type size_type; + size_type n = 0; + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >::const_iterator + it = this->_internal_data().begin(); + it != this->_internal_data().end(); ++it, 
++n) { + items[static_cast(n)] = SortItem(&*it); + } + ::std::sort(&items[0], &items[static_cast(n)], Less()); + for (size_type i = 0; i < n; i++) { + target = ConfigsRecord_DataEntry_DoNotUse::Funcs::InternalSerialize(1, items[static_cast(i)]->first, items[static_cast(i)]->second, target, stream); + Utf8Check::Check(&(*items[static_cast(i)])); + } + } else { + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >::const_iterator + it = this->_internal_data().begin(); + it != this->_internal_data().end(); ++it) { + target = ConfigsRecord_DataEntry_DoNotUse::Funcs::InternalSerialize(1, it->first, it->second, target, stream); + Utf8Check::Check(&(*it)); + } + } + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.ConfigsRecord) + return target; +} + +size_t ConfigsRecord::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.ConfigsRecord) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // map data = 1; + total_size += 1 * + ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_data_size()); + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >::const_iterator + it = this->_internal_data().begin(); + it != this->_internal_data().end(); ++it) { + total_size += ConfigsRecord_DataEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData ConfigsRecord::_class_data_ = { 
+ ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + ConfigsRecord::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*ConfigsRecord::GetClassData() const { return &_class_data_; } + +void ConfigsRecord::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void ConfigsRecord::MergeFrom(const ConfigsRecord& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.ConfigsRecord) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + data_.MergeFrom(from.data_); + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void ConfigsRecord::CopyFrom(const ConfigsRecord& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.ConfigsRecord) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool ConfigsRecord::IsInitialized() const { + return true; +} + +void ConfigsRecord::InternalSwap(ConfigsRecord* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + data_.InternalSwap(&other->data_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata ConfigsRecord::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[12]); +} + +// =================================================================== + +RecordSet_ParametersEntry_DoNotUse::RecordSet_ParametersEntry_DoNotUse() {} +RecordSet_ParametersEntry_DoNotUse::RecordSet_ParametersEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena) + : SuperType(arena) {} +void RecordSet_ParametersEntry_DoNotUse::MergeFrom(const RecordSet_ParametersEntry_DoNotUse& other) { + MergeFromInternal(other); +} 
+::PROTOBUF_NAMESPACE_ID::Metadata RecordSet_ParametersEntry_DoNotUse::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[13]); +} + +// =================================================================== + +RecordSet_MetricsEntry_DoNotUse::RecordSet_MetricsEntry_DoNotUse() {} +RecordSet_MetricsEntry_DoNotUse::RecordSet_MetricsEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena) + : SuperType(arena) {} +void RecordSet_MetricsEntry_DoNotUse::MergeFrom(const RecordSet_MetricsEntry_DoNotUse& other) { + MergeFromInternal(other); +} +::PROTOBUF_NAMESPACE_ID::Metadata RecordSet_MetricsEntry_DoNotUse::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[14]); +} + +// =================================================================== + +RecordSet_ConfigsEntry_DoNotUse::RecordSet_ConfigsEntry_DoNotUse() {} +RecordSet_ConfigsEntry_DoNotUse::RecordSet_ConfigsEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena) + : SuperType(arena) {} +void RecordSet_ConfigsEntry_DoNotUse::MergeFrom(const RecordSet_ConfigsEntry_DoNotUse& other) { + MergeFromInternal(other); +} +::PROTOBUF_NAMESPACE_ID::Metadata RecordSet_ConfigsEntry_DoNotUse::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, &descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[15]); +} + +// =================================================================== + +class RecordSet::_Internal { + public: +}; + +RecordSet::RecordSet(::PROTOBUF_NAMESPACE_ID::Arena* arena, 
+ bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), + parameters_(arena), + metrics_(arena), + configs_(arena) { + SharedCtor(); + if (!is_message_owned) { + RegisterArenaDtor(arena); + } + // @@protoc_insertion_point(arena_constructor:flwr.proto.RecordSet) +} +RecordSet::RecordSet(const RecordSet& from) + : ::PROTOBUF_NAMESPACE_ID::Message() { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + parameters_.MergeFrom(from.parameters_); + metrics_.MergeFrom(from.metrics_); + configs_.MergeFrom(from.configs_); + // @@protoc_insertion_point(copy_constructor:flwr.proto.RecordSet) +} + +void RecordSet::SharedCtor() { +} + +RecordSet::~RecordSet() { + // @@protoc_insertion_point(destructor:flwr.proto.RecordSet) + if (GetArenaForAllocation() != nullptr) return; + SharedDtor(); + _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +inline void RecordSet::SharedDtor() { + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); +} + +void RecordSet::ArenaDtor(void* object) { + RecordSet* _this = reinterpret_cast< RecordSet* >(object); + (void)_this; + _this->parameters_. ~MapField(); + _this->metrics_. ~MapField(); + _this->configs_. 
~MapField(); +} +inline void RecordSet::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena) { + if (arena != nullptr) { + arena->OwnCustomDestructor(this, &RecordSet::ArenaDtor); + } +} +void RecordSet::SetCachedSize(int size) const { + _cached_size_.Set(size); +} + +void RecordSet::Clear() { +// @@protoc_insertion_point(message_clear_start:flwr.proto.RecordSet) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + parameters_.Clear(); + metrics_.Clear(); + configs_.Clear(); + _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); +} + +const char* RecordSet::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + switch (tag >> 3) { + // map parameters = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr -= 1; + do { + ptr += 1; + ptr = ctx->ParseMessage(¶meters_, ptr); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); + } else + goto handle_unusual; + continue; + // map metrics = 2; + case 2: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { + ptr -= 1; + do { + ptr += 1; + ptr = ctx->ParseMessage(&metrics_, ptr); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<18>(ptr)); + } else + goto handle_unusual; + continue; + // map configs = 3; + case 3: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) { + ptr -= 1; + do { + ptr += 1; + ptr = ctx->ParseMessage(&configs_, ptr); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while 
(::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<26>(ptr)); + } else + goto handle_unusual; + continue; + default: + goto handle_unusual; + } // switch + handle_unusual: + if ((tag == 0) || ((tag & 7) == 4)) { + CHK_(ptr); + ctx->SetLastTag(tag); + goto message_done; + } + ptr = UnknownFieldParse( + tag, + _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), + ptr, ctx); + CHK_(ptr != nullptr); + } // while +message_done: + return ptr; +failure: + ptr = nullptr; + goto message_done; +#undef CHK_ +} + +::PROTOBUF_NAMESPACE_ID::uint8* RecordSet::_InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { + // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.RecordSet) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // map parameters = 1; + if (!this->_internal_parameters().empty()) { + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >::const_pointer + ConstPtr; + typedef ConstPtr SortItem; + typedef ::PROTOBUF_NAMESPACE_ID::internal::CompareByDerefFirst Less; + struct Utf8Check { + static void Check(ConstPtr p) { + (void)p; + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + p->first.data(), static_cast(p->first.length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.RecordSet.ParametersEntry.key"); + } + }; + + if (stream->IsSerializationDeterministic() && + this->_internal_parameters().size() > 1) { + ::std::unique_ptr items( + new SortItem[this->_internal_parameters().size()]); + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >::size_type size_type; + size_type n = 0; + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >::const_iterator + it = this->_internal_parameters().begin(); + it != this->_internal_parameters().end(); ++it, ++n) { + items[static_cast(n)] = 
SortItem(&*it); + } + ::std::sort(&items[0], &items[static_cast(n)], Less()); + for (size_type i = 0; i < n; i++) { + target = RecordSet_ParametersEntry_DoNotUse::Funcs::InternalSerialize(1, items[static_cast(i)]->first, items[static_cast(i)]->second, target, stream); + Utf8Check::Check(&(*items[static_cast(i)])); + } + } else { + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >::const_iterator + it = this->_internal_parameters().begin(); + it != this->_internal_parameters().end(); ++it) { + target = RecordSet_ParametersEntry_DoNotUse::Funcs::InternalSerialize(1, it->first, it->second, target, stream); + Utf8Check::Check(&(*it)); + } + } + } + + // map metrics = 2; + if (!this->_internal_metrics().empty()) { + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >::const_pointer + ConstPtr; + typedef ConstPtr SortItem; + typedef ::PROTOBUF_NAMESPACE_ID::internal::CompareByDerefFirst Less; + struct Utf8Check { + static void Check(ConstPtr p) { + (void)p; + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + p->first.data(), static_cast(p->first.length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.RecordSet.MetricsEntry.key"); + } + }; + + if (stream->IsSerializationDeterministic() && + this->_internal_metrics().size() > 1) { + ::std::unique_ptr items( + new SortItem[this->_internal_metrics().size()]); + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >::size_type size_type; + size_type n = 0; + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >::const_iterator + it = this->_internal_metrics().begin(); + it != this->_internal_metrics().end(); ++it, ++n) { + items[static_cast(n)] = SortItem(&*it); + } + ::std::sort(&items[0], &items[static_cast(n)], Less()); + for (size_type i = 0; i < n; i++) { + target = RecordSet_MetricsEntry_DoNotUse::Funcs::InternalSerialize(2, items[static_cast(i)]->first, 
items[static_cast(i)]->second, target, stream); + Utf8Check::Check(&(*items[static_cast(i)])); + } + } else { + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >::const_iterator + it = this->_internal_metrics().begin(); + it != this->_internal_metrics().end(); ++it) { + target = RecordSet_MetricsEntry_DoNotUse::Funcs::InternalSerialize(2, it->first, it->second, target, stream); + Utf8Check::Check(&(*it)); + } + } + } + + // map configs = 3; + if (!this->_internal_configs().empty()) { + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecord >::const_pointer + ConstPtr; + typedef ConstPtr SortItem; + typedef ::PROTOBUF_NAMESPACE_ID::internal::CompareByDerefFirst Less; + struct Utf8Check { + static void Check(ConstPtr p) { + (void)p; + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + p->first.data(), static_cast(p->first.length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.RecordSet.ConfigsEntry.key"); + } + }; + + if (stream->IsSerializationDeterministic() && + this->_internal_configs().size() > 1) { + ::std::unique_ptr items( + new SortItem[this->_internal_configs().size()]); + typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecord >::size_type size_type; + size_type n = 0; + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecord >::const_iterator + it = this->_internal_configs().begin(); + it != this->_internal_configs().end(); ++it, ++n) { + items[static_cast(n)] = SortItem(&*it); + } + ::std::sort(&items[0], &items[static_cast(n)], Less()); + for (size_type i = 0; i < n; i++) { + target = RecordSet_ConfigsEntry_DoNotUse::Funcs::InternalSerialize(3, items[static_cast(i)]->first, items[static_cast(i)]->second, target, stream); + Utf8Check::Check(&(*items[static_cast(i)])); + } + } else { + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecord >::const_iterator + it = 
this->_internal_configs().begin(); + it != this->_internal_configs().end(); ++it) { + target = RecordSet_ConfigsEntry_DoNotUse::Funcs::InternalSerialize(3, it->first, it->second, target, stream); + Utf8Check::Check(&(*it)); + } + } + } + + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + } + // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.RecordSet) + return target; +} + +size_t RecordSet::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:flwr.proto.RecordSet) + size_t total_size = 0; + + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // map parameters = 1; + total_size += 1 * + ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_parameters_size()); + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >::const_iterator + it = this->_internal_parameters().begin(); + it != this->_internal_parameters().end(); ++it) { + total_size += RecordSet_ParametersEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second); + } + + // map metrics = 2; + total_size += 1 * + ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_metrics_size()); + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >::const_iterator + it = this->_internal_metrics().begin(); + it != this->_internal_metrics().end(); ++it) { + total_size += RecordSet_MetricsEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second); + } + + // map configs = 3; + total_size += 1 * + ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_configs_size()); + for (::PROTOBUF_NAMESPACE_ID::Map< std::string, 
::flwr::proto::ConfigsRecord >::const_iterator + it = this->_internal_configs().begin(); + it != this->_internal_configs().end(); ++it) { + total_size += RecordSet_ConfigsEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second); + } + + return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); +} + +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData RecordSet::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, + RecordSet::MergeImpl +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*RecordSet::GetClassData() const { return &_class_data_; } + +void RecordSet::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, + const ::PROTOBUF_NAMESPACE_ID::Message& from) { + static_cast(to)->MergeFrom( + static_cast(from)); +} + + +void RecordSet::MergeFrom(const RecordSet& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.RecordSet) + GOOGLE_DCHECK_NE(&from, this); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + parameters_.MergeFrom(from.parameters_); + metrics_.MergeFrom(from.metrics_); + configs_.MergeFrom(from.configs_); + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); +} + +void RecordSet::CopyFrom(const RecordSet& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.RecordSet) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool RecordSet::IsInitialized() const { + return true; +} + +void RecordSet::InternalSwap(RecordSet* other) { + using std::swap; + _internal_metadata_.InternalSwap(&other->_internal_metadata_); + parameters_.InternalSwap(&other->parameters_); + metrics_.InternalSwap(&other->metrics_); + configs_.InternalSwap(&other->configs_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata RecordSet::GetMetadata() const { + return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( + &descriptor_table_flwr_2fproto_2frecordset_2eproto_getter, 
&descriptor_table_flwr_2fproto_2frecordset_2eproto_once, + file_level_metadata_flwr_2fproto_2frecordset_2eproto[16]); +} + +// @@protoc_insertion_point(namespace_scope) +} // namespace proto +} // namespace flwr +PROTOBUF_NAMESPACE_OPEN +template<> PROTOBUF_NOINLINE ::flwr::proto::DoubleList* Arena::CreateMaybeMessage< ::flwr::proto::DoubleList >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::DoubleList >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::Sint64List* Arena::CreateMaybeMessage< ::flwr::proto::Sint64List >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::Sint64List >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::BoolList* Arena::CreateMaybeMessage< ::flwr::proto::BoolList >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::BoolList >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::StringList* Arena::CreateMaybeMessage< ::flwr::proto::StringList >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::StringList >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::BytesList* Arena::CreateMaybeMessage< ::flwr::proto::BytesList >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::BytesList >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::Array* Arena::CreateMaybeMessage< ::flwr::proto::Array >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::Array >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::MetricsRecordValue* Arena::CreateMaybeMessage< ::flwr::proto::MetricsRecordValue >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::MetricsRecordValue >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::ConfigsRecordValue* Arena::CreateMaybeMessage< ::flwr::proto::ConfigsRecordValue >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::ConfigsRecordValue >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::ParametersRecord* Arena::CreateMaybeMessage< 
::flwr::proto::ParametersRecord >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::ParametersRecord >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::MetricsRecord_DataEntry_DoNotUse* Arena::CreateMaybeMessage< ::flwr::proto::MetricsRecord_DataEntry_DoNotUse >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::MetricsRecord_DataEntry_DoNotUse >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::MetricsRecord* Arena::CreateMaybeMessage< ::flwr::proto::MetricsRecord >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::MetricsRecord >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::ConfigsRecord_DataEntry_DoNotUse* Arena::CreateMaybeMessage< ::flwr::proto::ConfigsRecord_DataEntry_DoNotUse >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::ConfigsRecord_DataEntry_DoNotUse >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::ConfigsRecord* Arena::CreateMaybeMessage< ::flwr::proto::ConfigsRecord >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::ConfigsRecord >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::RecordSet_ParametersEntry_DoNotUse* Arena::CreateMaybeMessage< ::flwr::proto::RecordSet_ParametersEntry_DoNotUse >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::RecordSet_ParametersEntry_DoNotUse >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::RecordSet_MetricsEntry_DoNotUse* Arena::CreateMaybeMessage< ::flwr::proto::RecordSet_MetricsEntry_DoNotUse >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::RecordSet_MetricsEntry_DoNotUse >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::RecordSet_ConfigsEntry_DoNotUse* Arena::CreateMaybeMessage< ::flwr::proto::RecordSet_ConfigsEntry_DoNotUse >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::RecordSet_ConfigsEntry_DoNotUse >(arena); +} +template<> PROTOBUF_NOINLINE ::flwr::proto::RecordSet* 
Arena::CreateMaybeMessage< ::flwr::proto::RecordSet >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::RecordSet >(arena); +} +PROTOBUF_NAMESPACE_CLOSE + +// @@protoc_insertion_point(global_scope) +#include diff --git a/src/cc/flwr/include/flwr/proto/recordset.pb.h b/src/cc/flwr/include/flwr/proto/recordset.pb.h new file mode 100644 index 000000000000..74c336cf61ad --- /dev/null +++ b/src/cc/flwr/include/flwr/proto/recordset.pb.h @@ -0,0 +1,4255 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: flwr/proto/recordset.proto + +#ifndef GOOGLE_PROTOBUF_INCLUDED_flwr_2fproto_2frecordset_2eproto +#define GOOGLE_PROTOBUF_INCLUDED_flwr_2fproto_2frecordset_2eproto + +#include +#include + +#include +#if PROTOBUF_VERSION < 3018000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. +#endif +#if 3018001 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#include // IWYU pragma: export +#include +#include +#include +// @@protoc_insertion_point(includes) +#include +#define PROTOBUF_INTERNAL_EXPORT_flwr_2fproto_2frecordset_2eproto +PROTOBUF_NAMESPACE_OPEN +namespace internal { +class AnyMetadata; +} // namespace internal +PROTOBUF_NAMESPACE_CLOSE + +// Internal implementation detail -- do not use these members. 
+struct TableStruct_flwr_2fproto_2frecordset_2eproto { + static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTableField entries[] + PROTOBUF_SECTION_VARIABLE(protodesc_cold); + static const ::PROTOBUF_NAMESPACE_ID::internal::AuxiliaryParseTableField aux[] + PROTOBUF_SECTION_VARIABLE(protodesc_cold); + static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[17] + PROTOBUF_SECTION_VARIABLE(protodesc_cold); + static const ::PROTOBUF_NAMESPACE_ID::internal::FieldMetadata field_metadata[]; + static const ::PROTOBUF_NAMESPACE_ID::internal::SerializationTable serialization_table[]; + static const ::PROTOBUF_NAMESPACE_ID::uint32 offsets[]; +}; +extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_flwr_2fproto_2frecordset_2eproto; +namespace flwr { +namespace proto { +class Array; +struct ArrayDefaultTypeInternal; +extern ArrayDefaultTypeInternal _Array_default_instance_; +class BoolList; +struct BoolListDefaultTypeInternal; +extern BoolListDefaultTypeInternal _BoolList_default_instance_; +class BytesList; +struct BytesListDefaultTypeInternal; +extern BytesListDefaultTypeInternal _BytesList_default_instance_; +class ConfigsRecord; +struct ConfigsRecordDefaultTypeInternal; +extern ConfigsRecordDefaultTypeInternal _ConfigsRecord_default_instance_; +class ConfigsRecordValue; +struct ConfigsRecordValueDefaultTypeInternal; +extern ConfigsRecordValueDefaultTypeInternal _ConfigsRecordValue_default_instance_; +class ConfigsRecord_DataEntry_DoNotUse; +struct ConfigsRecord_DataEntry_DoNotUseDefaultTypeInternal; +extern ConfigsRecord_DataEntry_DoNotUseDefaultTypeInternal _ConfigsRecord_DataEntry_DoNotUse_default_instance_; +class DoubleList; +struct DoubleListDefaultTypeInternal; +extern DoubleListDefaultTypeInternal _DoubleList_default_instance_; +class MetricsRecord; +struct MetricsRecordDefaultTypeInternal; +extern MetricsRecordDefaultTypeInternal _MetricsRecord_default_instance_; +class MetricsRecordValue; +struct 
MetricsRecordValueDefaultTypeInternal; +extern MetricsRecordValueDefaultTypeInternal _MetricsRecordValue_default_instance_; +class MetricsRecord_DataEntry_DoNotUse; +struct MetricsRecord_DataEntry_DoNotUseDefaultTypeInternal; +extern MetricsRecord_DataEntry_DoNotUseDefaultTypeInternal _MetricsRecord_DataEntry_DoNotUse_default_instance_; +class ParametersRecord; +struct ParametersRecordDefaultTypeInternal; +extern ParametersRecordDefaultTypeInternal _ParametersRecord_default_instance_; +class RecordSet; +struct RecordSetDefaultTypeInternal; +extern RecordSetDefaultTypeInternal _RecordSet_default_instance_; +class RecordSet_ConfigsEntry_DoNotUse; +struct RecordSet_ConfigsEntry_DoNotUseDefaultTypeInternal; +extern RecordSet_ConfigsEntry_DoNotUseDefaultTypeInternal _RecordSet_ConfigsEntry_DoNotUse_default_instance_; +class RecordSet_MetricsEntry_DoNotUse; +struct RecordSet_MetricsEntry_DoNotUseDefaultTypeInternal; +extern RecordSet_MetricsEntry_DoNotUseDefaultTypeInternal _RecordSet_MetricsEntry_DoNotUse_default_instance_; +class RecordSet_ParametersEntry_DoNotUse; +struct RecordSet_ParametersEntry_DoNotUseDefaultTypeInternal; +extern RecordSet_ParametersEntry_DoNotUseDefaultTypeInternal _RecordSet_ParametersEntry_DoNotUse_default_instance_; +class Sint64List; +struct Sint64ListDefaultTypeInternal; +extern Sint64ListDefaultTypeInternal _Sint64List_default_instance_; +class StringList; +struct StringListDefaultTypeInternal; +extern StringListDefaultTypeInternal _StringList_default_instance_; +} // namespace proto +} // namespace flwr +PROTOBUF_NAMESPACE_OPEN +template<> ::flwr::proto::Array* Arena::CreateMaybeMessage<::flwr::proto::Array>(Arena*); +template<> ::flwr::proto::BoolList* Arena::CreateMaybeMessage<::flwr::proto::BoolList>(Arena*); +template<> ::flwr::proto::BytesList* Arena::CreateMaybeMessage<::flwr::proto::BytesList>(Arena*); +template<> ::flwr::proto::ConfigsRecord* Arena::CreateMaybeMessage<::flwr::proto::ConfigsRecord>(Arena*); +template<> 
::flwr::proto::ConfigsRecordValue* Arena::CreateMaybeMessage<::flwr::proto::ConfigsRecordValue>(Arena*); +template<> ::flwr::proto::ConfigsRecord_DataEntry_DoNotUse* Arena::CreateMaybeMessage<::flwr::proto::ConfigsRecord_DataEntry_DoNotUse>(Arena*); +template<> ::flwr::proto::DoubleList* Arena::CreateMaybeMessage<::flwr::proto::DoubleList>(Arena*); +template<> ::flwr::proto::MetricsRecord* Arena::CreateMaybeMessage<::flwr::proto::MetricsRecord>(Arena*); +template<> ::flwr::proto::MetricsRecordValue* Arena::CreateMaybeMessage<::flwr::proto::MetricsRecordValue>(Arena*); +template<> ::flwr::proto::MetricsRecord_DataEntry_DoNotUse* Arena::CreateMaybeMessage<::flwr::proto::MetricsRecord_DataEntry_DoNotUse>(Arena*); +template<> ::flwr::proto::ParametersRecord* Arena::CreateMaybeMessage<::flwr::proto::ParametersRecord>(Arena*); +template<> ::flwr::proto::RecordSet* Arena::CreateMaybeMessage<::flwr::proto::RecordSet>(Arena*); +template<> ::flwr::proto::RecordSet_ConfigsEntry_DoNotUse* Arena::CreateMaybeMessage<::flwr::proto::RecordSet_ConfigsEntry_DoNotUse>(Arena*); +template<> ::flwr::proto::RecordSet_MetricsEntry_DoNotUse* Arena::CreateMaybeMessage<::flwr::proto::RecordSet_MetricsEntry_DoNotUse>(Arena*); +template<> ::flwr::proto::RecordSet_ParametersEntry_DoNotUse* Arena::CreateMaybeMessage<::flwr::proto::RecordSet_ParametersEntry_DoNotUse>(Arena*); +template<> ::flwr::proto::Sint64List* Arena::CreateMaybeMessage<::flwr::proto::Sint64List>(Arena*); +template<> ::flwr::proto::StringList* Arena::CreateMaybeMessage<::flwr::proto::StringList>(Arena*); +PROTOBUF_NAMESPACE_CLOSE +namespace flwr { +namespace proto { + +// =================================================================== + +class DoubleList final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.DoubleList) */ { + public: + inline DoubleList() : DoubleList(nullptr) {} + ~DoubleList() override; + explicit constexpr 
DoubleList(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + DoubleList(const DoubleList& from); + DoubleList(DoubleList&& from) noexcept + : DoubleList() { + *this = ::std::move(from); + } + + inline DoubleList& operator=(const DoubleList& from) { + CopyFrom(from); + return *this; + } + inline DoubleList& operator=(DoubleList&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const DoubleList& default_instance() { + return *internal_default_instance(); + } + static inline const DoubleList* internal_default_instance() { + return reinterpret_cast( + &_DoubleList_default_instance_); + } + static constexpr int kIndexInFileMessages = + 0; + + friend void swap(DoubleList& a, DoubleList& b) { + a.Swap(&b); + } + inline void Swap(DoubleList* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(DoubleList* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline DoubleList* New() const final { + return new DoubleList(); + } + + DoubleList* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using 
::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const DoubleList& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const DoubleList& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(DoubleList* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.DoubleList"; + } + protected: + explicit DoubleList(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kValsFieldNumber = 1, + }; + // repeated double vals = 1; + int vals_size() const; + private: + int _internal_vals_size() const; + public: + void clear_vals(); + private: + double _internal_vals(int index) const; + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& + _internal_vals() const; + void 
_internal_add_vals(double value); + ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* + _internal_mutable_vals(); + public: + double vals(int index) const; + void set_vals(int index, double value); + void add_vals(double value); + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& + vals() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* + mutable_vals(); + + // @@protoc_insertion_point(class_scope:flwr.proto.DoubleList) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< double > vals_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// ------------------------------------------------------------------- + +class Sint64List final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.Sint64List) */ { + public: + inline Sint64List() : Sint64List(nullptr) {} + ~Sint64List() override; + explicit constexpr Sint64List(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + Sint64List(const Sint64List& from); + Sint64List(Sint64List&& from) noexcept + : Sint64List() { + *this = ::std::move(from); + } + + inline Sint64List& operator=(const Sint64List& from) { + CopyFrom(from); + return *this; + } + inline Sint64List& operator=(Sint64List&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; 
+ } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const Sint64List& default_instance() { + return *internal_default_instance(); + } + static inline const Sint64List* internal_default_instance() { + return reinterpret_cast( + &_Sint64List_default_instance_); + } + static constexpr int kIndexInFileMessages = + 1; + + friend void swap(Sint64List& a, Sint64List& b) { + a.Swap(&b); + } + inline void Swap(Sint64List* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(Sint64List* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline Sint64List* New() const final { + return new Sint64List(); + } + + Sint64List* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const Sint64List& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const Sint64List& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) 
const final; + void InternalSwap(Sint64List* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.Sint64List"; + } + protected: + explicit Sint64List(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kValsFieldNumber = 1, + }; + // repeated sint64 vals = 1; + int vals_size() const; + private: + int _internal_vals_size() const; + public: + void clear_vals(); + private: + ::PROTOBUF_NAMESPACE_ID::int64 _internal_vals(int index) const; + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >& + _internal_vals() const; + void _internal_add_vals(::PROTOBUF_NAMESPACE_ID::int64 value); + ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >* + _internal_mutable_vals(); + public: + ::PROTOBUF_NAMESPACE_ID::int64 vals(int index) const; + void set_vals(int index, ::PROTOBUF_NAMESPACE_ID::int64 value); + void add_vals(::PROTOBUF_NAMESPACE_ID::int64 value); + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >& + vals() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >* + mutable_vals(); + + // @@protoc_insertion_point(class_scope:flwr.proto.Sint64List) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< 
::PROTOBUF_NAMESPACE_ID::int64 > vals_; + mutable std::atomic _vals_cached_byte_size_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// ------------------------------------------------------------------- + +class BoolList final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.BoolList) */ { + public: + inline BoolList() : BoolList(nullptr) {} + ~BoolList() override; + explicit constexpr BoolList(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + BoolList(const BoolList& from); + BoolList(BoolList&& from) noexcept + : BoolList() { + *this = ::std::move(from); + } + + inline BoolList& operator=(const BoolList& from) { + CopyFrom(from); + return *this; + } + inline BoolList& operator=(BoolList&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const BoolList& default_instance() { + return *internal_default_instance(); + } + static inline const BoolList* internal_default_instance() { + return reinterpret_cast( + &_BoolList_default_instance_); + } + static constexpr int kIndexInFileMessages = + 2; + + friend void swap(BoolList& a, BoolList& b) { + a.Swap(&b); + } + inline void Swap(BoolList* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + 
::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(BoolList* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline BoolList* New() const final { + return new BoolList(); + } + + BoolList* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const BoolList& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const BoolList& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(BoolList* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.BoolList"; + } + protected: + explicit BoolList(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // 
nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kValsFieldNumber = 1, + }; + // repeated bool vals = 1; + int vals_size() const; + private: + int _internal_vals_size() const; + public: + void clear_vals(); + private: + bool _internal_vals(int index) const; + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >& + _internal_vals() const; + void _internal_add_vals(bool value); + ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >* + _internal_mutable_vals(); + public: + bool vals(int index) const; + void set_vals(int index, bool value); + void add_vals(bool value); + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >& + vals() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >* + mutable_vals(); + + // @@protoc_insertion_point(class_scope:flwr.proto.BoolList) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool > vals_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// ------------------------------------------------------------------- + +class StringList final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.StringList) */ { + public: + inline StringList() : StringList(nullptr) {} + ~StringList() override; + explicit constexpr StringList(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + StringList(const StringList& from); + StringList(StringList&& from) noexcept + : StringList() { + *this = ::std::move(from); + } + + inline StringList& operator=(const StringList& from) { + CopyFrom(from); + return *this; + } + inline StringList& operator=(StringList&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == 
from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const StringList& default_instance() { + return *internal_default_instance(); + } + static inline const StringList* internal_default_instance() { + return reinterpret_cast( + &_StringList_default_instance_); + } + static constexpr int kIndexInFileMessages = + 3; + + friend void swap(StringList& a, StringList& b) { + a.Swap(&b); + } + inline void Swap(StringList* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(StringList* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline StringList* New() const final { + return new StringList(); + } + + StringList* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const StringList& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const StringList& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() 
const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(StringList* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.StringList"; + } + protected: + explicit StringList(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kValsFieldNumber = 1, + }; + // repeated string vals = 1; + int vals_size() const; + private: + int _internal_vals_size() const; + public: + void clear_vals(); + const std::string& vals(int index) const; + std::string* mutable_vals(int index); + void set_vals(int index, const std::string& value); + void set_vals(int index, std::string&& value); + void set_vals(int index, const char* value); + void set_vals(int index, const char* value, size_t size); + std::string* add_vals(); + void add_vals(const std::string& value); + void add_vals(std::string&& value); + void add_vals(const char* value); + void add_vals(const char* value, size_t size); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& vals() const; + 
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* mutable_vals(); + private: + const std::string& _internal_vals(int index) const; + std::string* _internal_add_vals(); + public: + + // @@protoc_insertion_point(class_scope:flwr.proto.StringList) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField vals_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// ------------------------------------------------------------------- + +class BytesList final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.BytesList) */ { + public: + inline BytesList() : BytesList(nullptr) {} + ~BytesList() override; + explicit constexpr BytesList(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + BytesList(const BytesList& from); + BytesList(BytesList&& from) noexcept + : BytesList() { + *this = ::std::move(from); + } + + inline BytesList& operator=(const BytesList& from) { + CopyFrom(from); + return *this; + } + inline BytesList& operator=(BytesList&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const BytesList& default_instance() { + return *internal_default_instance(); + } + 
static inline const BytesList* internal_default_instance() { + return reinterpret_cast( + &_BytesList_default_instance_); + } + static constexpr int kIndexInFileMessages = + 4; + + friend void swap(BytesList& a, BytesList& b) { + a.Swap(&b); + } + inline void Swap(BytesList* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(BytesList* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline BytesList* New() const final { + return new BytesList(); + } + + BytesList* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const BytesList& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const BytesList& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(BytesList* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.BytesList"; + } + protected: + 
explicit BytesList(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kValsFieldNumber = 1, + }; + // repeated bytes vals = 1; + int vals_size() const; + private: + int _internal_vals_size() const; + public: + void clear_vals(); + const std::string& vals(int index) const; + std::string* mutable_vals(int index); + void set_vals(int index, const std::string& value); + void set_vals(int index, std::string&& value); + void set_vals(int index, const char* value); + void set_vals(int index, const void* value, size_t size); + std::string* add_vals(); + void add_vals(const std::string& value); + void add_vals(std::string&& value); + void add_vals(const char* value); + void add_vals(const void* value, size_t size); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& vals() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* mutable_vals(); + private: + const std::string& _internal_vals(int index) const; + std::string* _internal_add_vals(); + public: + + // @@protoc_insertion_point(class_scope:flwr.proto.BytesList) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField vals_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// ------------------------------------------------------------------- + +class Array final : + 
public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.Array) */ { + public: + inline Array() : Array(nullptr) {} + ~Array() override; + explicit constexpr Array(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + Array(const Array& from); + Array(Array&& from) noexcept + : Array() { + *this = ::std::move(from); + } + + inline Array& operator=(const Array& from) { + CopyFrom(from); + return *this; + } + inline Array& operator=(Array&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const Array& default_instance() { + return *internal_default_instance(); + } + static inline const Array* internal_default_instance() { + return reinterpret_cast( + &_Array_default_instance_); + } + static constexpr int kIndexInFileMessages = + 5; + + friend void swap(Array& a, Array& b) { + a.Swap(&b); + } + inline void Swap(Array* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(Array* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline Array* New() const final { + return new Array(); + } + + Array* 
New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const Array& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const Array& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(Array* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.Array"; + } + protected: + explicit Array(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kShapeFieldNumber = 2, + kDtypeFieldNumber = 1, + kStypeFieldNumber = 3, + kDataFieldNumber = 4, + }; + // repeated int32 shape = 2; + int shape_size() const; + private: + int _internal_shape_size() const; + public: + void clear_shape(); + 
private: + ::PROTOBUF_NAMESPACE_ID::int32 _internal_shape(int index) const; + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int32 >& + _internal_shape() const; + void _internal_add_shape(::PROTOBUF_NAMESPACE_ID::int32 value); + ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int32 >* + _internal_mutable_shape(); + public: + ::PROTOBUF_NAMESPACE_ID::int32 shape(int index) const; + void set_shape(int index, ::PROTOBUF_NAMESPACE_ID::int32 value); + void add_shape(::PROTOBUF_NAMESPACE_ID::int32 value); + const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int32 >& + shape() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int32 >* + mutable_shape(); + + // string dtype = 1; + void clear_dtype(); + const std::string& dtype() const; + template + void set_dtype(ArgT0&& arg0, ArgT... args); + std::string* mutable_dtype(); + PROTOBUF_MUST_USE_RESULT std::string* release_dtype(); + void set_allocated_dtype(std::string* dtype); + private: + const std::string& _internal_dtype() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_dtype(const std::string& value); + std::string* _internal_mutable_dtype(); + public: + + // string stype = 3; + void clear_stype(); + const std::string& stype() const; + template + void set_stype(ArgT0&& arg0, ArgT... args); + std::string* mutable_stype(); + PROTOBUF_MUST_USE_RESULT std::string* release_stype(); + void set_allocated_stype(std::string* stype); + private: + const std::string& _internal_stype() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_stype(const std::string& value); + std::string* _internal_mutable_stype(); + public: + + // bytes data = 4; + void clear_data(); + const std::string& data() const; + template + void set_data(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_data(); + PROTOBUF_MUST_USE_RESULT std::string* release_data(); + void set_allocated_data(std::string* data); + private: + const std::string& _internal_data() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_data(const std::string& value); + std::string* _internal_mutable_data(); + public: + + // @@protoc_insertion_point(class_scope:flwr.proto.Array) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int32 > shape_; + mutable std::atomic _shape_cached_byte_size_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr dtype_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr stype_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr data_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// ------------------------------------------------------------------- + +class MetricsRecordValue final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.MetricsRecordValue) */ { + public: + inline MetricsRecordValue() : MetricsRecordValue(nullptr) {} + ~MetricsRecordValue() override; + explicit constexpr MetricsRecordValue(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + MetricsRecordValue(const MetricsRecordValue& from); + MetricsRecordValue(MetricsRecordValue&& from) noexcept + : MetricsRecordValue() { + *this = ::std::move(from); + } + + inline MetricsRecordValue& operator=(const MetricsRecordValue& from) { + CopyFrom(from); + return *this; + } + inline MetricsRecordValue& operator=(MetricsRecordValue&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // 
!PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const MetricsRecordValue& default_instance() { + return *internal_default_instance(); + } + enum ValueCase { + kDouble = 1, + kSint64 = 2, + kDoubleList = 21, + kSint64List = 22, + VALUE_NOT_SET = 0, + }; + + static inline const MetricsRecordValue* internal_default_instance() { + return reinterpret_cast( + &_MetricsRecordValue_default_instance_); + } + static constexpr int kIndexInFileMessages = + 6; + + friend void swap(MetricsRecordValue& a, MetricsRecordValue& b) { + a.Swap(&b); + } + inline void Swap(MetricsRecordValue* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(MetricsRecordValue* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline MetricsRecordValue* New() const final { + return new MetricsRecordValue(); + } + + MetricsRecordValue* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const MetricsRecordValue& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const MetricsRecordValue& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + 
PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(MetricsRecordValue* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.MetricsRecordValue"; + } + protected: + explicit MetricsRecordValue(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kDoubleFieldNumber = 1, + kSint64FieldNumber = 2, + kDoubleListFieldNumber = 21, + kSint64ListFieldNumber = 22, + }; + // double double = 1; + bool has_double_() const; + private: + bool _internal_has_double_() const; + public: + void clear_double_(); + double double_() const; + void set_double_(double value); + private: + double _internal_double_() const; + void _internal_set_double_(double value); + public: + + // sint64 sint64 = 2; + bool has_sint64() const; + private: + bool _internal_has_sint64() const; + public: + void clear_sint64(); + ::PROTOBUF_NAMESPACE_ID::int64 sint64() const; + void 
set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value); + private: + ::PROTOBUF_NAMESPACE_ID::int64 _internal_sint64() const; + void _internal_set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value); + public: + + // .flwr.proto.DoubleList double_list = 21; + bool has_double_list() const; + private: + bool _internal_has_double_list() const; + public: + void clear_double_list(); + const ::flwr::proto::DoubleList& double_list() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::DoubleList* release_double_list(); + ::flwr::proto::DoubleList* mutable_double_list(); + void set_allocated_double_list(::flwr::proto::DoubleList* double_list); + private: + const ::flwr::proto::DoubleList& _internal_double_list() const; + ::flwr::proto::DoubleList* _internal_mutable_double_list(); + public: + void unsafe_arena_set_allocated_double_list( + ::flwr::proto::DoubleList* double_list); + ::flwr::proto::DoubleList* unsafe_arena_release_double_list(); + + // .flwr.proto.Sint64List sint64_list = 22; + bool has_sint64_list() const; + private: + bool _internal_has_sint64_list() const; + public: + void clear_sint64_list(); + const ::flwr::proto::Sint64List& sint64_list() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::Sint64List* release_sint64_list(); + ::flwr::proto::Sint64List* mutable_sint64_list(); + void set_allocated_sint64_list(::flwr::proto::Sint64List* sint64_list); + private: + const ::flwr::proto::Sint64List& _internal_sint64_list() const; + ::flwr::proto::Sint64List* _internal_mutable_sint64_list(); + public: + void unsafe_arena_set_allocated_sint64_list( + ::flwr::proto::Sint64List* sint64_list); + ::flwr::proto::Sint64List* unsafe_arena_release_sint64_list(); + + void clear_value(); + ValueCase value_case() const; + // @@protoc_insertion_point(class_scope:flwr.proto.MetricsRecordValue) + private: + class _Internal; + void set_has_double_(); + void set_has_sint64(); + void set_has_double_list(); + void set_has_sint64_list(); + + inline bool has_value() const; + inline void 
clear_has_value(); + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + union ValueUnion { + constexpr ValueUnion() : _constinit_{} {} + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_; + double double__; + ::PROTOBUF_NAMESPACE_ID::int64 sint64_; + ::flwr::proto::DoubleList* double_list_; + ::flwr::proto::Sint64List* sint64_list_; + } value_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + ::PROTOBUF_NAMESPACE_ID::uint32 _oneof_case_[1]; + + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// ------------------------------------------------------------------- + +class ConfigsRecordValue final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.ConfigsRecordValue) */ { + public: + inline ConfigsRecordValue() : ConfigsRecordValue(nullptr) {} + ~ConfigsRecordValue() override; + explicit constexpr ConfigsRecordValue(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + ConfigsRecordValue(const ConfigsRecordValue& from); + ConfigsRecordValue(ConfigsRecordValue&& from) noexcept + : ConfigsRecordValue() { + *this = ::std::move(from); + } + + inline ConfigsRecordValue& operator=(const ConfigsRecordValue& from) { + CopyFrom(from); + return *this; + } + inline ConfigsRecordValue& operator=(ConfigsRecordValue&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const 
::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const ConfigsRecordValue& default_instance() { + return *internal_default_instance(); + } + enum ValueCase { + kDouble = 1, + kSint64 = 2, + kBool = 3, + kString = 4, + kBytes = 5, + kDoubleList = 21, + kSint64List = 22, + kBoolList = 23, + kStringList = 24, + kBytesList = 25, + VALUE_NOT_SET = 0, + }; + + static inline const ConfigsRecordValue* internal_default_instance() { + return reinterpret_cast( + &_ConfigsRecordValue_default_instance_); + } + static constexpr int kIndexInFileMessages = + 7; + + friend void swap(ConfigsRecordValue& a, ConfigsRecordValue& b) { + a.Swap(&b); + } + inline void Swap(ConfigsRecordValue* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(ConfigsRecordValue* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline ConfigsRecordValue* New() const final { + return new ConfigsRecordValue(); + } + + ConfigsRecordValue* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const ConfigsRecordValue& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const ConfigsRecordValue& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + 
::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(ConfigsRecordValue* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.ConfigsRecordValue"; + } + protected: + explicit ConfigsRecordValue(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kDoubleFieldNumber = 1, + kSint64FieldNumber = 2, + kBoolFieldNumber = 3, + kStringFieldNumber = 4, + kBytesFieldNumber = 5, + kDoubleListFieldNumber = 21, + kSint64ListFieldNumber = 22, + kBoolListFieldNumber = 23, + kStringListFieldNumber = 24, + kBytesListFieldNumber = 25, + }; + // double double = 1; + bool has_double_() const; + private: + bool _internal_has_double_() const; + public: + void clear_double_(); + double double_() const; + void set_double_(double value); + private: + double _internal_double_() const; + void _internal_set_double_(double value); + public: + + // sint64 sint64 = 2; + bool has_sint64() const; + private: + bool _internal_has_sint64() const; + public: + void clear_sint64(); + ::PROTOBUF_NAMESPACE_ID::int64 sint64() const; + void set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value); + private: + 
::PROTOBUF_NAMESPACE_ID::int64 _internal_sint64() const; + void _internal_set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value); + public: + + // bool bool = 3; + bool has_bool_() const; + private: + bool _internal_has_bool_() const; + public: + void clear_bool_(); + bool bool_() const; + void set_bool_(bool value); + private: + bool _internal_bool_() const; + void _internal_set_bool_(bool value); + public: + + // string string = 4; + bool has_string() const; + private: + bool _internal_has_string() const; + public: + void clear_string(); + const std::string& string() const; + template + void set_string(ArgT0&& arg0, ArgT... args); + std::string* mutable_string(); + PROTOBUF_MUST_USE_RESULT std::string* release_string(); + void set_allocated_string(std::string* string); + private: + const std::string& _internal_string() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_string(const std::string& value); + std::string* _internal_mutable_string(); + public: + + // bytes bytes = 5; + bool has_bytes() const; + private: + bool _internal_has_bytes() const; + public: + void clear_bytes(); + const std::string& bytes() const; + template + void set_bytes(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_bytes(); + PROTOBUF_MUST_USE_RESULT std::string* release_bytes(); + void set_allocated_bytes(std::string* bytes); + private: + const std::string& _internal_bytes() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_bytes(const std::string& value); + std::string* _internal_mutable_bytes(); + public: + + // .flwr.proto.DoubleList double_list = 21; + bool has_double_list() const; + private: + bool _internal_has_double_list() const; + public: + void clear_double_list(); + const ::flwr::proto::DoubleList& double_list() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::DoubleList* release_double_list(); + ::flwr::proto::DoubleList* mutable_double_list(); + void set_allocated_double_list(::flwr::proto::DoubleList* double_list); + private: + const ::flwr::proto::DoubleList& _internal_double_list() const; + ::flwr::proto::DoubleList* _internal_mutable_double_list(); + public: + void unsafe_arena_set_allocated_double_list( + ::flwr::proto::DoubleList* double_list); + ::flwr::proto::DoubleList* unsafe_arena_release_double_list(); + + // .flwr.proto.Sint64List sint64_list = 22; + bool has_sint64_list() const; + private: + bool _internal_has_sint64_list() const; + public: + void clear_sint64_list(); + const ::flwr::proto::Sint64List& sint64_list() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::Sint64List* release_sint64_list(); + ::flwr::proto::Sint64List* mutable_sint64_list(); + void set_allocated_sint64_list(::flwr::proto::Sint64List* sint64_list); + private: + const ::flwr::proto::Sint64List& _internal_sint64_list() const; + ::flwr::proto::Sint64List* _internal_mutable_sint64_list(); + public: + void unsafe_arena_set_allocated_sint64_list( + ::flwr::proto::Sint64List* sint64_list); + ::flwr::proto::Sint64List* unsafe_arena_release_sint64_list(); + + // .flwr.proto.BoolList bool_list = 23; + bool has_bool_list() const; + private: + bool _internal_has_bool_list() const; + public: + void clear_bool_list(); + const ::flwr::proto::BoolList& 
bool_list() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::BoolList* release_bool_list(); + ::flwr::proto::BoolList* mutable_bool_list(); + void set_allocated_bool_list(::flwr::proto::BoolList* bool_list); + private: + const ::flwr::proto::BoolList& _internal_bool_list() const; + ::flwr::proto::BoolList* _internal_mutable_bool_list(); + public: + void unsafe_arena_set_allocated_bool_list( + ::flwr::proto::BoolList* bool_list); + ::flwr::proto::BoolList* unsafe_arena_release_bool_list(); + + // .flwr.proto.StringList string_list = 24; + bool has_string_list() const; + private: + bool _internal_has_string_list() const; + public: + void clear_string_list(); + const ::flwr::proto::StringList& string_list() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::StringList* release_string_list(); + ::flwr::proto::StringList* mutable_string_list(); + void set_allocated_string_list(::flwr::proto::StringList* string_list); + private: + const ::flwr::proto::StringList& _internal_string_list() const; + ::flwr::proto::StringList* _internal_mutable_string_list(); + public: + void unsafe_arena_set_allocated_string_list( + ::flwr::proto::StringList* string_list); + ::flwr::proto::StringList* unsafe_arena_release_string_list(); + + // .flwr.proto.BytesList bytes_list = 25; + bool has_bytes_list() const; + private: + bool _internal_has_bytes_list() const; + public: + void clear_bytes_list(); + const ::flwr::proto::BytesList& bytes_list() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::BytesList* release_bytes_list(); + ::flwr::proto::BytesList* mutable_bytes_list(); + void set_allocated_bytes_list(::flwr::proto::BytesList* bytes_list); + private: + const ::flwr::proto::BytesList& _internal_bytes_list() const; + ::flwr::proto::BytesList* _internal_mutable_bytes_list(); + public: + void unsafe_arena_set_allocated_bytes_list( + ::flwr::proto::BytesList* bytes_list); + ::flwr::proto::BytesList* unsafe_arena_release_bytes_list(); + + void clear_value(); + ValueCase value_case() const; + // 
@@protoc_insertion_point(class_scope:flwr.proto.ConfigsRecordValue) + private: + class _Internal; + void set_has_double_(); + void set_has_sint64(); + void set_has_bool_(); + void set_has_string(); + void set_has_bytes(); + void set_has_double_list(); + void set_has_sint64_list(); + void set_has_bool_list(); + void set_has_string_list(); + void set_has_bytes_list(); + + inline bool has_value() const; + inline void clear_has_value(); + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + union ValueUnion { + constexpr ValueUnion() : _constinit_{} {} + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_; + double double__; + ::PROTOBUF_NAMESPACE_ID::int64 sint64_; + bool bool__; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr string_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr bytes_; + ::flwr::proto::DoubleList* double_list_; + ::flwr::proto::Sint64List* sint64_list_; + ::flwr::proto::BoolList* bool_list_; + ::flwr::proto::StringList* string_list_; + ::flwr::proto::BytesList* bytes_list_; + } value_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + ::PROTOBUF_NAMESPACE_ID::uint32 _oneof_case_[1]; + + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// ------------------------------------------------------------------- + +class ParametersRecord final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.ParametersRecord) */ { + public: + inline ParametersRecord() : ParametersRecord(nullptr) {} + ~ParametersRecord() override; + explicit constexpr ParametersRecord(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + ParametersRecord(const ParametersRecord& from); + ParametersRecord(ParametersRecord&& from) noexcept + : ParametersRecord() { + *this = ::std::move(from); + } + + inline ParametersRecord& operator=(const ParametersRecord& from) { + 
CopyFrom(from); + return *this; + } + inline ParametersRecord& operator=(ParametersRecord&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const ParametersRecord& default_instance() { + return *internal_default_instance(); + } + static inline const ParametersRecord* internal_default_instance() { + return reinterpret_cast( + &_ParametersRecord_default_instance_); + } + static constexpr int kIndexInFileMessages = + 8; + + friend void swap(ParametersRecord& a, ParametersRecord& b) { + a.Swap(&b); + } + inline void Swap(ParametersRecord* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(ParametersRecord* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline ParametersRecord* New() const final { + return new ParametersRecord(); + } + + ParametersRecord* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const ParametersRecord& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const ParametersRecord& from); + 
private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(ParametersRecord* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.ParametersRecord"; + } + protected: + explicit ParametersRecord(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kDataKeysFieldNumber = 1, + kDataValuesFieldNumber = 2, + }; + // repeated string data_keys = 1; + int data_keys_size() const; + private: + int _internal_data_keys_size() const; + public: + void clear_data_keys(); + const std::string& data_keys(int index) const; + std::string* mutable_data_keys(int index); + void set_data_keys(int index, const std::string& value); + void set_data_keys(int index, std::string&& value); + void set_data_keys(int index, const char* value); + void 
set_data_keys(int index, const char* value, size_t size); + std::string* add_data_keys(); + void add_data_keys(const std::string& value); + void add_data_keys(std::string&& value); + void add_data_keys(const char* value); + void add_data_keys(const char* value, size_t size); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& data_keys() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* mutable_data_keys(); + private: + const std::string& _internal_data_keys(int index) const; + std::string* _internal_add_data_keys(); + public: + + // repeated .flwr.proto.Array data_values = 2; + int data_values_size() const; + private: + int _internal_data_values_size() const; + public: + void clear_data_values(); + ::flwr::proto::Array* mutable_data_values(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::Array >* + mutable_data_values(); + private: + const ::flwr::proto::Array& _internal_data_values(int index) const; + ::flwr::proto::Array* _internal_add_data_values(); + public: + const ::flwr::proto::Array& data_values(int index) const; + ::flwr::proto::Array* add_data_values(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::Array >& + data_values() const; + + // @@protoc_insertion_point(class_scope:flwr.proto.ParametersRecord) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField data_keys_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::Array > data_values_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// ------------------------------------------------------------------- + +class MetricsRecord_DataEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntry { +public: + typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntry SuperType; + 
MetricsRecord_DataEntry_DoNotUse(); + explicit constexpr MetricsRecord_DataEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + explicit MetricsRecord_DataEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void MergeFrom(const MetricsRecord_DataEntry_DoNotUse& other); + static const MetricsRecord_DataEntry_DoNotUse* internal_default_instance() { return reinterpret_cast(&_MetricsRecord_DataEntry_DoNotUse_default_instance_); } + static bool ValidateKey(std::string* s) { + return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "flwr.proto.MetricsRecord.DataEntry.key"); + } + static bool ValidateValue(void*) { return true; } + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; +}; + +// ------------------------------------------------------------------- + +class MetricsRecord final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.MetricsRecord) */ { + public: + inline MetricsRecord() : MetricsRecord(nullptr) {} + ~MetricsRecord() override; + explicit constexpr MetricsRecord(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + MetricsRecord(const MetricsRecord& from); + MetricsRecord(MetricsRecord&& from) noexcept + : MetricsRecord() { + *this = ::std::move(from); + } + + inline MetricsRecord& operator=(const MetricsRecord& from) { + CopyFrom(from); + return *this; + } + inline MetricsRecord& operator=(MetricsRecord&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + 
static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const MetricsRecord& default_instance() { + return *internal_default_instance(); + } + static inline const MetricsRecord* internal_default_instance() { + return reinterpret_cast( + &_MetricsRecord_default_instance_); + } + static constexpr int kIndexInFileMessages = + 10; + + friend void swap(MetricsRecord& a, MetricsRecord& b) { + a.Swap(&b); + } + inline void Swap(MetricsRecord* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(MetricsRecord* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline MetricsRecord* New() const final { + return new MetricsRecord(); + } + + MetricsRecord* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const MetricsRecord& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const MetricsRecord& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const 
final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(MetricsRecord* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.MetricsRecord"; + } + protected: + explicit MetricsRecord(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + + // accessors ------------------------------------------------------- + + enum : int { + kDataFieldNumber = 1, + }; + // map data = 1; + int data_size() const; + private: + int _internal_data_size() const; + public: + void clear_data(); + private: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >& + _internal_data() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >* + _internal_mutable_data(); + public: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >& + data() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >* + mutable_data(); + + // @@protoc_insertion_point(class_scope:flwr.proto.MetricsRecord) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::internal::MapField< + MetricsRecord_DataEntry_DoNotUse, + std::string, ::flwr::proto::MetricsRecordValue, + 
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_MESSAGE> data_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// ------------------------------------------------------------------- + +class ConfigsRecord_DataEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntry { +public: + typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntry SuperType; + ConfigsRecord_DataEntry_DoNotUse(); + explicit constexpr ConfigsRecord_DataEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + explicit ConfigsRecord_DataEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void MergeFrom(const ConfigsRecord_DataEntry_DoNotUse& other); + static const ConfigsRecord_DataEntry_DoNotUse* internal_default_instance() { return reinterpret_cast(&_ConfigsRecord_DataEntry_DoNotUse_default_instance_); } + static bool ValidateKey(std::string* s) { + return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "flwr.proto.ConfigsRecord.DataEntry.key"); + } + static bool ValidateValue(void*) { return true; } + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; +}; + +// ------------------------------------------------------------------- + +class ConfigsRecord final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.ConfigsRecord) */ { + public: + inline ConfigsRecord() : ConfigsRecord(nullptr) {} + ~ConfigsRecord() override; + explicit constexpr ConfigsRecord(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + ConfigsRecord(const ConfigsRecord& from); + ConfigsRecord(ConfigsRecord&& from) noexcept + : ConfigsRecord() { + *this = ::std::move(from); + } + + inline ConfigsRecord& 
operator=(const ConfigsRecord& from) { + CopyFrom(from); + return *this; + } + inline ConfigsRecord& operator=(ConfigsRecord&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const ConfigsRecord& default_instance() { + return *internal_default_instance(); + } + static inline const ConfigsRecord* internal_default_instance() { + return reinterpret_cast( + &_ConfigsRecord_default_instance_); + } + static constexpr int kIndexInFileMessages = + 12; + + friend void swap(ConfigsRecord& a, ConfigsRecord& b) { + a.Swap(&b); + } + inline void Swap(ConfigsRecord* other) { + if (other == this) return; + if (GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(ConfigsRecord* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline ConfigsRecord* New() const final { + return new ConfigsRecord(); + } + + ConfigsRecord* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const ConfigsRecord& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const ConfigsRecord& from); + 
private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(ConfigsRecord* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.ConfigsRecord"; + } + protected: + explicit ConfigsRecord(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + + // accessors ------------------------------------------------------- + + enum : int { + kDataFieldNumber = 1, + }; + // map data = 1; + int data_size() const; + private: + int _internal_data_size() const; + public: + void clear_data(); + private: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >& + _internal_data() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >* + _internal_mutable_data(); + public: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >& + data() 
const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >* + mutable_data(); + + // @@protoc_insertion_point(class_scope:flwr.proto.ConfigsRecord) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::internal::MapField< + ConfigsRecord_DataEntry_DoNotUse, + std::string, ::flwr::proto::ConfigsRecordValue, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_MESSAGE> data_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// ------------------------------------------------------------------- + +class RecordSet_ParametersEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntry { +public: + typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntry SuperType; + RecordSet_ParametersEntry_DoNotUse(); + explicit constexpr RecordSet_ParametersEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + explicit RecordSet_ParametersEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void MergeFrom(const RecordSet_ParametersEntry_DoNotUse& other); + static const RecordSet_ParametersEntry_DoNotUse* internal_default_instance() { return reinterpret_cast(&_RecordSet_ParametersEntry_DoNotUse_default_instance_); } + static bool ValidateKey(std::string* s) { + return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "flwr.proto.RecordSet.ParametersEntry.key"); + } + static bool ValidateValue(void*) { return true; } + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; +}; + +// 
------------------------------------------------------------------- + +class RecordSet_MetricsEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntry { +public: + typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntry SuperType; + RecordSet_MetricsEntry_DoNotUse(); + explicit constexpr RecordSet_MetricsEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + explicit RecordSet_MetricsEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void MergeFrom(const RecordSet_MetricsEntry_DoNotUse& other); + static const RecordSet_MetricsEntry_DoNotUse* internal_default_instance() { return reinterpret_cast(&_RecordSet_MetricsEntry_DoNotUse_default_instance_); } + static bool ValidateKey(std::string* s) { + return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "flwr.proto.RecordSet.MetricsEntry.key"); + } + static bool ValidateValue(void*) { return true; } + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; +}; + +// ------------------------------------------------------------------- + +class RecordSet_ConfigsEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntry { +public: + typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntry SuperType; + RecordSet_ConfigsEntry_DoNotUse(); + explicit constexpr RecordSet_ConfigsEntry_DoNotUse( + ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + explicit RecordSet_ConfigsEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void MergeFrom(const RecordSet_ConfigsEntry_DoNotUse& other); + static const RecordSet_ConfigsEntry_DoNotUse* internal_default_instance() { return reinterpret_cast(&_RecordSet_ConfigsEntry_DoNotUse_default_instance_); } + static bool ValidateKey(std::string* s) { + return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast(s->size()), 
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "flwr.proto.RecordSet.ConfigsEntry.key"); + } + static bool ValidateValue(void*) { return true; } + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; +}; + +// ------------------------------------------------------------------- + +class RecordSet final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.RecordSet) */ { + public: + inline RecordSet() : RecordSet(nullptr) {} + ~RecordSet() override; + explicit constexpr RecordSet(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + RecordSet(const RecordSet& from); + RecordSet(RecordSet&& from) noexcept + : RecordSet() { + *this = ::std::move(from); + } + + inline RecordSet& operator=(const RecordSet& from) { + CopyFrom(from); + return *this; + } + inline RecordSet& operator=(RecordSet&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const RecordSet& default_instance() { + return *internal_default_instance(); + } + static inline const RecordSet* internal_default_instance() { + return reinterpret_cast( + &_RecordSet_default_instance_); + } + static constexpr int kIndexInFileMessages = + 16; + + friend void swap(RecordSet& a, RecordSet& b) { + a.Swap(&b); + } + inline void Swap(RecordSet* other) { + if (other == this) return; + if 
(GetOwningArena() == other->GetOwningArena()) { + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(RecordSet* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline RecordSet* New() const final { + return new RecordSet(); + } + + RecordSet* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const RecordSet& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const RecordSet& from); + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( + ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(RecordSet* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "flwr.proto.RecordSet"; + } + protected: + explicit RecordSet(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + private: + static void ArenaDtor(void* object); + inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + public: + + static const ClassData _class_data_; + const 
::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + + // accessors ------------------------------------------------------- + + enum : int { + kParametersFieldNumber = 1, + kMetricsFieldNumber = 2, + kConfigsFieldNumber = 3, + }; + // map parameters = 1; + int parameters_size() const; + private: + int _internal_parameters_size() const; + public: + void clear_parameters(); + private: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >& + _internal_parameters() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >* + _internal_mutable_parameters(); + public: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >& + parameters() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >* + mutable_parameters(); + + // map metrics = 2; + int metrics_size() const; + private: + int _internal_metrics_size() const; + public: + void clear_metrics(); + private: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >& + _internal_metrics() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >* + _internal_mutable_metrics(); + public: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >& + metrics() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >* + mutable_metrics(); + + // map configs = 3; + int configs_size() const; + private: + int _internal_configs_size() const; + public: + void clear_configs(); + private: + const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecord >& + _internal_configs() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecord >* + _internal_mutable_configs(); + public: + const ::PROTOBUF_NAMESPACE_ID::Map< 
std::string, ::flwr::proto::ConfigsRecord >& + configs() const; + ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecord >* + mutable_configs(); + + // @@protoc_insertion_point(class_scope:flwr.proto.RecordSet) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + ::PROTOBUF_NAMESPACE_ID::internal::MapField< + RecordSet_ParametersEntry_DoNotUse, + std::string, ::flwr::proto::ParametersRecord, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_MESSAGE> parameters_; + ::PROTOBUF_NAMESPACE_ID::internal::MapField< + RecordSet_MetricsEntry_DoNotUse, + std::string, ::flwr::proto::MetricsRecord, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_MESSAGE> metrics_; + ::PROTOBUF_NAMESPACE_ID::internal::MapField< + RecordSet_ConfigsEntry_DoNotUse, + std::string, ::flwr::proto::ConfigsRecord, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING, + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_MESSAGE> configs_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_flwr_2fproto_2frecordset_2eproto; +}; +// =================================================================== + + +// =================================================================== + +#ifdef __GNUC__ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif // __GNUC__ +// DoubleList + +// repeated double vals = 1; +inline int DoubleList::_internal_vals_size() const { + return vals_.size(); +} +inline int DoubleList::vals_size() const { + return _internal_vals_size(); +} +inline void DoubleList::clear_vals() { + vals_.Clear(); +} +inline double DoubleList::_internal_vals(int index) const { + return vals_.Get(index); +} 
+inline double DoubleList::vals(int index) const { + // @@protoc_insertion_point(field_get:flwr.proto.DoubleList.vals) + return _internal_vals(index); +} +inline void DoubleList::set_vals(int index, double value) { + vals_.Set(index, value); + // @@protoc_insertion_point(field_set:flwr.proto.DoubleList.vals) +} +inline void DoubleList::_internal_add_vals(double value) { + vals_.Add(value); +} +inline void DoubleList::add_vals(double value) { + _internal_add_vals(value); + // @@protoc_insertion_point(field_add:flwr.proto.DoubleList.vals) +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& +DoubleList::_internal_vals() const { + return vals_; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& +DoubleList::vals() const { + // @@protoc_insertion_point(field_list:flwr.proto.DoubleList.vals) + return _internal_vals(); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* +DoubleList::_internal_mutable_vals() { + return &vals_; +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* +DoubleList::mutable_vals() { + // @@protoc_insertion_point(field_mutable_list:flwr.proto.DoubleList.vals) + return _internal_mutable_vals(); +} + +// ------------------------------------------------------------------- + +// Sint64List + +// repeated sint64 vals = 1; +inline int Sint64List::_internal_vals_size() const { + return vals_.size(); +} +inline int Sint64List::vals_size() const { + return _internal_vals_size(); +} +inline void Sint64List::clear_vals() { + vals_.Clear(); +} +inline ::PROTOBUF_NAMESPACE_ID::int64 Sint64List::_internal_vals(int index) const { + return vals_.Get(index); +} +inline ::PROTOBUF_NAMESPACE_ID::int64 Sint64List::vals(int index) const { + // @@protoc_insertion_point(field_get:flwr.proto.Sint64List.vals) + return _internal_vals(index); +} +inline void Sint64List::set_vals(int index, ::PROTOBUF_NAMESPACE_ID::int64 value) { + vals_.Set(index, value); + // @@protoc_insertion_point(field_set:flwr.proto.Sint64List.vals) +} 
+inline void Sint64List::_internal_add_vals(::PROTOBUF_NAMESPACE_ID::int64 value) { + vals_.Add(value); +} +inline void Sint64List::add_vals(::PROTOBUF_NAMESPACE_ID::int64 value) { + _internal_add_vals(value); + // @@protoc_insertion_point(field_add:flwr.proto.Sint64List.vals) +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >& +Sint64List::_internal_vals() const { + return vals_; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >& +Sint64List::vals() const { + // @@protoc_insertion_point(field_list:flwr.proto.Sint64List.vals) + return _internal_vals(); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >* +Sint64List::_internal_mutable_vals() { + return &vals_; +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >* +Sint64List::mutable_vals() { + // @@protoc_insertion_point(field_mutable_list:flwr.proto.Sint64List.vals) + return _internal_mutable_vals(); +} + +// ------------------------------------------------------------------- + +// BoolList + +// repeated bool vals = 1; +inline int BoolList::_internal_vals_size() const { + return vals_.size(); +} +inline int BoolList::vals_size() const { + return _internal_vals_size(); +} +inline void BoolList::clear_vals() { + vals_.Clear(); +} +inline bool BoolList::_internal_vals(int index) const { + return vals_.Get(index); +} +inline bool BoolList::vals(int index) const { + // @@protoc_insertion_point(field_get:flwr.proto.BoolList.vals) + return _internal_vals(index); +} +inline void BoolList::set_vals(int index, bool value) { + vals_.Set(index, value); + // @@protoc_insertion_point(field_set:flwr.proto.BoolList.vals) +} +inline void BoolList::_internal_add_vals(bool value) { + vals_.Add(value); +} +inline void BoolList::add_vals(bool value) { + _internal_add_vals(value); + // @@protoc_insertion_point(field_add:flwr.proto.BoolList.vals) +} +inline const 
::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >& +BoolList::_internal_vals() const { + return vals_; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >& +BoolList::vals() const { + // @@protoc_insertion_point(field_list:flwr.proto.BoolList.vals) + return _internal_vals(); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >* +BoolList::_internal_mutable_vals() { + return &vals_; +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >* +BoolList::mutable_vals() { + // @@protoc_insertion_point(field_mutable_list:flwr.proto.BoolList.vals) + return _internal_mutable_vals(); +} + +// ------------------------------------------------------------------- + +// StringList + +// repeated string vals = 1; +inline int StringList::_internal_vals_size() const { + return vals_.size(); +} +inline int StringList::vals_size() const { + return _internal_vals_size(); +} +inline void StringList::clear_vals() { + vals_.Clear(); +} +inline std::string* StringList::add_vals() { + std::string* _s = _internal_add_vals(); + // @@protoc_insertion_point(field_add_mutable:flwr.proto.StringList.vals) + return _s; +} +inline const std::string& StringList::_internal_vals(int index) const { + return vals_.Get(index); +} +inline const std::string& StringList::vals(int index) const { + // @@protoc_insertion_point(field_get:flwr.proto.StringList.vals) + return _internal_vals(index); +} +inline std::string* StringList::mutable_vals(int index) { + // @@protoc_insertion_point(field_mutable:flwr.proto.StringList.vals) + return vals_.Mutable(index); +} +inline void StringList::set_vals(int index, const std::string& value) { + vals_.Mutable(index)->assign(value); + // @@protoc_insertion_point(field_set:flwr.proto.StringList.vals) +} +inline void StringList::set_vals(int index, std::string&& value) { + vals_.Mutable(index)->assign(std::move(value)); + // @@protoc_insertion_point(field_set:flwr.proto.StringList.vals) +} +inline void StringList::set_vals(int index, const char* value) { + 
GOOGLE_DCHECK(value != nullptr); + vals_.Mutable(index)->assign(value); + // @@protoc_insertion_point(field_set_char:flwr.proto.StringList.vals) +} +inline void StringList::set_vals(int index, const char* value, size_t size) { + vals_.Mutable(index)->assign( + reinterpret_cast(value), size); + // @@protoc_insertion_point(field_set_pointer:flwr.proto.StringList.vals) +} +inline std::string* StringList::_internal_add_vals() { + return vals_.Add(); +} +inline void StringList::add_vals(const std::string& value) { + vals_.Add()->assign(value); + // @@protoc_insertion_point(field_add:flwr.proto.StringList.vals) +} +inline void StringList::add_vals(std::string&& value) { + vals_.Add(std::move(value)); + // @@protoc_insertion_point(field_add:flwr.proto.StringList.vals) +} +inline void StringList::add_vals(const char* value) { + GOOGLE_DCHECK(value != nullptr); + vals_.Add()->assign(value); + // @@protoc_insertion_point(field_add_char:flwr.proto.StringList.vals) +} +inline void StringList::add_vals(const char* value, size_t size) { + vals_.Add()->assign(reinterpret_cast(value), size); + // @@protoc_insertion_point(field_add_pointer:flwr.proto.StringList.vals) +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& +StringList::vals() const { + // @@protoc_insertion_point(field_list:flwr.proto.StringList.vals) + return vals_; +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* +StringList::mutable_vals() { + // @@protoc_insertion_point(field_mutable_list:flwr.proto.StringList.vals) + return &vals_; +} + +// ------------------------------------------------------------------- + +// BytesList + +// repeated bytes vals = 1; +inline int BytesList::_internal_vals_size() const { + return vals_.size(); +} +inline int BytesList::vals_size() const { + return _internal_vals_size(); +} +inline void BytesList::clear_vals() { + vals_.Clear(); +} +inline std::string* BytesList::add_vals() { + std::string* _s = _internal_add_vals(); + // 
@@protoc_insertion_point(field_add_mutable:flwr.proto.BytesList.vals) + return _s; +} +inline const std::string& BytesList::_internal_vals(int index) const { + return vals_.Get(index); +} +inline const std::string& BytesList::vals(int index) const { + // @@protoc_insertion_point(field_get:flwr.proto.BytesList.vals) + return _internal_vals(index); +} +inline std::string* BytesList::mutable_vals(int index) { + // @@protoc_insertion_point(field_mutable:flwr.proto.BytesList.vals) + return vals_.Mutable(index); +} +inline void BytesList::set_vals(int index, const std::string& value) { + vals_.Mutable(index)->assign(value); + // @@protoc_insertion_point(field_set:flwr.proto.BytesList.vals) +} +inline void BytesList::set_vals(int index, std::string&& value) { + vals_.Mutable(index)->assign(std::move(value)); + // @@protoc_insertion_point(field_set:flwr.proto.BytesList.vals) +} +inline void BytesList::set_vals(int index, const char* value) { + GOOGLE_DCHECK(value != nullptr); + vals_.Mutable(index)->assign(value); + // @@protoc_insertion_point(field_set_char:flwr.proto.BytesList.vals) +} +inline void BytesList::set_vals(int index, const void* value, size_t size) { + vals_.Mutable(index)->assign( + reinterpret_cast(value), size); + // @@protoc_insertion_point(field_set_pointer:flwr.proto.BytesList.vals) +} +inline std::string* BytesList::_internal_add_vals() { + return vals_.Add(); +} +inline void BytesList::add_vals(const std::string& value) { + vals_.Add()->assign(value); + // @@protoc_insertion_point(field_add:flwr.proto.BytesList.vals) +} +inline void BytesList::add_vals(std::string&& value) { + vals_.Add(std::move(value)); + // @@protoc_insertion_point(field_add:flwr.proto.BytesList.vals) +} +inline void BytesList::add_vals(const char* value) { + GOOGLE_DCHECK(value != nullptr); + vals_.Add()->assign(value); + // @@protoc_insertion_point(field_add_char:flwr.proto.BytesList.vals) +} +inline void BytesList::add_vals(const void* value, size_t size) { + 
vals_.Add()->assign(reinterpret_cast(value), size); + // @@protoc_insertion_point(field_add_pointer:flwr.proto.BytesList.vals) +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& +BytesList::vals() const { + // @@protoc_insertion_point(field_list:flwr.proto.BytesList.vals) + return vals_; +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* +BytesList::mutable_vals() { + // @@protoc_insertion_point(field_mutable_list:flwr.proto.BytesList.vals) + return &vals_; +} + +// ------------------------------------------------------------------- + +// Array + +// string dtype = 1; +inline void Array::clear_dtype() { + dtype_.ClearToEmpty(); +} +inline const std::string& Array::dtype() const { + // @@protoc_insertion_point(field_get:flwr.proto.Array.dtype) + return _internal_dtype(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void Array::set_dtype(ArgT0&& arg0, ArgT... args) { + + dtype_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:flwr.proto.Array.dtype) +} +inline std::string* Array::mutable_dtype() { + std::string* _s = _internal_mutable_dtype(); + // @@protoc_insertion_point(field_mutable:flwr.proto.Array.dtype) + return _s; +} +inline const std::string& Array::_internal_dtype() const { + return dtype_.Get(); +} +inline void Array::_internal_set_dtype(const std::string& value) { + + dtype_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); +} +inline std::string* Array::_internal_mutable_dtype() { + + return dtype_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); +} +inline std::string* Array::release_dtype() { + // @@protoc_insertion_point(field_release:flwr.proto.Array.dtype) + return dtype_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); +} +inline void 
Array::set_allocated_dtype(std::string* dtype) { + if (dtype != nullptr) { + + } else { + + } + dtype_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), dtype, + GetArenaForAllocation()); + // @@protoc_insertion_point(field_set_allocated:flwr.proto.Array.dtype) +} + +// repeated int32 shape = 2; +inline int Array::_internal_shape_size() const { + return shape_.size(); +} +inline int Array::shape_size() const { + return _internal_shape_size(); +} +inline void Array::clear_shape() { + shape_.Clear(); +} +inline ::PROTOBUF_NAMESPACE_ID::int32 Array::_internal_shape(int index) const { + return shape_.Get(index); +} +inline ::PROTOBUF_NAMESPACE_ID::int32 Array::shape(int index) const { + // @@protoc_insertion_point(field_get:flwr.proto.Array.shape) + return _internal_shape(index); +} +inline void Array::set_shape(int index, ::PROTOBUF_NAMESPACE_ID::int32 value) { + shape_.Set(index, value); + // @@protoc_insertion_point(field_set:flwr.proto.Array.shape) +} +inline void Array::_internal_add_shape(::PROTOBUF_NAMESPACE_ID::int32 value) { + shape_.Add(value); +} +inline void Array::add_shape(::PROTOBUF_NAMESPACE_ID::int32 value) { + _internal_add_shape(value); + // @@protoc_insertion_point(field_add:flwr.proto.Array.shape) +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int32 >& +Array::_internal_shape() const { + return shape_; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int32 >& +Array::shape() const { + // @@protoc_insertion_point(field_list:flwr.proto.Array.shape) + return _internal_shape(); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int32 >* +Array::_internal_mutable_shape() { + return &shape_; +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int32 >* +Array::mutable_shape() { + // @@protoc_insertion_point(field_mutable_list:flwr.proto.Array.shape) + return _internal_mutable_shape(); +} + +// string stype = 3; 
+inline void Array::clear_stype() { + stype_.ClearToEmpty(); +} +inline const std::string& Array::stype() const { + // @@protoc_insertion_point(field_get:flwr.proto.Array.stype) + return _internal_stype(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void Array::set_stype(ArgT0&& arg0, ArgT... args) { + + stype_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:flwr.proto.Array.stype) +} +inline std::string* Array::mutable_stype() { + std::string* _s = _internal_mutable_stype(); + // @@protoc_insertion_point(field_mutable:flwr.proto.Array.stype) + return _s; +} +inline const std::string& Array::_internal_stype() const { + return stype_.Get(); +} +inline void Array::_internal_set_stype(const std::string& value) { + + stype_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); +} +inline std::string* Array::_internal_mutable_stype() { + + return stype_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); +} +inline std::string* Array::release_stype() { + // @@protoc_insertion_point(field_release:flwr.proto.Array.stype) + return stype_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); +} +inline void Array::set_allocated_stype(std::string* stype) { + if (stype != nullptr) { + + } else { + + } + stype_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), stype, + GetArenaForAllocation()); + // @@protoc_insertion_point(field_set_allocated:flwr.proto.Array.stype) +} + +// bytes data = 4; +inline void Array::clear_data() { + data_.ClearToEmpty(); +} +inline const std::string& Array::data() const { + // @@protoc_insertion_point(field_get:flwr.proto.Array.data) + return _internal_data(); +} +template +inline PROTOBUF_ALWAYS_INLINE +void Array::set_data(ArgT0&& arg0, ArgT... 
args) { + + data_.SetBytes(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:flwr.proto.Array.data) +} +inline std::string* Array::mutable_data() { + std::string* _s = _internal_mutable_data(); + // @@protoc_insertion_point(field_mutable:flwr.proto.Array.data) + return _s; +} +inline const std::string& Array::_internal_data() const { + return data_.Get(); +} +inline void Array::_internal_set_data(const std::string& value) { + + data_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); +} +inline std::string* Array::_internal_mutable_data() { + + return data_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); +} +inline std::string* Array::release_data() { + // @@protoc_insertion_point(field_release:flwr.proto.Array.data) + return data_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); +} +inline void Array::set_allocated_data(std::string* data) { + if (data != nullptr) { + + } else { + + } + data_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), data, + GetArenaForAllocation()); + // @@protoc_insertion_point(field_set_allocated:flwr.proto.Array.data) +} + +// ------------------------------------------------------------------- + +// MetricsRecordValue + +// double double = 1; +inline bool MetricsRecordValue::_internal_has_double_() const { + return value_case() == kDouble; +} +inline bool MetricsRecordValue::has_double_() const { + return _internal_has_double_(); +} +inline void MetricsRecordValue::set_has_double_() { + _oneof_case_[0] = kDouble; +} +inline void MetricsRecordValue::clear_double_() { + if (_internal_has_double_()) { + value_.double__ = 0; + clear_has_value(); + } +} +inline double MetricsRecordValue::_internal_double_() const { + if (_internal_has_double_()) { 
+ return value_.double__; + } + return 0; +} +inline void MetricsRecordValue::_internal_set_double_(double value) { + if (!_internal_has_double_()) { + clear_value(); + set_has_double_(); + } + value_.double__ = value; +} +inline double MetricsRecordValue::double_() const { + // @@protoc_insertion_point(field_get:flwr.proto.MetricsRecordValue.double) + return _internal_double_(); +} +inline void MetricsRecordValue::set_double_(double value) { + _internal_set_double_(value); + // @@protoc_insertion_point(field_set:flwr.proto.MetricsRecordValue.double) +} + +// sint64 sint64 = 2; +inline bool MetricsRecordValue::_internal_has_sint64() const { + return value_case() == kSint64; +} +inline bool MetricsRecordValue::has_sint64() const { + return _internal_has_sint64(); +} +inline void MetricsRecordValue::set_has_sint64() { + _oneof_case_[0] = kSint64; +} +inline void MetricsRecordValue::clear_sint64() { + if (_internal_has_sint64()) { + value_.sint64_ = int64_t{0}; + clear_has_value(); + } +} +inline ::PROTOBUF_NAMESPACE_ID::int64 MetricsRecordValue::_internal_sint64() const { + if (_internal_has_sint64()) { + return value_.sint64_; + } + return int64_t{0}; +} +inline void MetricsRecordValue::_internal_set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value) { + if (!_internal_has_sint64()) { + clear_value(); + set_has_sint64(); + } + value_.sint64_ = value; +} +inline ::PROTOBUF_NAMESPACE_ID::int64 MetricsRecordValue::sint64() const { + // @@protoc_insertion_point(field_get:flwr.proto.MetricsRecordValue.sint64) + return _internal_sint64(); +} +inline void MetricsRecordValue::set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value) { + _internal_set_sint64(value); + // @@protoc_insertion_point(field_set:flwr.proto.MetricsRecordValue.sint64) +} + +// .flwr.proto.DoubleList double_list = 21; +inline bool MetricsRecordValue::_internal_has_double_list() const { + return value_case() == kDoubleList; +} +inline bool MetricsRecordValue::has_double_list() const { + return 
_internal_has_double_list(); +} +inline void MetricsRecordValue::set_has_double_list() { + _oneof_case_[0] = kDoubleList; +} +inline void MetricsRecordValue::clear_double_list() { + if (_internal_has_double_list()) { + if (GetArenaForAllocation() == nullptr) { + delete value_.double_list_; + } + clear_has_value(); + } +} +inline ::flwr::proto::DoubleList* MetricsRecordValue::release_double_list() { + // @@protoc_insertion_point(field_release:flwr.proto.MetricsRecordValue.double_list) + if (_internal_has_double_list()) { + clear_has_value(); + ::flwr::proto::DoubleList* temp = value_.double_list_; + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } + value_.double_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline const ::flwr::proto::DoubleList& MetricsRecordValue::_internal_double_list() const { + return _internal_has_double_list() + ? *value_.double_list_ + : reinterpret_cast< ::flwr::proto::DoubleList&>(::flwr::proto::_DoubleList_default_instance_); +} +inline const ::flwr::proto::DoubleList& MetricsRecordValue::double_list() const { + // @@protoc_insertion_point(field_get:flwr.proto.MetricsRecordValue.double_list) + return _internal_double_list(); +} +inline ::flwr::proto::DoubleList* MetricsRecordValue::unsafe_arena_release_double_list() { + // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.MetricsRecordValue.double_list) + if (_internal_has_double_list()) { + clear_has_value(); + ::flwr::proto::DoubleList* temp = value_.double_list_; + value_.double_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline void MetricsRecordValue::unsafe_arena_set_allocated_double_list(::flwr::proto::DoubleList* double_list) { + clear_value(); + if (double_list) { + set_has_double_list(); + value_.double_list_ = double_list; + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.MetricsRecordValue.double_list) +} +inline 
::flwr::proto::DoubleList* MetricsRecordValue::_internal_mutable_double_list() { + if (!_internal_has_double_list()) { + clear_value(); + set_has_double_list(); + value_.double_list_ = CreateMaybeMessage< ::flwr::proto::DoubleList >(GetArenaForAllocation()); + } + return value_.double_list_; +} +inline ::flwr::proto::DoubleList* MetricsRecordValue::mutable_double_list() { + ::flwr::proto::DoubleList* _msg = _internal_mutable_double_list(); + // @@protoc_insertion_point(field_mutable:flwr.proto.MetricsRecordValue.double_list) + return _msg; +} + +// .flwr.proto.Sint64List sint64_list = 22; +inline bool MetricsRecordValue::_internal_has_sint64_list() const { + return value_case() == kSint64List; +} +inline bool MetricsRecordValue::has_sint64_list() const { + return _internal_has_sint64_list(); +} +inline void MetricsRecordValue::set_has_sint64_list() { + _oneof_case_[0] = kSint64List; +} +inline void MetricsRecordValue::clear_sint64_list() { + if (_internal_has_sint64_list()) { + if (GetArenaForAllocation() == nullptr) { + delete value_.sint64_list_; + } + clear_has_value(); + } +} +inline ::flwr::proto::Sint64List* MetricsRecordValue::release_sint64_list() { + // @@protoc_insertion_point(field_release:flwr.proto.MetricsRecordValue.sint64_list) + if (_internal_has_sint64_list()) { + clear_has_value(); + ::flwr::proto::Sint64List* temp = value_.sint64_list_; + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } + value_.sint64_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline const ::flwr::proto::Sint64List& MetricsRecordValue::_internal_sint64_list() const { + return _internal_has_sint64_list() + ? 
*value_.sint64_list_ + : reinterpret_cast< ::flwr::proto::Sint64List&>(::flwr::proto::_Sint64List_default_instance_); +} +inline const ::flwr::proto::Sint64List& MetricsRecordValue::sint64_list() const { + // @@protoc_insertion_point(field_get:flwr.proto.MetricsRecordValue.sint64_list) + return _internal_sint64_list(); +} +inline ::flwr::proto::Sint64List* MetricsRecordValue::unsafe_arena_release_sint64_list() { + // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.MetricsRecordValue.sint64_list) + if (_internal_has_sint64_list()) { + clear_has_value(); + ::flwr::proto::Sint64List* temp = value_.sint64_list_; + value_.sint64_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline void MetricsRecordValue::unsafe_arena_set_allocated_sint64_list(::flwr::proto::Sint64List* sint64_list) { + clear_value(); + if (sint64_list) { + set_has_sint64_list(); + value_.sint64_list_ = sint64_list; + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.MetricsRecordValue.sint64_list) +} +inline ::flwr::proto::Sint64List* MetricsRecordValue::_internal_mutable_sint64_list() { + if (!_internal_has_sint64_list()) { + clear_value(); + set_has_sint64_list(); + value_.sint64_list_ = CreateMaybeMessage< ::flwr::proto::Sint64List >(GetArenaForAllocation()); + } + return value_.sint64_list_; +} +inline ::flwr::proto::Sint64List* MetricsRecordValue::mutable_sint64_list() { + ::flwr::proto::Sint64List* _msg = _internal_mutable_sint64_list(); + // @@protoc_insertion_point(field_mutable:flwr.proto.MetricsRecordValue.sint64_list) + return _msg; +} + +inline bool MetricsRecordValue::has_value() const { + return value_case() != VALUE_NOT_SET; +} +inline void MetricsRecordValue::clear_has_value() { + _oneof_case_[0] = VALUE_NOT_SET; +} +inline MetricsRecordValue::ValueCase MetricsRecordValue::value_case() const { + return MetricsRecordValue::ValueCase(_oneof_case_[0]); +} +// ------------------------------------------------------------------- 
+ +// ConfigsRecordValue + +// double double = 1; +inline bool ConfigsRecordValue::_internal_has_double_() const { + return value_case() == kDouble; +} +inline bool ConfigsRecordValue::has_double_() const { + return _internal_has_double_(); +} +inline void ConfigsRecordValue::set_has_double_() { + _oneof_case_[0] = kDouble; +} +inline void ConfigsRecordValue::clear_double_() { + if (_internal_has_double_()) { + value_.double__ = 0; + clear_has_value(); + } +} +inline double ConfigsRecordValue::_internal_double_() const { + if (_internal_has_double_()) { + return value_.double__; + } + return 0; +} +inline void ConfigsRecordValue::_internal_set_double_(double value) { + if (!_internal_has_double_()) { + clear_value(); + set_has_double_(); + } + value_.double__ = value; +} +inline double ConfigsRecordValue::double_() const { + // @@protoc_insertion_point(field_get:flwr.proto.ConfigsRecordValue.double) + return _internal_double_(); +} +inline void ConfigsRecordValue::set_double_(double value) { + _internal_set_double_(value); + // @@protoc_insertion_point(field_set:flwr.proto.ConfigsRecordValue.double) +} + +// sint64 sint64 = 2; +inline bool ConfigsRecordValue::_internal_has_sint64() const { + return value_case() == kSint64; +} +inline bool ConfigsRecordValue::has_sint64() const { + return _internal_has_sint64(); +} +inline void ConfigsRecordValue::set_has_sint64() { + _oneof_case_[0] = kSint64; +} +inline void ConfigsRecordValue::clear_sint64() { + if (_internal_has_sint64()) { + value_.sint64_ = int64_t{0}; + clear_has_value(); + } +} +inline ::PROTOBUF_NAMESPACE_ID::int64 ConfigsRecordValue::_internal_sint64() const { + if (_internal_has_sint64()) { + return value_.sint64_; + } + return int64_t{0}; +} +inline void ConfigsRecordValue::_internal_set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value) { + if (!_internal_has_sint64()) { + clear_value(); + set_has_sint64(); + } + value_.sint64_ = value; +} +inline ::PROTOBUF_NAMESPACE_ID::int64 ConfigsRecordValue::sint64() 
const { + // @@protoc_insertion_point(field_get:flwr.proto.ConfigsRecordValue.sint64) + return _internal_sint64(); +} +inline void ConfigsRecordValue::set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value) { + _internal_set_sint64(value); + // @@protoc_insertion_point(field_set:flwr.proto.ConfigsRecordValue.sint64) +} + +// bool bool = 3; +inline bool ConfigsRecordValue::_internal_has_bool_() const { + return value_case() == kBool; +} +inline bool ConfigsRecordValue::has_bool_() const { + return _internal_has_bool_(); +} +inline void ConfigsRecordValue::set_has_bool_() { + _oneof_case_[0] = kBool; +} +inline void ConfigsRecordValue::clear_bool_() { + if (_internal_has_bool_()) { + value_.bool__ = false; + clear_has_value(); + } +} +inline bool ConfigsRecordValue::_internal_bool_() const { + if (_internal_has_bool_()) { + return value_.bool__; + } + return false; +} +inline void ConfigsRecordValue::_internal_set_bool_(bool value) { + if (!_internal_has_bool_()) { + clear_value(); + set_has_bool_(); + } + value_.bool__ = value; +} +inline bool ConfigsRecordValue::bool_() const { + // @@protoc_insertion_point(field_get:flwr.proto.ConfigsRecordValue.bool) + return _internal_bool_(); +} +inline void ConfigsRecordValue::set_bool_(bool value) { + _internal_set_bool_(value); + // @@protoc_insertion_point(field_set:flwr.proto.ConfigsRecordValue.bool) +} + +// string string = 4; +inline bool ConfigsRecordValue::_internal_has_string() const { + return value_case() == kString; +} +inline bool ConfigsRecordValue::has_string() const { + return _internal_has_string(); +} +inline void ConfigsRecordValue::set_has_string() { + _oneof_case_[0] = kString; +} +inline void ConfigsRecordValue::clear_string() { + if (_internal_has_string()) { + value_.string_.Destroy(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); + clear_has_value(); + } +} +inline const std::string& ConfigsRecordValue::string() const { + // 
@@protoc_insertion_point(field_get:flwr.proto.ConfigsRecordValue.string) + return _internal_string(); +} +template +inline void ConfigsRecordValue::set_string(ArgT0&& arg0, ArgT... args) { + if (!_internal_has_string()) { + clear_value(); + set_has_string(); + value_.string_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + } + value_.string_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:flwr.proto.ConfigsRecordValue.string) +} +inline std::string* ConfigsRecordValue::mutable_string() { + std::string* _s = _internal_mutable_string(); + // @@protoc_insertion_point(field_mutable:flwr.proto.ConfigsRecordValue.string) + return _s; +} +inline const std::string& ConfigsRecordValue::_internal_string() const { + if (_internal_has_string()) { + return value_.string_.Get(); + } + return ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(); +} +inline void ConfigsRecordValue::_internal_set_string(const std::string& value) { + if (!_internal_has_string()) { + clear_value(); + set_has_string(); + value_.string_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + } + value_.string_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); +} +inline std::string* ConfigsRecordValue::_internal_mutable_string() { + if (!_internal_has_string()) { + clear_value(); + set_has_string(); + value_.string_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + } + return value_.string_.Mutable( + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); +} +inline std::string* ConfigsRecordValue::release_string() { + // @@protoc_insertion_point(field_release:flwr.proto.ConfigsRecordValue.string) + if (_internal_has_string()) { + clear_has_value(); + return 
value_.string_.ReleaseNonDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); + } else { + return nullptr; + } +} +inline void ConfigsRecordValue::set_allocated_string(std::string* string) { + if (has_value()) { + clear_value(); + } + if (string != nullptr) { + set_has_string(); + value_.string_.UnsafeSetDefault(string); + ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaForAllocation(); + if (arena != nullptr) { + arena->Own(string); + } + } + // @@protoc_insertion_point(field_set_allocated:flwr.proto.ConfigsRecordValue.string) +} + +// bytes bytes = 5; +inline bool ConfigsRecordValue::_internal_has_bytes() const { + return value_case() == kBytes; +} +inline bool ConfigsRecordValue::has_bytes() const { + return _internal_has_bytes(); +} +inline void ConfigsRecordValue::set_has_bytes() { + _oneof_case_[0] = kBytes; +} +inline void ConfigsRecordValue::clear_bytes() { + if (_internal_has_bytes()) { + value_.bytes_.Destroy(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); + clear_has_value(); + } +} +inline const std::string& ConfigsRecordValue::bytes() const { + // @@protoc_insertion_point(field_get:flwr.proto.ConfigsRecordValue.bytes) + return _internal_bytes(); +} +template +inline void ConfigsRecordValue::set_bytes(ArgT0&& arg0, ArgT... 
args) { + if (!_internal_has_bytes()) { + clear_value(); + set_has_bytes(); + value_.bytes_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + } + value_.bytes_.SetBytes(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:flwr.proto.ConfigsRecordValue.bytes) +} +inline std::string* ConfigsRecordValue::mutable_bytes() { + std::string* _s = _internal_mutable_bytes(); + // @@protoc_insertion_point(field_mutable:flwr.proto.ConfigsRecordValue.bytes) + return _s; +} +inline const std::string& ConfigsRecordValue::_internal_bytes() const { + if (_internal_has_bytes()) { + return value_.bytes_.Get(); + } + return ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(); +} +inline void ConfigsRecordValue::_internal_set_bytes(const std::string& value) { + if (!_internal_has_bytes()) { + clear_value(); + set_has_bytes(); + value_.bytes_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + } + value_.bytes_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); +} +inline std::string* ConfigsRecordValue::_internal_mutable_bytes() { + if (!_internal_has_bytes()) { + clear_value(); + set_has_bytes(); + value_.bytes_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + } + return value_.bytes_.Mutable( + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); +} +inline std::string* ConfigsRecordValue::release_bytes() { + // @@protoc_insertion_point(field_release:flwr.proto.ConfigsRecordValue.bytes) + if (_internal_has_bytes()) { + clear_has_value(); + return value_.bytes_.ReleaseNonDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); + } else { + return nullptr; + } +} +inline void 
ConfigsRecordValue::set_allocated_bytes(std::string* bytes) { + if (has_value()) { + clear_value(); + } + if (bytes != nullptr) { + set_has_bytes(); + value_.bytes_.UnsafeSetDefault(bytes); + ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaForAllocation(); + if (arena != nullptr) { + arena->Own(bytes); + } + } + // @@protoc_insertion_point(field_set_allocated:flwr.proto.ConfigsRecordValue.bytes) +} + +// .flwr.proto.DoubleList double_list = 21; +inline bool ConfigsRecordValue::_internal_has_double_list() const { + return value_case() == kDoubleList; +} +inline bool ConfigsRecordValue::has_double_list() const { + return _internal_has_double_list(); +} +inline void ConfigsRecordValue::set_has_double_list() { + _oneof_case_[0] = kDoubleList; +} +inline void ConfigsRecordValue::clear_double_list() { + if (_internal_has_double_list()) { + if (GetArenaForAllocation() == nullptr) { + delete value_.double_list_; + } + clear_has_value(); + } +} +inline ::flwr::proto::DoubleList* ConfigsRecordValue::release_double_list() { + // @@protoc_insertion_point(field_release:flwr.proto.ConfigsRecordValue.double_list) + if (_internal_has_double_list()) { + clear_has_value(); + ::flwr::proto::DoubleList* temp = value_.double_list_; + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } + value_.double_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline const ::flwr::proto::DoubleList& ConfigsRecordValue::_internal_double_list() const { + return _internal_has_double_list() + ? 
*value_.double_list_ + : reinterpret_cast< ::flwr::proto::DoubleList&>(::flwr::proto::_DoubleList_default_instance_); +} +inline const ::flwr::proto::DoubleList& ConfigsRecordValue::double_list() const { + // @@protoc_insertion_point(field_get:flwr.proto.ConfigsRecordValue.double_list) + return _internal_double_list(); +} +inline ::flwr::proto::DoubleList* ConfigsRecordValue::unsafe_arena_release_double_list() { + // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.ConfigsRecordValue.double_list) + if (_internal_has_double_list()) { + clear_has_value(); + ::flwr::proto::DoubleList* temp = value_.double_list_; + value_.double_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline void ConfigsRecordValue::unsafe_arena_set_allocated_double_list(::flwr::proto::DoubleList* double_list) { + clear_value(); + if (double_list) { + set_has_double_list(); + value_.double_list_ = double_list; + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.ConfigsRecordValue.double_list) +} +inline ::flwr::proto::DoubleList* ConfigsRecordValue::_internal_mutable_double_list() { + if (!_internal_has_double_list()) { + clear_value(); + set_has_double_list(); + value_.double_list_ = CreateMaybeMessage< ::flwr::proto::DoubleList >(GetArenaForAllocation()); + } + return value_.double_list_; +} +inline ::flwr::proto::DoubleList* ConfigsRecordValue::mutable_double_list() { + ::flwr::proto::DoubleList* _msg = _internal_mutable_double_list(); + // @@protoc_insertion_point(field_mutable:flwr.proto.ConfigsRecordValue.double_list) + return _msg; +} + +// .flwr.proto.Sint64List sint64_list = 22; +inline bool ConfigsRecordValue::_internal_has_sint64_list() const { + return value_case() == kSint64List; +} +inline bool ConfigsRecordValue::has_sint64_list() const { + return _internal_has_sint64_list(); +} +inline void ConfigsRecordValue::set_has_sint64_list() { + _oneof_case_[0] = kSint64List; +} +inline void 
ConfigsRecordValue::clear_sint64_list() { + if (_internal_has_sint64_list()) { + if (GetArenaForAllocation() == nullptr) { + delete value_.sint64_list_; + } + clear_has_value(); + } +} +inline ::flwr::proto::Sint64List* ConfigsRecordValue::release_sint64_list() { + // @@protoc_insertion_point(field_release:flwr.proto.ConfigsRecordValue.sint64_list) + if (_internal_has_sint64_list()) { + clear_has_value(); + ::flwr::proto::Sint64List* temp = value_.sint64_list_; + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } + value_.sint64_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline const ::flwr::proto::Sint64List& ConfigsRecordValue::_internal_sint64_list() const { + return _internal_has_sint64_list() + ? *value_.sint64_list_ + : reinterpret_cast< ::flwr::proto::Sint64List&>(::flwr::proto::_Sint64List_default_instance_); +} +inline const ::flwr::proto::Sint64List& ConfigsRecordValue::sint64_list() const { + // @@protoc_insertion_point(field_get:flwr.proto.ConfigsRecordValue.sint64_list) + return _internal_sint64_list(); +} +inline ::flwr::proto::Sint64List* ConfigsRecordValue::unsafe_arena_release_sint64_list() { + // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.ConfigsRecordValue.sint64_list) + if (_internal_has_sint64_list()) { + clear_has_value(); + ::flwr::proto::Sint64List* temp = value_.sint64_list_; + value_.sint64_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline void ConfigsRecordValue::unsafe_arena_set_allocated_sint64_list(::flwr::proto::Sint64List* sint64_list) { + clear_value(); + if (sint64_list) { + set_has_sint64_list(); + value_.sint64_list_ = sint64_list; + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.ConfigsRecordValue.sint64_list) +} +inline ::flwr::proto::Sint64List* ConfigsRecordValue::_internal_mutable_sint64_list() { + if (!_internal_has_sint64_list()) { + clear_value(); + 
set_has_sint64_list(); + value_.sint64_list_ = CreateMaybeMessage< ::flwr::proto::Sint64List >(GetArenaForAllocation()); + } + return value_.sint64_list_; +} +inline ::flwr::proto::Sint64List* ConfigsRecordValue::mutable_sint64_list() { + ::flwr::proto::Sint64List* _msg = _internal_mutable_sint64_list(); + // @@protoc_insertion_point(field_mutable:flwr.proto.ConfigsRecordValue.sint64_list) + return _msg; +} + +// .flwr.proto.BoolList bool_list = 23; +inline bool ConfigsRecordValue::_internal_has_bool_list() const { + return value_case() == kBoolList; +} +inline bool ConfigsRecordValue::has_bool_list() const { + return _internal_has_bool_list(); +} +inline void ConfigsRecordValue::set_has_bool_list() { + _oneof_case_[0] = kBoolList; +} +inline void ConfigsRecordValue::clear_bool_list() { + if (_internal_has_bool_list()) { + if (GetArenaForAllocation() == nullptr) { + delete value_.bool_list_; + } + clear_has_value(); + } +} +inline ::flwr::proto::BoolList* ConfigsRecordValue::release_bool_list() { + // @@protoc_insertion_point(field_release:flwr.proto.ConfigsRecordValue.bool_list) + if (_internal_has_bool_list()) { + clear_has_value(); + ::flwr::proto::BoolList* temp = value_.bool_list_; + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } + value_.bool_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline const ::flwr::proto::BoolList& ConfigsRecordValue::_internal_bool_list() const { + return _internal_has_bool_list() + ? 
*value_.bool_list_ + : reinterpret_cast< ::flwr::proto::BoolList&>(::flwr::proto::_BoolList_default_instance_); +} +inline const ::flwr::proto::BoolList& ConfigsRecordValue::bool_list() const { + // @@protoc_insertion_point(field_get:flwr.proto.ConfigsRecordValue.bool_list) + return _internal_bool_list(); +} +inline ::flwr::proto::BoolList* ConfigsRecordValue::unsafe_arena_release_bool_list() { + // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.ConfigsRecordValue.bool_list) + if (_internal_has_bool_list()) { + clear_has_value(); + ::flwr::proto::BoolList* temp = value_.bool_list_; + value_.bool_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline void ConfigsRecordValue::unsafe_arena_set_allocated_bool_list(::flwr::proto::BoolList* bool_list) { + clear_value(); + if (bool_list) { + set_has_bool_list(); + value_.bool_list_ = bool_list; + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.ConfigsRecordValue.bool_list) +} +inline ::flwr::proto::BoolList* ConfigsRecordValue::_internal_mutable_bool_list() { + if (!_internal_has_bool_list()) { + clear_value(); + set_has_bool_list(); + value_.bool_list_ = CreateMaybeMessage< ::flwr::proto::BoolList >(GetArenaForAllocation()); + } + return value_.bool_list_; +} +inline ::flwr::proto::BoolList* ConfigsRecordValue::mutable_bool_list() { + ::flwr::proto::BoolList* _msg = _internal_mutable_bool_list(); + // @@protoc_insertion_point(field_mutable:flwr.proto.ConfigsRecordValue.bool_list) + return _msg; +} + +// .flwr.proto.StringList string_list = 24; +inline bool ConfigsRecordValue::_internal_has_string_list() const { + return value_case() == kStringList; +} +inline bool ConfigsRecordValue::has_string_list() const { + return _internal_has_string_list(); +} +inline void ConfigsRecordValue::set_has_string_list() { + _oneof_case_[0] = kStringList; +} +inline void ConfigsRecordValue::clear_string_list() { + if (_internal_has_string_list()) { + if 
(GetArenaForAllocation() == nullptr) { + delete value_.string_list_; + } + clear_has_value(); + } +} +inline ::flwr::proto::StringList* ConfigsRecordValue::release_string_list() { + // @@protoc_insertion_point(field_release:flwr.proto.ConfigsRecordValue.string_list) + if (_internal_has_string_list()) { + clear_has_value(); + ::flwr::proto::StringList* temp = value_.string_list_; + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } + value_.string_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline const ::flwr::proto::StringList& ConfigsRecordValue::_internal_string_list() const { + return _internal_has_string_list() + ? *value_.string_list_ + : reinterpret_cast< ::flwr::proto::StringList&>(::flwr::proto::_StringList_default_instance_); +} +inline const ::flwr::proto::StringList& ConfigsRecordValue::string_list() const { + // @@protoc_insertion_point(field_get:flwr.proto.ConfigsRecordValue.string_list) + return _internal_string_list(); +} +inline ::flwr::proto::StringList* ConfigsRecordValue::unsafe_arena_release_string_list() { + // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.ConfigsRecordValue.string_list) + if (_internal_has_string_list()) { + clear_has_value(); + ::flwr::proto::StringList* temp = value_.string_list_; + value_.string_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline void ConfigsRecordValue::unsafe_arena_set_allocated_string_list(::flwr::proto::StringList* string_list) { + clear_value(); + if (string_list) { + set_has_string_list(); + value_.string_list_ = string_list; + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.ConfigsRecordValue.string_list) +} +inline ::flwr::proto::StringList* ConfigsRecordValue::_internal_mutable_string_list() { + if (!_internal_has_string_list()) { + clear_value(); + set_has_string_list(); + value_.string_list_ = CreateMaybeMessage< ::flwr::proto::StringList 
>(GetArenaForAllocation()); + } + return value_.string_list_; +} +inline ::flwr::proto::StringList* ConfigsRecordValue::mutable_string_list() { + ::flwr::proto::StringList* _msg = _internal_mutable_string_list(); + // @@protoc_insertion_point(field_mutable:flwr.proto.ConfigsRecordValue.string_list) + return _msg; +} + +// .flwr.proto.BytesList bytes_list = 25; +inline bool ConfigsRecordValue::_internal_has_bytes_list() const { + return value_case() == kBytesList; +} +inline bool ConfigsRecordValue::has_bytes_list() const { + return _internal_has_bytes_list(); +} +inline void ConfigsRecordValue::set_has_bytes_list() { + _oneof_case_[0] = kBytesList; +} +inline void ConfigsRecordValue::clear_bytes_list() { + if (_internal_has_bytes_list()) { + if (GetArenaForAllocation() == nullptr) { + delete value_.bytes_list_; + } + clear_has_value(); + } +} +inline ::flwr::proto::BytesList* ConfigsRecordValue::release_bytes_list() { + // @@protoc_insertion_point(field_release:flwr.proto.ConfigsRecordValue.bytes_list) + if (_internal_has_bytes_list()) { + clear_has_value(); + ::flwr::proto::BytesList* temp = value_.bytes_list_; + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } + value_.bytes_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline const ::flwr::proto::BytesList& ConfigsRecordValue::_internal_bytes_list() const { + return _internal_has_bytes_list() + ? 
*value_.bytes_list_ + : reinterpret_cast< ::flwr::proto::BytesList&>(::flwr::proto::_BytesList_default_instance_); +} +inline const ::flwr::proto::BytesList& ConfigsRecordValue::bytes_list() const { + // @@protoc_insertion_point(field_get:flwr.proto.ConfigsRecordValue.bytes_list) + return _internal_bytes_list(); +} +inline ::flwr::proto::BytesList* ConfigsRecordValue::unsafe_arena_release_bytes_list() { + // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.ConfigsRecordValue.bytes_list) + if (_internal_has_bytes_list()) { + clear_has_value(); + ::flwr::proto::BytesList* temp = value_.bytes_list_; + value_.bytes_list_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline void ConfigsRecordValue::unsafe_arena_set_allocated_bytes_list(::flwr::proto::BytesList* bytes_list) { + clear_value(); + if (bytes_list) { + set_has_bytes_list(); + value_.bytes_list_ = bytes_list; + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.ConfigsRecordValue.bytes_list) +} +inline ::flwr::proto::BytesList* ConfigsRecordValue::_internal_mutable_bytes_list() { + if (!_internal_has_bytes_list()) { + clear_value(); + set_has_bytes_list(); + value_.bytes_list_ = CreateMaybeMessage< ::flwr::proto::BytesList >(GetArenaForAllocation()); + } + return value_.bytes_list_; +} +inline ::flwr::proto::BytesList* ConfigsRecordValue::mutable_bytes_list() { + ::flwr::proto::BytesList* _msg = _internal_mutable_bytes_list(); + // @@protoc_insertion_point(field_mutable:flwr.proto.ConfigsRecordValue.bytes_list) + return _msg; +} + +inline bool ConfigsRecordValue::has_value() const { + return value_case() != VALUE_NOT_SET; +} +inline void ConfigsRecordValue::clear_has_value() { + _oneof_case_[0] = VALUE_NOT_SET; +} +inline ConfigsRecordValue::ValueCase ConfigsRecordValue::value_case() const { + return ConfigsRecordValue::ValueCase(_oneof_case_[0]); +} +// ------------------------------------------------------------------- + +// ParametersRecord + +// 
repeated string data_keys = 1; +inline int ParametersRecord::_internal_data_keys_size() const { + return data_keys_.size(); +} +inline int ParametersRecord::data_keys_size() const { + return _internal_data_keys_size(); +} +inline void ParametersRecord::clear_data_keys() { + data_keys_.Clear(); +} +inline std::string* ParametersRecord::add_data_keys() { + std::string* _s = _internal_add_data_keys(); + // @@protoc_insertion_point(field_add_mutable:flwr.proto.ParametersRecord.data_keys) + return _s; +} +inline const std::string& ParametersRecord::_internal_data_keys(int index) const { + return data_keys_.Get(index); +} +inline const std::string& ParametersRecord::data_keys(int index) const { + // @@protoc_insertion_point(field_get:flwr.proto.ParametersRecord.data_keys) + return _internal_data_keys(index); +} +inline std::string* ParametersRecord::mutable_data_keys(int index) { + // @@protoc_insertion_point(field_mutable:flwr.proto.ParametersRecord.data_keys) + return data_keys_.Mutable(index); +} +inline void ParametersRecord::set_data_keys(int index, const std::string& value) { + data_keys_.Mutable(index)->assign(value); + // @@protoc_insertion_point(field_set:flwr.proto.ParametersRecord.data_keys) +} +inline void ParametersRecord::set_data_keys(int index, std::string&& value) { + data_keys_.Mutable(index)->assign(std::move(value)); + // @@protoc_insertion_point(field_set:flwr.proto.ParametersRecord.data_keys) +} +inline void ParametersRecord::set_data_keys(int index, const char* value) { + GOOGLE_DCHECK(value != nullptr); + data_keys_.Mutable(index)->assign(value); + // @@protoc_insertion_point(field_set_char:flwr.proto.ParametersRecord.data_keys) +} +inline void ParametersRecord::set_data_keys(int index, const char* value, size_t size) { + data_keys_.Mutable(index)->assign( + reinterpret_cast(value), size); + // @@protoc_insertion_point(field_set_pointer:flwr.proto.ParametersRecord.data_keys) +} +inline std::string* ParametersRecord::_internal_add_data_keys() { + 
return data_keys_.Add(); +} +inline void ParametersRecord::add_data_keys(const std::string& value) { + data_keys_.Add()->assign(value); + // @@protoc_insertion_point(field_add:flwr.proto.ParametersRecord.data_keys) +} +inline void ParametersRecord::add_data_keys(std::string&& value) { + data_keys_.Add(std::move(value)); + // @@protoc_insertion_point(field_add:flwr.proto.ParametersRecord.data_keys) +} +inline void ParametersRecord::add_data_keys(const char* value) { + GOOGLE_DCHECK(value != nullptr); + data_keys_.Add()->assign(value); + // @@protoc_insertion_point(field_add_char:flwr.proto.ParametersRecord.data_keys) +} +inline void ParametersRecord::add_data_keys(const char* value, size_t size) { + data_keys_.Add()->assign(reinterpret_cast(value), size); + // @@protoc_insertion_point(field_add_pointer:flwr.proto.ParametersRecord.data_keys) +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& +ParametersRecord::data_keys() const { + // @@protoc_insertion_point(field_list:flwr.proto.ParametersRecord.data_keys) + return data_keys_; +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* +ParametersRecord::mutable_data_keys() { + // @@protoc_insertion_point(field_mutable_list:flwr.proto.ParametersRecord.data_keys) + return &data_keys_; +} + +// repeated .flwr.proto.Array data_values = 2; +inline int ParametersRecord::_internal_data_values_size() const { + return data_values_.size(); +} +inline int ParametersRecord::data_values_size() const { + return _internal_data_values_size(); +} +inline void ParametersRecord::clear_data_values() { + data_values_.Clear(); +} +inline ::flwr::proto::Array* ParametersRecord::mutable_data_values(int index) { + // @@protoc_insertion_point(field_mutable:flwr.proto.ParametersRecord.data_values) + return data_values_.Mutable(index); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::Array >* +ParametersRecord::mutable_data_values() { + // 
@@protoc_insertion_point(field_mutable_list:flwr.proto.ParametersRecord.data_values) + return &data_values_; +} +inline const ::flwr::proto::Array& ParametersRecord::_internal_data_values(int index) const { + return data_values_.Get(index); +} +inline const ::flwr::proto::Array& ParametersRecord::data_values(int index) const { + // @@protoc_insertion_point(field_get:flwr.proto.ParametersRecord.data_values) + return _internal_data_values(index); +} +inline ::flwr::proto::Array* ParametersRecord::_internal_add_data_values() { + return data_values_.Add(); +} +inline ::flwr::proto::Array* ParametersRecord::add_data_values() { + ::flwr::proto::Array* _add = _internal_add_data_values(); + // @@protoc_insertion_point(field_add:flwr.proto.ParametersRecord.data_values) + return _add; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::flwr::proto::Array >& +ParametersRecord::data_values() const { + // @@protoc_insertion_point(field_list:flwr.proto.ParametersRecord.data_values) + return data_values_; +} + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// MetricsRecord + +// map data = 1; +inline int MetricsRecord::_internal_data_size() const { + return data_.size(); +} +inline int MetricsRecord::data_size() const { + return _internal_data_size(); +} +inline void MetricsRecord::clear_data() { + data_.Clear(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >& +MetricsRecord::_internal_data() const { + return data_.GetMap(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >& +MetricsRecord::data() const { + // @@protoc_insertion_point(field_map:flwr.proto.MetricsRecord.data) + return _internal_data(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >* +MetricsRecord::_internal_mutable_data() { + return data_.MutableMap(); +} +inline 
::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecordValue >* +MetricsRecord::mutable_data() { + // @@protoc_insertion_point(field_mutable_map:flwr.proto.MetricsRecord.data) + return _internal_mutable_data(); +} + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ConfigsRecord + +// map data = 1; +inline int ConfigsRecord::_internal_data_size() const { + return data_.size(); +} +inline int ConfigsRecord::data_size() const { + return _internal_data_size(); +} +inline void ConfigsRecord::clear_data() { + data_.Clear(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >& +ConfigsRecord::_internal_data() const { + return data_.GetMap(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >& +ConfigsRecord::data() const { + // @@protoc_insertion_point(field_map:flwr.proto.ConfigsRecord.data) + return _internal_data(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >* +ConfigsRecord::_internal_mutable_data() { + return data_.MutableMap(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecordValue >* +ConfigsRecord::mutable_data() { + // @@protoc_insertion_point(field_mutable_map:flwr.proto.ConfigsRecord.data) + return _internal_mutable_data(); +} + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// RecordSet + +// map parameters = 1; +inline int RecordSet::_internal_parameters_size() const { + return parameters_.size(); +} +inline int RecordSet::parameters_size() const { + return _internal_parameters_size(); +} +inline void RecordSet::clear_parameters() { 
+ parameters_.Clear(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >& +RecordSet::_internal_parameters() const { + return parameters_.GetMap(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >& +RecordSet::parameters() const { + // @@protoc_insertion_point(field_map:flwr.proto.RecordSet.parameters) + return _internal_parameters(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >* +RecordSet::_internal_mutable_parameters() { + return parameters_.MutableMap(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ParametersRecord >* +RecordSet::mutable_parameters() { + // @@protoc_insertion_point(field_mutable_map:flwr.proto.RecordSet.parameters) + return _internal_mutable_parameters(); +} + +// map metrics = 2; +inline int RecordSet::_internal_metrics_size() const { + return metrics_.size(); +} +inline int RecordSet::metrics_size() const { + return _internal_metrics_size(); +} +inline void RecordSet::clear_metrics() { + metrics_.Clear(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >& +RecordSet::_internal_metrics() const { + return metrics_.GetMap(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >& +RecordSet::metrics() const { + // @@protoc_insertion_point(field_map:flwr.proto.RecordSet.metrics) + return _internal_metrics(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >* +RecordSet::_internal_mutable_metrics() { + return metrics_.MutableMap(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::MetricsRecord >* +RecordSet::mutable_metrics() { + // @@protoc_insertion_point(field_mutable_map:flwr.proto.RecordSet.metrics) + return _internal_mutable_metrics(); +} + +// map configs = 3; +inline int RecordSet::_internal_configs_size() const { + return configs_.size(); +} +inline int 
RecordSet::configs_size() const { + return _internal_configs_size(); +} +inline void RecordSet::clear_configs() { + configs_.Clear(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecord >& +RecordSet::_internal_configs() const { + return configs_.GetMap(); +} +inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecord >& +RecordSet::configs() const { + // @@protoc_insertion_point(field_map:flwr.proto.RecordSet.configs) + return _internal_configs(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecord >* +RecordSet::_internal_mutable_configs() { + return configs_.MutableMap(); +} +inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::ConfigsRecord >* +RecordSet::mutable_configs() { + // @@protoc_insertion_point(field_mutable_map:flwr.proto.RecordSet.configs) + return _internal_mutable_configs(); +} + +#ifdef __GNUC__ + #pragma GCC diagnostic pop +#endif // __GNUC__ +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// 
------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + + +// @@protoc_insertion_point(namespace_scope) + +} // namespace proto +} // namespace flwr + +// @@protoc_insertion_point(global_scope) + +#include +#endif // GOOGLE_PROTOBUF_INCLUDED_GOOGLE_PROTOBUF_INCLUDED_flwr_2fproto_2frecordset_2eproto diff --git a/src/cc/flwr/include/flwr/proto/task.pb.cc b/src/cc/flwr/include/flwr/proto/task.pb.cc index 14f1259e5ba7..04fa3e8e2625 100644 --- a/src/cc/flwr/include/flwr/proto/task.pb.cc +++ b/src/cc/flwr/include/flwr/proto/task.pb.cc @@ -21,14 +21,15 @@ namespace proto { constexpr Task::Task( ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) : ancestry_() - , created_at_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) , delivered_at_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) - , ttl_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) + , task_type_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) , producer_(nullptr) , consumer_(nullptr) - , sa_(nullptr) - , legacy_server_message_(nullptr) - , legacy_client_message_(nullptr){} + , recordset_(nullptr) + , error_(nullptr) + , created_at_(0) + , pushed_at_(0) + , ttl_(0){} struct TaskDefaultTypeInternal { constexpr TaskDefaultTypeInternal() : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} @@ -43,7 +44,7 @@ constexpr TaskIns::TaskIns( : task_id_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) , group_id_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) , task_(nullptr) - , workload_id_(int64_t{0}){} + , run_id_(int64_t{0}){} struct TaskInsDefaultTypeInternal { constexpr TaskInsDefaultTypeInternal() : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} @@ -58,7 +59,7 @@ constexpr TaskRes::TaskRes( : 
task_id_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) , group_id_(&::PROTOBUF_NAMESPACE_ID::internal::fixed_address_empty_string) , task_(nullptr) - , workload_id_(int64_t{0}){} + , run_id_(int64_t{0}){} struct TaskResDefaultTypeInternal { constexpr TaskResDefaultTypeInternal() : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} @@ -68,105 +69,9 @@ struct TaskResDefaultTypeInternal { }; }; PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT TaskResDefaultTypeInternal _TaskRes_default_instance_; -constexpr Value_DoubleList::Value_DoubleList( - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) - : vals_(){} -struct Value_DoubleListDefaultTypeInternal { - constexpr Value_DoubleListDefaultTypeInternal() - : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} - ~Value_DoubleListDefaultTypeInternal() {} - union { - Value_DoubleList _instance; - }; -}; -PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT Value_DoubleListDefaultTypeInternal _Value_DoubleList_default_instance_; -constexpr Value_Sint64List::Value_Sint64List( - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) - : vals_() - , _vals_cached_byte_size_(0){} -struct Value_Sint64ListDefaultTypeInternal { - constexpr Value_Sint64ListDefaultTypeInternal() - : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} - ~Value_Sint64ListDefaultTypeInternal() {} - union { - Value_Sint64List _instance; - }; -}; -PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT Value_Sint64ListDefaultTypeInternal _Value_Sint64List_default_instance_; -constexpr Value_BoolList::Value_BoolList( - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) - : vals_(){} -struct Value_BoolListDefaultTypeInternal { - constexpr Value_BoolListDefaultTypeInternal() - : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} - ~Value_BoolListDefaultTypeInternal() {} - union { - Value_BoolList _instance; - }; -}; -PROTOBUF_ATTRIBUTE_NO_DESTROY 
PROTOBUF_CONSTINIT Value_BoolListDefaultTypeInternal _Value_BoolList_default_instance_; -constexpr Value_StringList::Value_StringList( - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) - : vals_(){} -struct Value_StringListDefaultTypeInternal { - constexpr Value_StringListDefaultTypeInternal() - : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} - ~Value_StringListDefaultTypeInternal() {} - union { - Value_StringList _instance; - }; -}; -PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT Value_StringListDefaultTypeInternal _Value_StringList_default_instance_; -constexpr Value_BytesList::Value_BytesList( - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) - : vals_(){} -struct Value_BytesListDefaultTypeInternal { - constexpr Value_BytesListDefaultTypeInternal() - : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} - ~Value_BytesListDefaultTypeInternal() {} - union { - Value_BytesList _instance; - }; -}; -PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT Value_BytesListDefaultTypeInternal _Value_BytesList_default_instance_; -constexpr Value::Value( - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) - : _oneof_case_{}{} -struct ValueDefaultTypeInternal { - constexpr ValueDefaultTypeInternal() - : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} - ~ValueDefaultTypeInternal() {} - union { - Value _instance; - }; -}; -PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT ValueDefaultTypeInternal _Value_default_instance_; -constexpr SecureAggregation_NamedValuesEntry_DoNotUse::SecureAggregation_NamedValuesEntry_DoNotUse( - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized){} -struct SecureAggregation_NamedValuesEntry_DoNotUseDefaultTypeInternal { - constexpr SecureAggregation_NamedValuesEntry_DoNotUseDefaultTypeInternal() - : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} - ~SecureAggregation_NamedValuesEntry_DoNotUseDefaultTypeInternal() {} - union { - 
SecureAggregation_NamedValuesEntry_DoNotUse _instance; - }; -}; -PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT SecureAggregation_NamedValuesEntry_DoNotUseDefaultTypeInternal _SecureAggregation_NamedValuesEntry_DoNotUse_default_instance_; -constexpr SecureAggregation::SecureAggregation( - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized) - : named_values_(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}){} -struct SecureAggregationDefaultTypeInternal { - constexpr SecureAggregationDefaultTypeInternal() - : _instance(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized{}) {} - ~SecureAggregationDefaultTypeInternal() {} - union { - SecureAggregation _instance; - }; -}; -PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT SecureAggregationDefaultTypeInternal _SecureAggregation_default_instance_; } // namespace proto } // namespace flwr -static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_flwr_2fproto_2ftask_2eproto[11]; +static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_flwr_2fproto_2ftask_2eproto[3]; static constexpr ::PROTOBUF_NAMESPACE_ID::EnumDescriptor const** file_level_enum_descriptors_flwr_2fproto_2ftask_2eproto = nullptr; static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** file_level_service_descriptors_flwr_2fproto_2ftask_2eproto = nullptr; @@ -181,11 +86,12 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_flwr_2fproto_2ftask_2eproto::o PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, consumer_), PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, created_at_), PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, delivered_at_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, pushed_at_), PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, ttl_), PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, ancestry_), - PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, sa_), - PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, legacy_server_message_), - PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, legacy_client_message_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, 
task_type_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, recordset_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::Task, error_), ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskIns, _internal_metadata_), ~0u, // no _extensions_ @@ -194,7 +100,7 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_flwr_2fproto_2ftask_2eproto::o ~0u, // no _inlined_string_donated_ PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskIns, task_id_), PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskIns, group_id_), - PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskIns, workload_id_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskIns, run_id_), PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskIns, task_), ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskRes, _internal_metadata_), @@ -204,148 +110,49 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_flwr_2fproto_2ftask_2eproto::o ~0u, // no _inlined_string_donated_ PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskRes, task_id_), PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskRes, group_id_), - PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskRes, workload_id_), + PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskRes, run_id_), PROTOBUF_FIELD_OFFSET(::flwr::proto::TaskRes, task_), - ~0u, // no _has_bits_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value_DoubleList, _internal_metadata_), - ~0u, // no _extensions_ - ~0u, // no _oneof_case_ - ~0u, // no _weak_field_map_ - ~0u, // no _inlined_string_donated_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value_DoubleList, vals_), - ~0u, // no _has_bits_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value_Sint64List, _internal_metadata_), - ~0u, // no _extensions_ - ~0u, // no _oneof_case_ - ~0u, // no _weak_field_map_ - ~0u, // no _inlined_string_donated_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value_Sint64List, vals_), - ~0u, // no _has_bits_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value_BoolList, _internal_metadata_), - ~0u, // no _extensions_ - ~0u, // no _oneof_case_ - ~0u, // no _weak_field_map_ - ~0u, // no _inlined_string_donated_ - 
PROTOBUF_FIELD_OFFSET(::flwr::proto::Value_BoolList, vals_), - ~0u, // no _has_bits_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value_StringList, _internal_metadata_), - ~0u, // no _extensions_ - ~0u, // no _oneof_case_ - ~0u, // no _weak_field_map_ - ~0u, // no _inlined_string_donated_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value_StringList, vals_), - ~0u, // no _has_bits_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value_BytesList, _internal_metadata_), - ~0u, // no _extensions_ - ~0u, // no _oneof_case_ - ~0u, // no _weak_field_map_ - ~0u, // no _inlined_string_donated_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value_BytesList, vals_), - ~0u, // no _has_bits_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value, _internal_metadata_), - ~0u, // no _extensions_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value, _oneof_case_[0]), - ~0u, // no _weak_field_map_ - ~0u, // no _inlined_string_donated_ - ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, - ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, - ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, - ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, - ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, - ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, - ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, - ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, - ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, - ::PROTOBUF_NAMESPACE_ID::internal::kInvalidFieldOffsetTag, - PROTOBUF_FIELD_OFFSET(::flwr::proto::Value, value_), - PROTOBUF_FIELD_OFFSET(::flwr::proto::SecureAggregation_NamedValuesEntry_DoNotUse, _has_bits_), - PROTOBUF_FIELD_OFFSET(::flwr::proto::SecureAggregation_NamedValuesEntry_DoNotUse, _internal_metadata_), - ~0u, // no _extensions_ - ~0u, // no _oneof_case_ - ~0u, // no _weak_field_map_ - ~0u, // no _inlined_string_donated_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::SecureAggregation_NamedValuesEntry_DoNotUse, key_), - 
PROTOBUF_FIELD_OFFSET(::flwr::proto::SecureAggregation_NamedValuesEntry_DoNotUse, value_), - 0, - 1, - ~0u, // no _has_bits_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::SecureAggregation, _internal_metadata_), - ~0u, // no _extensions_ - ~0u, // no _oneof_case_ - ~0u, // no _weak_field_map_ - ~0u, // no _inlined_string_donated_ - PROTOBUF_FIELD_OFFSET(::flwr::proto::SecureAggregation, named_values_), }; static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { { 0, -1, -1, sizeof(::flwr::proto::Task)}, - { 15, -1, -1, sizeof(::flwr::proto::TaskIns)}, - { 25, -1, -1, sizeof(::flwr::proto::TaskRes)}, - { 35, -1, -1, sizeof(::flwr::proto::Value_DoubleList)}, - { 42, -1, -1, sizeof(::flwr::proto::Value_Sint64List)}, - { 49, -1, -1, sizeof(::flwr::proto::Value_BoolList)}, - { 56, -1, -1, sizeof(::flwr::proto::Value_StringList)}, - { 63, -1, -1, sizeof(::flwr::proto::Value_BytesList)}, - { 70, -1, -1, sizeof(::flwr::proto::Value)}, - { 87, 95, -1, sizeof(::flwr::proto::SecureAggregation_NamedValuesEntry_DoNotUse)}, - { 97, -1, -1, sizeof(::flwr::proto::SecureAggregation)}, + { 16, -1, -1, sizeof(::flwr::proto::TaskIns)}, + { 26, -1, -1, sizeof(::flwr::proto::TaskRes)}, }; static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = { reinterpret_cast(&::flwr::proto::_Task_default_instance_), reinterpret_cast(&::flwr::proto::_TaskIns_default_instance_), reinterpret_cast(&::flwr::proto::_TaskRes_default_instance_), - reinterpret_cast(&::flwr::proto::_Value_DoubleList_default_instance_), - reinterpret_cast(&::flwr::proto::_Value_Sint64List_default_instance_), - reinterpret_cast(&::flwr::proto::_Value_BoolList_default_instance_), - reinterpret_cast(&::flwr::proto::_Value_StringList_default_instance_), - reinterpret_cast(&::flwr::proto::_Value_BytesList_default_instance_), - reinterpret_cast(&::flwr::proto::_Value_default_instance_), - 
reinterpret_cast(&::flwr::proto::_SecureAggregation_NamedValuesEntry_DoNotUse_default_instance_), - reinterpret_cast(&::flwr::proto::_SecureAggregation_default_instance_), }; const char descriptor_table_protodef_flwr_2fproto_2ftask_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = "\n\025flwr/proto/task.proto\022\nflwr.proto\032\025flw" - "r/proto/node.proto\032\032flwr/proto/transport" - ".proto\"\276\002\n\004Task\022\"\n\010producer\030\001 \001(\0132\020.flwr" - ".proto.Node\022\"\n\010consumer\030\002 \001(\0132\020.flwr.pro" - "to.Node\022\022\n\ncreated_at\030\003 \001(\t\022\024\n\014delivered" - "_at\030\004 \001(\t\022\013\n\003ttl\030\005 \001(\t\022\020\n\010ancestry\030\006 \003(\t" - "\022)\n\002sa\030\007 \001(\0132\035.flwr.proto.SecureAggregat" - "ion\022<\n\025legacy_server_message\030e \001(\0132\031.flw" - "r.proto.ServerMessageB\002\030\001\022<\n\025legacy_clie" - "nt_message\030f \001(\0132\031.flwr.proto.ClientMess" - "ageB\002\030\001\"a\n\007TaskIns\022\017\n\007task_id\030\001 \001(\t\022\020\n\010g" - "roup_id\030\002 \001(\t\022\023\n\013workload_id\030\003 \001(\022\022\036\n\004ta" - "sk\030\004 \001(\0132\020.flwr.proto.Task\"a\n\007TaskRes\022\017\n" - "\007task_id\030\001 \001(\t\022\020\n\010group_id\030\002 \001(\t\022\023\n\013work" - "load_id\030\003 \001(\022\022\036\n\004task\030\004 \001(\0132\020.flwr.proto" - ".Task\"\363\003\n\005Value\022\020\n\006double\030\001 \001(\001H\000\022\020\n\006sin" - "t64\030\002 \001(\022H\000\022\016\n\004bool\030\003 \001(\010H\000\022\020\n\006string\030\004 " - "\001(\tH\000\022\017\n\005bytes\030\005 \001(\014H\000\0223\n\013double_list\030\025 " - "\001(\0132\034.flwr.proto.Value.DoubleListH\000\0223\n\013s" - "int64_list\030\026 \001(\0132\034.flwr.proto.Value.Sint" - "64ListH\000\022/\n\tbool_list\030\027 \001(\0132\032.flwr.proto" - ".Value.BoolListH\000\0223\n\013string_list\030\030 \001(\0132\034" - ".flwr.proto.Value.StringListH\000\0221\n\nbytes_" - "list\030\031 
\001(\0132\033.flwr.proto.Value.BytesListH" - "\000\032\032\n\nDoubleList\022\014\n\004vals\030\001 \003(\001\032\032\n\nSint64L" - "ist\022\014\n\004vals\030\001 \003(\022\032\030\n\010BoolList\022\014\n\004vals\030\001 " - "\003(\010\032\032\n\nStringList\022\014\n\004vals\030\001 \003(\t\032\031\n\tBytes" - "List\022\014\n\004vals\030\001 \003(\014B\007\n\005value\"\240\001\n\021SecureAg" - "gregation\022D\n\014named_values\030\001 \003(\0132..flwr.p" - "roto.SecureAggregation.NamedValuesEntry\032" - "E\n\020NamedValuesEntry\022\013\n\003key\030\001 \001(\t\022 \n\005valu" - "e\030\002 \001(\0132\021.flwr.proto.Value:\0028\001b\006proto3" + "r/proto/node.proto\032\032flwr/proto/recordset" + ".proto\032\032flwr/proto/transport.proto\032\026flwr" + "/proto/error.proto\"\211\002\n\004Task\022\"\n\010producer\030" + "\001 \001(\0132\020.flwr.proto.Node\022\"\n\010consumer\030\002 \001(" + "\0132\020.flwr.proto.Node\022\022\n\ncreated_at\030\003 \001(\001\022" + "\024\n\014delivered_at\030\004 \001(\t\022\021\n\tpushed_at\030\005 \001(\001" + "\022\013\n\003ttl\030\006 \001(\001\022\020\n\010ancestry\030\007 \003(\t\022\021\n\ttask_" + "type\030\010 \001(\t\022(\n\trecordset\030\t \001(\0132\025.flwr.pro" + "to.RecordSet\022 \n\005error\030\n \001(\0132\021.flwr.proto" + ".Error\"\\\n\007TaskIns\022\017\n\007task_id\030\001 \001(\t\022\020\n\010gr" + "oup_id\030\002 \001(\t\022\016\n\006run_id\030\003 \001(\022\022\036\n\004task\030\004 \001" + "(\0132\020.flwr.proto.Task\"\\\n\007TaskRes\022\017\n\007task_" + "id\030\001 \001(\t\022\020\n\010group_id\030\002 \001(\t\022\016\n\006run_id\030\003 \001" + "(\022\022\036\n\004task\030\004 \001(\0132\020.flwr.proto.Taskb\006prot" + "o3" ; -static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_flwr_2fproto_2ftask_2eproto_deps[2] = { +static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_flwr_2fproto_2ftask_2eproto_deps[4] = { + 
&::descriptor_table_flwr_2fproto_2ferror_2eproto, &::descriptor_table_flwr_2fproto_2fnode_2eproto, + &::descriptor_table_flwr_2fproto_2frecordset_2eproto, &::descriptor_table_flwr_2fproto_2ftransport_2eproto, }; static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_flwr_2fproto_2ftask_2eproto_once; const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_flwr_2fproto_2ftask_2eproto = { - false, false, 1278, descriptor_table_protodef_flwr_2fproto_2ftask_2eproto, "flwr/proto/task.proto", - &descriptor_table_flwr_2fproto_2ftask_2eproto_once, descriptor_table_flwr_2fproto_2ftask_2eproto_deps, 2, 11, + false, false, 602, descriptor_table_protodef_flwr_2fproto_2ftask_2eproto, "flwr/proto/task.proto", + &descriptor_table_flwr_2fproto_2ftask_2eproto_once, descriptor_table_flwr_2fproto_2ftask_2eproto_deps, 4, 3, schemas, file_default_instances, TableStruct_flwr_2fproto_2ftask_2eproto::offsets, file_level_metadata_flwr_2fproto_2ftask_2eproto, file_level_enum_descriptors_flwr_2fproto_2ftask_2eproto, file_level_service_descriptors_flwr_2fproto_2ftask_2eproto, }; @@ -364,9 +171,8 @@ class Task::_Internal { public: static const ::flwr::proto::Node& producer(const Task* msg); static const ::flwr::proto::Node& consumer(const Task* msg); - static const ::flwr::proto::SecureAggregation& sa(const Task* msg); - static const ::flwr::proto::ServerMessage& legacy_server_message(const Task* msg); - static const ::flwr::proto::ClientMessage& legacy_client_message(const Task* msg); + static const ::flwr::proto::RecordSet& recordset(const Task* msg); + static const ::flwr::proto::Error& error(const Task* msg); }; const ::flwr::proto::Node& @@ -377,17 +183,13 @@ const ::flwr::proto::Node& Task::_Internal::consumer(const Task* msg) { return *msg->consumer_; } -const ::flwr::proto::SecureAggregation& -Task::_Internal::sa(const Task* msg) { - return *msg->sa_; -} -const ::flwr::proto::ServerMessage& -Task::_Internal::legacy_server_message(const Task* msg) { - 
return *msg->legacy_server_message_; +const ::flwr::proto::RecordSet& +Task::_Internal::recordset(const Task* msg) { + return *msg->recordset_; } -const ::flwr::proto::ClientMessage& -Task::_Internal::legacy_client_message(const Task* msg) { - return *msg->legacy_client_message_; +const ::flwr::proto::Error& +Task::_Internal::error(const Task* msg) { + return *msg->error_; } void Task::clear_producer() { if (GetArenaForAllocation() == nullptr && producer_ != nullptr) { @@ -401,17 +203,17 @@ void Task::clear_consumer() { } consumer_ = nullptr; } -void Task::clear_legacy_server_message() { - if (GetArenaForAllocation() == nullptr && legacy_server_message_ != nullptr) { - delete legacy_server_message_; +void Task::clear_recordset() { + if (GetArenaForAllocation() == nullptr && recordset_ != nullptr) { + delete recordset_; } - legacy_server_message_ = nullptr; + recordset_ = nullptr; } -void Task::clear_legacy_client_message() { - if (GetArenaForAllocation() == nullptr && legacy_client_message_ != nullptr) { - delete legacy_client_message_; +void Task::clear_error() { + if (GetArenaForAllocation() == nullptr && error_ != nullptr) { + delete error_; } - legacy_client_message_ = nullptr; + error_ = nullptr; } Task::Task(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) @@ -427,19 +229,14 @@ Task::Task(const Task& from) : ::PROTOBUF_NAMESPACE_ID::Message(), ancestry_(from.ancestry_) { _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - created_at_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); - if (!from._internal_created_at().empty()) { - created_at_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_created_at(), - GetArenaForAllocation()); - } delivered_at_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); if (!from._internal_delivered_at().empty()) { 
delivered_at_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_delivered_at(), GetArenaForAllocation()); } - ttl_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); - if (!from._internal_ttl().empty()) { - ttl_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_ttl(), + task_type_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from._internal_task_type().empty()) { + task_type_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, from._internal_task_type(), GetArenaForAllocation()); } if (from._internal_has_producer()) { @@ -452,32 +249,29 @@ Task::Task(const Task& from) } else { consumer_ = nullptr; } - if (from._internal_has_sa()) { - sa_ = new ::flwr::proto::SecureAggregation(*from.sa_); + if (from._internal_has_recordset()) { + recordset_ = new ::flwr::proto::RecordSet(*from.recordset_); } else { - sa_ = nullptr; + recordset_ = nullptr; } - if (from._internal_has_legacy_server_message()) { - legacy_server_message_ = new ::flwr::proto::ServerMessage(*from.legacy_server_message_); + if (from._internal_has_error()) { + error_ = new ::flwr::proto::Error(*from.error_); } else { - legacy_server_message_ = nullptr; - } - if (from._internal_has_legacy_client_message()) { - legacy_client_message_ = new ::flwr::proto::ClientMessage(*from.legacy_client_message_); - } else { - legacy_client_message_ = nullptr; + error_ = nullptr; } + ::memcpy(&created_at_, &from.created_at_, + static_cast(reinterpret_cast(&ttl_) - + reinterpret_cast(&created_at_)) + sizeof(ttl_)); // @@protoc_insertion_point(copy_constructor:flwr.proto.Task) } void Task::SharedCtor() { -created_at_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); delivered_at_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); 
-ttl_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +task_type_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); ::memset(reinterpret_cast(this) + static_cast( reinterpret_cast(&producer_) - reinterpret_cast(this)), - 0, static_cast(reinterpret_cast(&legacy_client_message_) - - reinterpret_cast(&producer_)) + sizeof(legacy_client_message_)); + 0, static_cast(reinterpret_cast(&ttl_) - + reinterpret_cast(&producer_)) + sizeof(ttl_)); } Task::~Task() { @@ -489,14 +283,12 @@ Task::~Task() { inline void Task::SharedDtor() { GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); - created_at_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); delivered_at_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); - ttl_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + task_type_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); if (this != internal_default_instance()) delete producer_; if (this != internal_default_instance()) delete consumer_; - if (this != internal_default_instance()) delete sa_; - if (this != internal_default_instance()) delete legacy_server_message_; - if (this != internal_default_instance()) delete legacy_client_message_; + if (this != internal_default_instance()) delete recordset_; + if (this != internal_default_instance()) delete error_; } void Task::ArenaDtor(void* object) { @@ -516,9 +308,8 @@ void Task::Clear() { (void) cached_has_bits; ancestry_.Clear(); - created_at_.ClearToEmpty(); delivered_at_.ClearToEmpty(); - ttl_.ClearToEmpty(); + task_type_.ClearToEmpty(); if (GetArenaForAllocation() == nullptr && producer_ != nullptr) { delete producer_; } @@ -527,18 +318,17 @@ void Task::Clear() { delete consumer_; } consumer_ = nullptr; - if (GetArenaForAllocation() == nullptr && sa_ != nullptr) { - delete sa_; + if (GetArenaForAllocation() == nullptr && 
recordset_ != nullptr) { + delete recordset_; } - sa_ = nullptr; - if (GetArenaForAllocation() == nullptr && legacy_server_message_ != nullptr) { - delete legacy_server_message_; + recordset_ = nullptr; + if (GetArenaForAllocation() == nullptr && error_ != nullptr) { + delete error_; } - legacy_server_message_ = nullptr; - if (GetArenaForAllocation() == nullptr && legacy_client_message_ != nullptr) { - delete legacy_client_message_; - } - legacy_client_message_ = nullptr; + error_ = nullptr; + ::memset(&created_at_, 0, static_cast( + reinterpret_cast(&ttl_) - + reinterpret_cast(&created_at_)) + sizeof(ttl_)); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -564,13 +354,11 @@ const char* Task::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::inter } else goto handle_unusual; continue; - // string created_at = 3; + // double created_at = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) { - auto str = _internal_mutable_created_at(); - ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); - CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.Task.created_at")); - CHK_(ptr); + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 25)) { + created_at_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr); + ptr += sizeof(double); } else goto handle_unusual; continue; @@ -584,19 +372,25 @@ const char* Task::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::inter } else goto handle_unusual; continue; - // string ttl = 5; + // double pushed_at = 5; case 5: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 42)) { - auto str = _internal_mutable_ttl(); - ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); - CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.Task.ttl")); - CHK_(ptr); + if 
(PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 41)) { + pushed_at_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr); + ptr += sizeof(double); } else goto handle_unusual; continue; - // repeated string ancestry = 6; + // double ttl = 6; case 6: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 50)) { + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 49)) { + ttl_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr); + ptr += sizeof(double); + } else + goto handle_unusual; + continue; + // repeated string ancestry = 7; + case 7: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 58)) { ptr -= 1; do { ptr += 1; @@ -605,30 +399,32 @@ const char* Task::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::inter CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.Task.ancestry")); CHK_(ptr); if (!ctx->DataAvailable(ptr)) break; - } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<50>(ptr)); + } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<58>(ptr)); } else goto handle_unusual; continue; - // .flwr.proto.SecureAggregation sa = 7; - case 7: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 58)) { - ptr = ctx->ParseMessage(_internal_mutable_sa(), ptr); + // string task_type = 8; + case 8: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 66)) { + auto str = _internal_mutable_task_type(); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); + CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.Task.task_type")); CHK_(ptr); } else goto handle_unusual; continue; - // .flwr.proto.ServerMessage legacy_server_message = 101 [deprecated = true]; - case 101: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 42)) { - ptr = ctx->ParseMessage(_internal_mutable_legacy_server_message(), ptr); + // 
.flwr.proto.RecordSet recordset = 9; + case 9: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 74)) { + ptr = ctx->ParseMessage(_internal_mutable_recordset(), ptr); CHK_(ptr); } else goto handle_unusual; continue; - // .flwr.proto.ClientMessage legacy_client_message = 102 [deprecated = true]; - case 102: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 50)) { - ptr = ctx->ParseMessage(_internal_mutable_legacy_client_message(), ptr); + // .flwr.proto.Error error = 10; + case 10: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 82)) { + ptr = ctx->ParseMessage(_internal_mutable_error(), ptr); CHK_(ptr); } else goto handle_unusual; @@ -678,14 +474,10 @@ ::PROTOBUF_NAMESPACE_ID::uint8* Task::_InternalSerialize( 2, _Internal::consumer(this), target, stream); } - // string created_at = 3; - if (!this->_internal_created_at().empty()) { - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( - this->_internal_created_at().data(), static_cast(this->_internal_created_at().length()), - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, - "flwr.proto.Task.created_at"); - target = stream->WriteStringMaybeAliased( - 3, this->_internal_created_at(), target); + // double created_at = 3; + if (!(this->_internal_created_at() <= 0 && this->_internal_created_at() >= 0)) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteDoubleToArray(3, this->_internal_created_at(), target); } // string delivered_at = 4; @@ -698,48 +490,52 @@ ::PROTOBUF_NAMESPACE_ID::uint8* Task::_InternalSerialize( 4, this->_internal_delivered_at(), target); } - // string ttl = 5; - if (!this->_internal_ttl().empty()) { - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( - this->_internal_ttl().data(), static_cast(this->_internal_ttl().length()), - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, - 
"flwr.proto.Task.ttl"); - target = stream->WriteStringMaybeAliased( - 5, this->_internal_ttl(), target); + // double pushed_at = 5; + if (!(this->_internal_pushed_at() <= 0 && this->_internal_pushed_at() >= 0)) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteDoubleToArray(5, this->_internal_pushed_at(), target); + } + + // double ttl = 6; + if (!(this->_internal_ttl() <= 0 && this->_internal_ttl() >= 0)) { + target = stream->EnsureSpace(target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteDoubleToArray(6, this->_internal_ttl(), target); } - // repeated string ancestry = 6; + // repeated string ancestry = 7; for (int i = 0, n = this->_internal_ancestry_size(); i < n; i++) { const auto& s = this->_internal_ancestry(i); ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( s.data(), static_cast(s.length()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, "flwr.proto.Task.ancestry"); - target = stream->WriteString(6, s, target); + target = stream->WriteString(7, s, target); } - // .flwr.proto.SecureAggregation sa = 7; - if (this->_internal_has_sa()) { - target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage( - 7, _Internal::sa(this), target, stream); + // string task_type = 8; + if (!this->_internal_task_type().empty()) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->_internal_task_type().data(), static_cast(this->_internal_task_type().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "flwr.proto.Task.task_type"); + target = stream->WriteStringMaybeAliased( + 8, this->_internal_task_type(), target); } - // .flwr.proto.ServerMessage legacy_server_message = 101 [deprecated = true]; - if (this->_internal_has_legacy_server_message()) { + // .flwr.proto.RecordSet recordset = 9; + if (this->_internal_has_recordset()) { target = 
stream->EnsureSpace(target); target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: InternalWriteMessage( - 101, _Internal::legacy_server_message(this), target, stream); + 9, _Internal::recordset(this), target, stream); } - // .flwr.proto.ClientMessage legacy_client_message = 102 [deprecated = true]; - if (this->_internal_has_legacy_client_message()) { + // .flwr.proto.Error error = 10; + if (this->_internal_has_error()) { target = stream->EnsureSpace(target); target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: InternalWriteMessage( - 102, _Internal::legacy_client_message(this), target, stream); + 10, _Internal::error(this), target, stream); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { @@ -758,7 +554,7 @@ size_t Task::ByteSizeLong() const { // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; - // repeated string ancestry = 6; + // repeated string ancestry = 7; total_size += 1 * ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(ancestry_.size()); for (int i = 0, n = ancestry_.size(); i < n; i++) { @@ -766,13 +562,6 @@ size_t Task::ByteSizeLong() const { ancestry_.Get(i)); } - // string created_at = 3; - if (!this->_internal_created_at().empty()) { - total_size += 1 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( - this->_internal_created_at()); - } - // string delivered_at = 4; if (!this->_internal_delivered_at().empty()) { total_size += 1 + @@ -780,11 +569,11 @@ size_t Task::ByteSizeLong() const { this->_internal_delivered_at()); } - // string ttl = 5; - if (!this->_internal_ttl().empty()) { + // string task_type = 8; + if (!this->_internal_task_type().empty()) { total_size += 1 + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( - this->_internal_ttl()); + this->_internal_task_type()); } // .flwr.proto.Node producer = 1; @@ -801,25 +590,33 @@ size_t Task::ByteSizeLong() const { *consumer_); } - // .flwr.proto.SecureAggregation sa = 7; - if 
(this->_internal_has_sa()) { + // .flwr.proto.RecordSet recordset = 9; + if (this->_internal_has_recordset()) { total_size += 1 + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( - *sa_); + *recordset_); } - // .flwr.proto.ServerMessage legacy_server_message = 101 [deprecated = true]; - if (this->_internal_has_legacy_server_message()) { - total_size += 2 + + // .flwr.proto.Error error = 10; + if (this->_internal_has_error()) { + total_size += 1 + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( - *legacy_server_message_); + *error_); } - // .flwr.proto.ClientMessage legacy_client_message = 102 [deprecated = true]; - if (this->_internal_has_legacy_client_message()) { - total_size += 2 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( - *legacy_client_message_); + // double created_at = 3; + if (!(this->_internal_created_at() <= 0 && this->_internal_created_at() >= 0)) { + total_size += 1 + 8; + } + + // double pushed_at = 5; + if (!(this->_internal_pushed_at() <= 0 && this->_internal_pushed_at() >= 0)) { + total_size += 1 + 8; + } + + // double ttl = 6; + if (!(this->_internal_ttl() <= 0 && this->_internal_ttl() >= 0)) { + total_size += 1 + 8; } return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); @@ -845,14 +642,11 @@ void Task::MergeFrom(const Task& from) { (void) cached_has_bits; ancestry_.MergeFrom(from.ancestry_); - if (!from._internal_created_at().empty()) { - _internal_set_created_at(from._internal_created_at()); - } if (!from._internal_delivered_at().empty()) { _internal_set_delivered_at(from._internal_delivered_at()); } - if (!from._internal_ttl().empty()) { - _internal_set_ttl(from._internal_ttl()); + if (!from._internal_task_type().empty()) { + _internal_set_task_type(from._internal_task_type()); } if (from._internal_has_producer()) { _internal_mutable_producer()->::flwr::proto::Node::MergeFrom(from._internal_producer()); @@ -860,14 +654,20 @@ void Task::MergeFrom(const Task& from) { if 
(from._internal_has_consumer()) { _internal_mutable_consumer()->::flwr::proto::Node::MergeFrom(from._internal_consumer()); } - if (from._internal_has_sa()) { - _internal_mutable_sa()->::flwr::proto::SecureAggregation::MergeFrom(from._internal_sa()); + if (from._internal_has_recordset()) { + _internal_mutable_recordset()->::flwr::proto::RecordSet::MergeFrom(from._internal_recordset()); } - if (from._internal_has_legacy_server_message()) { - _internal_mutable_legacy_server_message()->::flwr::proto::ServerMessage::MergeFrom(from._internal_legacy_server_message()); + if (from._internal_has_error()) { + _internal_mutable_error()->::flwr::proto::Error::MergeFrom(from._internal_error()); } - if (from._internal_has_legacy_client_message()) { - _internal_mutable_legacy_client_message()->::flwr::proto::ClientMessage::MergeFrom(from._internal_legacy_client_message()); + if (!(from._internal_created_at() <= 0 && from._internal_created_at() >= 0)) { + _internal_set_created_at(from._internal_created_at()); + } + if (!(from._internal_pushed_at() <= 0 && from._internal_pushed_at() >= 0)) { + _internal_set_pushed_at(from._internal_pushed_at()); + } + if (!(from._internal_ttl() <= 0 && from._internal_ttl() >= 0)) { + _internal_set_ttl(from._internal_ttl()); } _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); } @@ -889,11 +689,6 @@ void Task::InternalSwap(Task* other) { auto* rhs_arena = other->GetArenaForAllocation(); _internal_metadata_.InternalSwap(&other->_internal_metadata_); ancestry_.InternalSwap(&other->ancestry_); - ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap( - &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), - &created_at_, lhs_arena, - &other->created_at_, rhs_arena - ); ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap( &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), &delivered_at_, lhs_arena, @@ -901,12 +696,12 @@ void Task::InternalSwap(Task* other) { 
); ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap( &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), - &ttl_, lhs_arena, - &other->ttl_, rhs_arena + &task_type_, lhs_arena, + &other->task_type_, rhs_arena ); ::PROTOBUF_NAMESPACE_ID::internal::memswap< - PROTOBUF_FIELD_OFFSET(Task, legacy_client_message_) - + sizeof(Task::legacy_client_message_) + PROTOBUF_FIELD_OFFSET(Task, ttl_) + + sizeof(Task::ttl_) - PROTOBUF_FIELD_OFFSET(Task, producer_)>( reinterpret_cast(&producer_), reinterpret_cast(&other->producer_)); @@ -956,7 +751,7 @@ TaskIns::TaskIns(const TaskIns& from) } else { task_ = nullptr; } - workload_id_ = from.workload_id_; + run_id_ = from.run_id_; // @@protoc_insertion_point(copy_constructor:flwr.proto.TaskIns) } @@ -965,8 +760,8 @@ task_id_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlre group_id_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); ::memset(reinterpret_cast(this) + static_cast( reinterpret_cast(&task_) - reinterpret_cast(this)), - 0, static_cast(reinterpret_cast(&workload_id_) - - reinterpret_cast(&task_)) + sizeof(workload_id_)); + 0, static_cast(reinterpret_cast(&run_id_) - + reinterpret_cast(&task_)) + sizeof(run_id_)); } TaskIns::~TaskIns() { @@ -1005,7 +800,7 @@ void TaskIns::Clear() { delete task_; } task_ = nullptr; - workload_id_ = int64_t{0}; + run_id_ = int64_t{0}; _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -1035,10 +830,10 @@ const char* TaskIns::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::in } else goto handle_unusual; continue; - // sint64 workload_id = 3; + // sint64 run_id = 3; case 3: if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) { - workload_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr); + run_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr); CHK_(ptr); } else goto handle_unusual; @@ -1100,10 +895,10 @@ 
::PROTOBUF_NAMESPACE_ID::uint8* TaskIns::_InternalSerialize( 2, this->_internal_group_id(), target); } - // sint64 workload_id = 3; - if (this->_internal_workload_id() != 0) { + // sint64 run_id = 3; + if (this->_internal_run_id() != 0) { target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteSInt64ToArray(3, this->_internal_workload_id(), target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteSInt64ToArray(3, this->_internal_run_id(), target); } // .flwr.proto.Task task = 4; @@ -1151,9 +946,9 @@ size_t TaskIns::ByteSizeLong() const { *task_); } - // sint64 workload_id = 3; - if (this->_internal_workload_id() != 0) { - total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SInt64SizePlusOne(this->_internal_workload_id()); + // sint64 run_id = 3; + if (this->_internal_run_id() != 0) { + total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SInt64SizePlusOne(this->_internal_run_id()); } return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); @@ -1187,8 +982,8 @@ void TaskIns::MergeFrom(const TaskIns& from) { if (from._internal_has_task()) { _internal_mutable_task()->::flwr::proto::Task::MergeFrom(from._internal_task()); } - if (from._internal_workload_id() != 0) { - _internal_set_workload_id(from._internal_workload_id()); + if (from._internal_run_id() != 0) { + _internal_set_run_id(from._internal_run_id()); } _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); } @@ -1220,8 +1015,8 @@ void TaskIns::InternalSwap(TaskIns* other) { &other->group_id_, rhs_arena ); ::PROTOBUF_NAMESPACE_ID::internal::memswap< - PROTOBUF_FIELD_OFFSET(TaskIns, workload_id_) - + sizeof(TaskIns::workload_id_) + PROTOBUF_FIELD_OFFSET(TaskIns, run_id_) + + sizeof(TaskIns::run_id_) - PROTOBUF_FIELD_OFFSET(TaskIns, task_)>( reinterpret_cast(&task_), reinterpret_cast(&other->task_)); @@ -1271,7 +1066,7 @@ TaskRes::TaskRes(const TaskRes& from) } else { 
task_ = nullptr; } - workload_id_ = from.workload_id_; + run_id_ = from.run_id_; // @@protoc_insertion_point(copy_constructor:flwr.proto.TaskRes) } @@ -1280,8 +1075,8 @@ task_id_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlre group_id_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); ::memset(reinterpret_cast(this) + static_cast( reinterpret_cast(&task_) - reinterpret_cast(this)), - 0, static_cast(reinterpret_cast(&workload_id_) - - reinterpret_cast(&task_)) + sizeof(workload_id_)); + 0, static_cast(reinterpret_cast(&run_id_) - + reinterpret_cast(&task_)) + sizeof(run_id_)); } TaskRes::~TaskRes() { @@ -1320,7 +1115,7 @@ void TaskRes::Clear() { delete task_; } task_ = nullptr; - workload_id_ = int64_t{0}; + run_id_ = int64_t{0}; _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -1350,10 +1145,10 @@ const char* TaskRes::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::in } else goto handle_unusual; continue; - // sint64 workload_id = 3; + // sint64 run_id = 3; case 3: if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) { - workload_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr); + run_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr); CHK_(ptr); } else goto handle_unusual; @@ -1415,10 +1210,10 @@ ::PROTOBUF_NAMESPACE_ID::uint8* TaskRes::_InternalSerialize( 2, this->_internal_group_id(), target); } - // sint64 workload_id = 3; - if (this->_internal_workload_id() != 0) { + // sint64 run_id = 3; + if (this->_internal_run_id() != 0) { target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteSInt64ToArray(3, this->_internal_workload_id(), target); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteSInt64ToArray(3, this->_internal_run_id(), target); } // .flwr.proto.Task task = 4; @@ -1466,9 +1261,9 @@ size_t TaskRes::ByteSizeLong() const { *task_); } 
- // sint64 workload_id = 3; - if (this->_internal_workload_id() != 0) { - total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SInt64SizePlusOne(this->_internal_workload_id()); + // sint64 run_id = 3; + if (this->_internal_run_id() != 0) { + total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SInt64SizePlusOne(this->_internal_run_id()); } return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); @@ -1502,8 +1297,8 @@ void TaskRes::MergeFrom(const TaskRes& from) { if (from._internal_has_task()) { _internal_mutable_task()->::flwr::proto::Task::MergeFrom(from._internal_task()); } - if (from._internal_workload_id() != 0) { - _internal_set_workload_id(from._internal_workload_id()); + if (from._internal_run_id() != 0) { + _internal_set_run_id(from._internal_run_id()); } _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); } @@ -1535,8 +1330,8 @@ void TaskRes::InternalSwap(TaskRes* other) { &other->group_id_, rhs_arena ); ::PROTOBUF_NAMESPACE_ID::internal::memswap< - PROTOBUF_FIELD_OFFSET(TaskRes, workload_id_) - + sizeof(TaskRes::workload_id_) + PROTOBUF_FIELD_OFFSET(TaskRes, run_id_) + + sizeof(TaskRes::run_id_) - PROTOBUF_FIELD_OFFSET(TaskRes, task_)>( reinterpret_cast(&task_), reinterpret_cast(&other->task_)); @@ -1548,1856 +1343,19 @@ ::PROTOBUF_NAMESPACE_ID::Metadata TaskRes::GetMetadata() const { file_level_metadata_flwr_2fproto_2ftask_2eproto[2]); } -// =================================================================== - -class Value_DoubleList::_Internal { - public: -}; - -Value_DoubleList::Value_DoubleList(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), - vals_(arena) { - SharedCtor(); - if (!is_message_owned) { - RegisterArenaDtor(arena); - } - // @@protoc_insertion_point(arena_constructor:flwr.proto.Value.DoubleList) -} -Value_DoubleList::Value_DoubleList(const Value_DoubleList& from) - : 
::PROTOBUF_NAMESPACE_ID::Message(), - vals_(from.vals_) { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - // @@protoc_insertion_point(copy_constructor:flwr.proto.Value.DoubleList) -} - -void Value_DoubleList::SharedCtor() { -} - -Value_DoubleList::~Value_DoubleList() { - // @@protoc_insertion_point(destructor:flwr.proto.Value.DoubleList) - if (GetArenaForAllocation() != nullptr) return; - SharedDtor(); - _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -inline void Value_DoubleList::SharedDtor() { - GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); -} - -void Value_DoubleList::ArenaDtor(void* object) { - Value_DoubleList* _this = reinterpret_cast< Value_DoubleList* >(object); - (void)_this; -} -void Value_DoubleList::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { -} -void Value_DoubleList::SetCachedSize(int size) const { - _cached_size_.Set(size); -} - -void Value_DoubleList::Clear() { -// @@protoc_insertion_point(message_clear_start:flwr.proto.Value.DoubleList) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - vals_.Clear(); - _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -const char* Value_DoubleList::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { -#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure - while (!ctx->Done(&ptr)) { - ::PROTOBUF_NAMESPACE_ID::uint32 tag; - ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); - switch (tag >> 3) { - // repeated double vals = 1; - case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { - ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedDoubleParser(_internal_mutable_vals(), ptr, ctx); - CHK_(ptr); - } else if (static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 9) { - 
_internal_add_vals(::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr)); - ptr += sizeof(double); - } else - goto handle_unusual; - continue; - default: - goto handle_unusual; - } // switch - handle_unusual: - if ((tag == 0) || ((tag & 7) == 4)) { - CHK_(ptr); - ctx->SetLastTag(tag); - goto message_done; - } - ptr = UnknownFieldParse( - tag, - _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), - ptr, ctx); - CHK_(ptr != nullptr); - } // while -message_done: - return ptr; -failure: - ptr = nullptr; - goto message_done; -#undef CHK_ -} - -::PROTOBUF_NAMESPACE_ID::uint8* Value_DoubleList::_InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { - // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.Value.DoubleList) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - // repeated double vals = 1; - if (this->_internal_vals_size() > 0) { - target = stream->WriteFixedPacked(1, _internal_vals(), target); - } - - if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( - _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); - } - // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.Value.DoubleList) - return target; -} - -size_t Value_DoubleList::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:flwr.proto.Value.DoubleList) - size_t total_size = 0; - - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - // repeated double vals = 1; - { - unsigned int count = static_cast(this->_internal_vals_size()); - size_t data_size = 8UL * count; - if (data_size > 0) { - total_size += 1 + - 
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size( - static_cast<::PROTOBUF_NAMESPACE_ID::int32>(data_size)); - } - total_size += data_size; - } - - return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); -} - -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData Value_DoubleList::_class_data_ = { - ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, - Value_DoubleList::MergeImpl -}; -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*Value_DoubleList::GetClassData() const { return &_class_data_; } - -void Value_DoubleList::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, - const ::PROTOBUF_NAMESPACE_ID::Message& from) { - static_cast(to)->MergeFrom( - static_cast(from)); +// @@protoc_insertion_point(namespace_scope) +} // namespace proto +} // namespace flwr +PROTOBUF_NAMESPACE_OPEN +template<> PROTOBUF_NOINLINE ::flwr::proto::Task* Arena::CreateMaybeMessage< ::flwr::proto::Task >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::Task >(arena); } - - -void Value_DoubleList::MergeFrom(const Value_DoubleList& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.Value.DoubleList) - GOOGLE_DCHECK_NE(&from, this); - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - vals_.MergeFrom(from.vals_); - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); -} - -void Value_DoubleList::CopyFrom(const Value_DoubleList& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.Value.DoubleList) - if (&from == this) return; - Clear(); - MergeFrom(from); -} - -bool Value_DoubleList::IsInitialized() const { - return true; -} - -void Value_DoubleList::InternalSwap(Value_DoubleList* other) { - using std::swap; - _internal_metadata_.InternalSwap(&other->_internal_metadata_); - vals_.InternalSwap(&other->vals_); -} - -::PROTOBUF_NAMESPACE_ID::Metadata Value_DoubleList::GetMetadata() const { - return 
::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( - &descriptor_table_flwr_2fproto_2ftask_2eproto_getter, &descriptor_table_flwr_2fproto_2ftask_2eproto_once, - file_level_metadata_flwr_2fproto_2ftask_2eproto[3]); -} - -// =================================================================== - -class Value_Sint64List::_Internal { - public: -}; - -Value_Sint64List::Value_Sint64List(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), - vals_(arena) { - SharedCtor(); - if (!is_message_owned) { - RegisterArenaDtor(arena); - } - // @@protoc_insertion_point(arena_constructor:flwr.proto.Value.Sint64List) -} -Value_Sint64List::Value_Sint64List(const Value_Sint64List& from) - : ::PROTOBUF_NAMESPACE_ID::Message(), - vals_(from.vals_) { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - // @@protoc_insertion_point(copy_constructor:flwr.proto.Value.Sint64List) -} - -void Value_Sint64List::SharedCtor() { -} - -Value_Sint64List::~Value_Sint64List() { - // @@protoc_insertion_point(destructor:flwr.proto.Value.Sint64List) - if (GetArenaForAllocation() != nullptr) return; - SharedDtor(); - _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -inline void Value_Sint64List::SharedDtor() { - GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); -} - -void Value_Sint64List::ArenaDtor(void* object) { - Value_Sint64List* _this = reinterpret_cast< Value_Sint64List* >(object); - (void)_this; -} -void Value_Sint64List::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { -} -void Value_Sint64List::SetCachedSize(int size) const { - _cached_size_.Set(size); -} - -void Value_Sint64List::Clear() { -// @@protoc_insertion_point(message_clear_start:flwr.proto.Value.Sint64List) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - vals_.Clear(); - 
_internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -const char* Value_Sint64List::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { -#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure - while (!ctx->Done(&ptr)) { - ::PROTOBUF_NAMESPACE_ID::uint32 tag; - ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); - switch (tag >> 3) { - // repeated sint64 vals = 1; - case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { - ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedSInt64Parser(_internal_mutable_vals(), ptr, ctx); - CHK_(ptr); - } else if (static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8) { - _internal_add_vals(::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr)); - CHK_(ptr); - } else - goto handle_unusual; - continue; - default: - goto handle_unusual; - } // switch - handle_unusual: - if ((tag == 0) || ((tag & 7) == 4)) { - CHK_(ptr); - ctx->SetLastTag(tag); - goto message_done; - } - ptr = UnknownFieldParse( - tag, - _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), - ptr, ctx); - CHK_(ptr != nullptr); - } // while -message_done: - return ptr; -failure: - ptr = nullptr; - goto message_done; -#undef CHK_ -} - -::PROTOBUF_NAMESPACE_ID::uint8* Value_Sint64List::_InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { - // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.Value.Sint64List) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - // repeated sint64 vals = 1; - { - int byte_size = _vals_cached_byte_size_.load(std::memory_order_relaxed); - if (byte_size > 0) { - target = stream->WriteSInt64Packed( - 1, _internal_vals(), byte_size, target); - } - } - - if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { - target = 
::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( - _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); - } - // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.Value.Sint64List) - return target; -} - -size_t Value_Sint64List::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:flwr.proto.Value.Sint64List) - size_t total_size = 0; - - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - // repeated sint64 vals = 1; - { - size_t data_size = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - SInt64Size(this->vals_); - if (data_size > 0) { - total_size += 1 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size( - static_cast<::PROTOBUF_NAMESPACE_ID::int32>(data_size)); - } - int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(data_size); - _vals_cached_byte_size_.store(cached_size, - std::memory_order_relaxed); - total_size += data_size; - } - - return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); -} - -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData Value_Sint64List::_class_data_ = { - ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, - Value_Sint64List::MergeImpl -}; -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*Value_Sint64List::GetClassData() const { return &_class_data_; } - -void Value_Sint64List::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, - const ::PROTOBUF_NAMESPACE_ID::Message& from) { - static_cast(to)->MergeFrom( - static_cast(from)); -} - - -void Value_Sint64List::MergeFrom(const Value_Sint64List& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.Value.Sint64List) - GOOGLE_DCHECK_NE(&from, this); - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - 
vals_.MergeFrom(from.vals_); - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); -} - -void Value_Sint64List::CopyFrom(const Value_Sint64List& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.Value.Sint64List) - if (&from == this) return; - Clear(); - MergeFrom(from); -} - -bool Value_Sint64List::IsInitialized() const { - return true; -} - -void Value_Sint64List::InternalSwap(Value_Sint64List* other) { - using std::swap; - _internal_metadata_.InternalSwap(&other->_internal_metadata_); - vals_.InternalSwap(&other->vals_); -} - -::PROTOBUF_NAMESPACE_ID::Metadata Value_Sint64List::GetMetadata() const { - return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( - &descriptor_table_flwr_2fproto_2ftask_2eproto_getter, &descriptor_table_flwr_2fproto_2ftask_2eproto_once, - file_level_metadata_flwr_2fproto_2ftask_2eproto[4]); -} - -// =================================================================== - -class Value_BoolList::_Internal { - public: -}; - -Value_BoolList::Value_BoolList(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), - vals_(arena) { - SharedCtor(); - if (!is_message_owned) { - RegisterArenaDtor(arena); - } - // @@protoc_insertion_point(arena_constructor:flwr.proto.Value.BoolList) -} -Value_BoolList::Value_BoolList(const Value_BoolList& from) - : ::PROTOBUF_NAMESPACE_ID::Message(), - vals_(from.vals_) { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - // @@protoc_insertion_point(copy_constructor:flwr.proto.Value.BoolList) -} - -void Value_BoolList::SharedCtor() { -} - -Value_BoolList::~Value_BoolList() { - // @@protoc_insertion_point(destructor:flwr.proto.Value.BoolList) - if (GetArenaForAllocation() != nullptr) return; - SharedDtor(); - _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -inline void 
Value_BoolList::SharedDtor() { - GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); -} - -void Value_BoolList::ArenaDtor(void* object) { - Value_BoolList* _this = reinterpret_cast< Value_BoolList* >(object); - (void)_this; -} -void Value_BoolList::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { -} -void Value_BoolList::SetCachedSize(int size) const { - _cached_size_.Set(size); -} - -void Value_BoolList::Clear() { -// @@protoc_insertion_point(message_clear_start:flwr.proto.Value.BoolList) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - vals_.Clear(); - _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -const char* Value_BoolList::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { -#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure - while (!ctx->Done(&ptr)) { - ::PROTOBUF_NAMESPACE_ID::uint32 tag; - ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); - switch (tag >> 3) { - // repeated bool vals = 1; - case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { - ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedBoolParser(_internal_mutable_vals(), ptr, ctx); - CHK_(ptr); - } else if (static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8) { - _internal_add_vals(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr)); - CHK_(ptr); - } else - goto handle_unusual; - continue; - default: - goto handle_unusual; - } // switch - handle_unusual: - if ((tag == 0) || ((tag & 7) == 4)) { - CHK_(ptr); - ctx->SetLastTag(tag); - goto message_done; - } - ptr = UnknownFieldParse( - tag, - _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), - ptr, ctx); - CHK_(ptr != nullptr); - } // while -message_done: - return ptr; -failure: - ptr = nullptr; - goto message_done; -#undef CHK_ -} - -::PROTOBUF_NAMESPACE_ID::uint8* 
Value_BoolList::_InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { - // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.Value.BoolList) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - // repeated bool vals = 1; - if (this->_internal_vals_size() > 0) { - target = stream->WriteFixedPacked(1, _internal_vals(), target); - } - - if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( - _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); - } - // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.Value.BoolList) - return target; -} - -size_t Value_BoolList::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:flwr.proto.Value.BoolList) - size_t total_size = 0; - - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - // repeated bool vals = 1; - { - unsigned int count = static_cast(this->_internal_vals_size()); - size_t data_size = 1UL * count; - if (data_size > 0) { - total_size += 1 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size( - static_cast<::PROTOBUF_NAMESPACE_ID::int32>(data_size)); - } - total_size += data_size; - } - - return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); -} - -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData Value_BoolList::_class_data_ = { - ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, - Value_BoolList::MergeImpl -}; -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*Value_BoolList::GetClassData() const { return &_class_data_; } - -void Value_BoolList::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, - const ::PROTOBUF_NAMESPACE_ID::Message& 
from) { - static_cast(to)->MergeFrom( - static_cast(from)); -} - - -void Value_BoolList::MergeFrom(const Value_BoolList& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.Value.BoolList) - GOOGLE_DCHECK_NE(&from, this); - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - vals_.MergeFrom(from.vals_); - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); -} - -void Value_BoolList::CopyFrom(const Value_BoolList& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.Value.BoolList) - if (&from == this) return; - Clear(); - MergeFrom(from); -} - -bool Value_BoolList::IsInitialized() const { - return true; -} - -void Value_BoolList::InternalSwap(Value_BoolList* other) { - using std::swap; - _internal_metadata_.InternalSwap(&other->_internal_metadata_); - vals_.InternalSwap(&other->vals_); -} - -::PROTOBUF_NAMESPACE_ID::Metadata Value_BoolList::GetMetadata() const { - return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( - &descriptor_table_flwr_2fproto_2ftask_2eproto_getter, &descriptor_table_flwr_2fproto_2ftask_2eproto_once, - file_level_metadata_flwr_2fproto_2ftask_2eproto[5]); -} - -// =================================================================== - -class Value_StringList::_Internal { - public: -}; - -Value_StringList::Value_StringList(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), - vals_(arena) { - SharedCtor(); - if (!is_message_owned) { - RegisterArenaDtor(arena); - } - // @@protoc_insertion_point(arena_constructor:flwr.proto.Value.StringList) -} -Value_StringList::Value_StringList(const Value_StringList& from) - : ::PROTOBUF_NAMESPACE_ID::Message(), - vals_(from.vals_) { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - // 
@@protoc_insertion_point(copy_constructor:flwr.proto.Value.StringList) -} - -void Value_StringList::SharedCtor() { -} - -Value_StringList::~Value_StringList() { - // @@protoc_insertion_point(destructor:flwr.proto.Value.StringList) - if (GetArenaForAllocation() != nullptr) return; - SharedDtor(); - _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -inline void Value_StringList::SharedDtor() { - GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); -} - -void Value_StringList::ArenaDtor(void* object) { - Value_StringList* _this = reinterpret_cast< Value_StringList* >(object); - (void)_this; -} -void Value_StringList::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { -} -void Value_StringList::SetCachedSize(int size) const { - _cached_size_.Set(size); -} - -void Value_StringList::Clear() { -// @@protoc_insertion_point(message_clear_start:flwr.proto.Value.StringList) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - vals_.Clear(); - _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -const char* Value_StringList::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { -#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure - while (!ctx->Done(&ptr)) { - ::PROTOBUF_NAMESPACE_ID::uint32 tag; - ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); - switch (tag >> 3) { - // repeated string vals = 1; - case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { - ptr -= 1; - do { - ptr += 1; - auto str = _internal_add_vals(); - ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); - CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.Value.StringList.vals")); - CHK_(ptr); - if (!ctx->DataAvailable(ptr)) break; - } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); - } else - goto 
handle_unusual; - continue; - default: - goto handle_unusual; - } // switch - handle_unusual: - if ((tag == 0) || ((tag & 7) == 4)) { - CHK_(ptr); - ctx->SetLastTag(tag); - goto message_done; - } - ptr = UnknownFieldParse( - tag, - _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), - ptr, ctx); - CHK_(ptr != nullptr); - } // while -message_done: - return ptr; -failure: - ptr = nullptr; - goto message_done; -#undef CHK_ -} - -::PROTOBUF_NAMESPACE_ID::uint8* Value_StringList::_InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { - // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.Value.StringList) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - // repeated string vals = 1; - for (int i = 0, n = this->_internal_vals_size(); i < n; i++) { - const auto& s = this->_internal_vals(i); - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( - s.data(), static_cast(s.length()), - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, - "flwr.proto.Value.StringList.vals"); - target = stream->WriteString(1, s, target); - } - - if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( - _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); - } - // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.Value.StringList) - return target; -} - -size_t Value_StringList::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:flwr.proto.Value.StringList) - size_t total_size = 0; - - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - // repeated string vals = 1; - 
total_size += 1 * - ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(vals_.size()); - for (int i = 0, n = vals_.size(); i < n; i++) { - total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( - vals_.Get(i)); - } - - return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); -} - -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData Value_StringList::_class_data_ = { - ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, - Value_StringList::MergeImpl -}; -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*Value_StringList::GetClassData() const { return &_class_data_; } - -void Value_StringList::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, - const ::PROTOBUF_NAMESPACE_ID::Message& from) { - static_cast(to)->MergeFrom( - static_cast(from)); -} - - -void Value_StringList::MergeFrom(const Value_StringList& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.Value.StringList) - GOOGLE_DCHECK_NE(&from, this); - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - vals_.MergeFrom(from.vals_); - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); -} - -void Value_StringList::CopyFrom(const Value_StringList& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.Value.StringList) - if (&from == this) return; - Clear(); - MergeFrom(from); -} - -bool Value_StringList::IsInitialized() const { - return true; -} - -void Value_StringList::InternalSwap(Value_StringList* other) { - using std::swap; - _internal_metadata_.InternalSwap(&other->_internal_metadata_); - vals_.InternalSwap(&other->vals_); -} - -::PROTOBUF_NAMESPACE_ID::Metadata Value_StringList::GetMetadata() const { - return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( - &descriptor_table_flwr_2fproto_2ftask_2eproto_getter, &descriptor_table_flwr_2fproto_2ftask_2eproto_once, - file_level_metadata_flwr_2fproto_2ftask_2eproto[6]); -} - -// 
=================================================================== - -class Value_BytesList::_Internal { - public: -}; - -Value_BytesList::Value_BytesList(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), - vals_(arena) { - SharedCtor(); - if (!is_message_owned) { - RegisterArenaDtor(arena); - } - // @@protoc_insertion_point(arena_constructor:flwr.proto.Value.BytesList) -} -Value_BytesList::Value_BytesList(const Value_BytesList& from) - : ::PROTOBUF_NAMESPACE_ID::Message(), - vals_(from.vals_) { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - // @@protoc_insertion_point(copy_constructor:flwr.proto.Value.BytesList) -} - -void Value_BytesList::SharedCtor() { -} - -Value_BytesList::~Value_BytesList() { - // @@protoc_insertion_point(destructor:flwr.proto.Value.BytesList) - if (GetArenaForAllocation() != nullptr) return; - SharedDtor(); - _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -inline void Value_BytesList::SharedDtor() { - GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); -} - -void Value_BytesList::ArenaDtor(void* object) { - Value_BytesList* _this = reinterpret_cast< Value_BytesList* >(object); - (void)_this; -} -void Value_BytesList::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { -} -void Value_BytesList::SetCachedSize(int size) const { - _cached_size_.Set(size); -} - -void Value_BytesList::Clear() { -// @@protoc_insertion_point(message_clear_start:flwr.proto.Value.BytesList) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - vals_.Clear(); - _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -const char* Value_BytesList::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { -#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto 
failure - while (!ctx->Done(&ptr)) { - ::PROTOBUF_NAMESPACE_ID::uint32 tag; - ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); - switch (tag >> 3) { - // repeated bytes vals = 1; - case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { - ptr -= 1; - do { - ptr += 1; - auto str = _internal_add_vals(); - ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); - CHK_(ptr); - if (!ctx->DataAvailable(ptr)) break; - } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); - } else - goto handle_unusual; - continue; - default: - goto handle_unusual; - } // switch - handle_unusual: - if ((tag == 0) || ((tag & 7) == 4)) { - CHK_(ptr); - ctx->SetLastTag(tag); - goto message_done; - } - ptr = UnknownFieldParse( - tag, - _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), - ptr, ctx); - CHK_(ptr != nullptr); - } // while -message_done: - return ptr; -failure: - ptr = nullptr; - goto message_done; -#undef CHK_ -} - -::PROTOBUF_NAMESPACE_ID::uint8* Value_BytesList::_InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { - // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.Value.BytesList) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - // repeated bytes vals = 1; - for (int i = 0, n = this->_internal_vals_size(); i < n; i++) { - const auto& s = this->_internal_vals(i); - target = stream->WriteBytes(1, s, target); - } - - if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( - _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); - } - // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.Value.BytesList) - return 
target; -} - -size_t Value_BytesList::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:flwr.proto.Value.BytesList) - size_t total_size = 0; - - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - // repeated bytes vals = 1; - total_size += 1 * - ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(vals_.size()); - for (int i = 0, n = vals_.size(); i < n; i++) { - total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::BytesSize( - vals_.Get(i)); - } - - return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); -} - -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData Value_BytesList::_class_data_ = { - ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, - Value_BytesList::MergeImpl -}; -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*Value_BytesList::GetClassData() const { return &_class_data_; } - -void Value_BytesList::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, - const ::PROTOBUF_NAMESPACE_ID::Message& from) { - static_cast(to)->MergeFrom( - static_cast(from)); -} - - -void Value_BytesList::MergeFrom(const Value_BytesList& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.Value.BytesList) - GOOGLE_DCHECK_NE(&from, this); - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - vals_.MergeFrom(from.vals_); - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); -} - -void Value_BytesList::CopyFrom(const Value_BytesList& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.Value.BytesList) - if (&from == this) return; - Clear(); - MergeFrom(from); -} - -bool Value_BytesList::IsInitialized() const { - return true; -} - -void Value_BytesList::InternalSwap(Value_BytesList* other) { - using std::swap; - _internal_metadata_.InternalSwap(&other->_internal_metadata_); - vals_.InternalSwap(&other->vals_); -} 
- -::PROTOBUF_NAMESPACE_ID::Metadata Value_BytesList::GetMetadata() const { - return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( - &descriptor_table_flwr_2fproto_2ftask_2eproto_getter, &descriptor_table_flwr_2fproto_2ftask_2eproto_once, - file_level_metadata_flwr_2fproto_2ftask_2eproto[7]); -} - -// =================================================================== - -class Value::_Internal { - public: - static const ::flwr::proto::Value_DoubleList& double_list(const Value* msg); - static const ::flwr::proto::Value_Sint64List& sint64_list(const Value* msg); - static const ::flwr::proto::Value_BoolList& bool_list(const Value* msg); - static const ::flwr::proto::Value_StringList& string_list(const Value* msg); - static const ::flwr::proto::Value_BytesList& bytes_list(const Value* msg); -}; - -const ::flwr::proto::Value_DoubleList& -Value::_Internal::double_list(const Value* msg) { - return *msg->value_.double_list_; -} -const ::flwr::proto::Value_Sint64List& -Value::_Internal::sint64_list(const Value* msg) { - return *msg->value_.sint64_list_; -} -const ::flwr::proto::Value_BoolList& -Value::_Internal::bool_list(const Value* msg) { - return *msg->value_.bool_list_; -} -const ::flwr::proto::Value_StringList& -Value::_Internal::string_list(const Value* msg) { - return *msg->value_.string_list_; -} -const ::flwr::proto::Value_BytesList& -Value::_Internal::bytes_list(const Value* msg) { - return *msg->value_.bytes_list_; -} -void Value::set_allocated_double_list(::flwr::proto::Value_DoubleList* double_list) { - ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); - clear_value(); - if (double_list) { - ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = - ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Value_DoubleList>::GetOwningArena(double_list); - if (message_arena != submessage_arena) { - double_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, double_list, submessage_arena); - } - 
set_has_double_list(); - value_.double_list_ = double_list; - } - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Value.double_list) -} -void Value::set_allocated_sint64_list(::flwr::proto::Value_Sint64List* sint64_list) { - ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); - clear_value(); - if (sint64_list) { - ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = - ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Value_Sint64List>::GetOwningArena(sint64_list); - if (message_arena != submessage_arena) { - sint64_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, sint64_list, submessage_arena); - } - set_has_sint64_list(); - value_.sint64_list_ = sint64_list; - } - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Value.sint64_list) -} -void Value::set_allocated_bool_list(::flwr::proto::Value_BoolList* bool_list) { - ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); - clear_value(); - if (bool_list) { - ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = - ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Value_BoolList>::GetOwningArena(bool_list); - if (message_arena != submessage_arena) { - bool_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, bool_list, submessage_arena); - } - set_has_bool_list(); - value_.bool_list_ = bool_list; - } - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Value.bool_list) -} -void Value::set_allocated_string_list(::flwr::proto::Value_StringList* string_list) { - ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); - clear_value(); - if (string_list) { - ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = - ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Value_StringList>::GetOwningArena(string_list); - if (message_arena != submessage_arena) { - string_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, string_list, 
submessage_arena); - } - set_has_string_list(); - value_.string_list_ = string_list; - } - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Value.string_list) -} -void Value::set_allocated_bytes_list(::flwr::proto::Value_BytesList* bytes_list) { - ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); - clear_value(); - if (bytes_list) { - ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = - ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Value_BytesList>::GetOwningArena(bytes_list); - if (message_arena != submessage_arena) { - bytes_list = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, bytes_list, submessage_arena); - } - set_has_bytes_list(); - value_.bytes_list_ = bytes_list; - } - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Value.bytes_list) -} -Value::Value(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(); - if (!is_message_owned) { - RegisterArenaDtor(arena); - } - // @@protoc_insertion_point(arena_constructor:flwr.proto.Value) -} -Value::Value(const Value& from) - : ::PROTOBUF_NAMESPACE_ID::Message() { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - clear_has_value(); - switch (from.value_case()) { - case kDouble: { - _internal_set_double_(from._internal_double_()); - break; - } - case kSint64: { - _internal_set_sint64(from._internal_sint64()); - break; - } - case kBool: { - _internal_set_bool_(from._internal_bool_()); - break; - } - case kString: { - _internal_set_string(from._internal_string()); - break; - } - case kBytes: { - _internal_set_bytes(from._internal_bytes()); - break; - } - case kDoubleList: { - _internal_mutable_double_list()->::flwr::proto::Value_DoubleList::MergeFrom(from._internal_double_list()); - break; - } - case kSint64List: { - 
_internal_mutable_sint64_list()->::flwr::proto::Value_Sint64List::MergeFrom(from._internal_sint64_list()); - break; - } - case kBoolList: { - _internal_mutable_bool_list()->::flwr::proto::Value_BoolList::MergeFrom(from._internal_bool_list()); - break; - } - case kStringList: { - _internal_mutable_string_list()->::flwr::proto::Value_StringList::MergeFrom(from._internal_string_list()); - break; - } - case kBytesList: { - _internal_mutable_bytes_list()->::flwr::proto::Value_BytesList::MergeFrom(from._internal_bytes_list()); - break; - } - case VALUE_NOT_SET: { - break; - } - } - // @@protoc_insertion_point(copy_constructor:flwr.proto.Value) -} - -void Value::SharedCtor() { -clear_has_value(); -} - -Value::~Value() { - // @@protoc_insertion_point(destructor:flwr.proto.Value) - if (GetArenaForAllocation() != nullptr) return; - SharedDtor(); - _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -inline void Value::SharedDtor() { - GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); - if (has_value()) { - clear_value(); - } -} - -void Value::ArenaDtor(void* object) { - Value* _this = reinterpret_cast< Value* >(object); - (void)_this; -} -void Value::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { -} -void Value::SetCachedSize(int size) const { - _cached_size_.Set(size); -} - -void Value::clear_value() { -// @@protoc_insertion_point(one_of_clear_start:flwr.proto.Value) - switch (value_case()) { - case kDouble: { - // No need to clear - break; - } - case kSint64: { - // No need to clear - break; - } - case kBool: { - // No need to clear - break; - } - case kString: { - value_.string_.Destroy(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); - break; - } - case kBytes: { - value_.bytes_.Destroy(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); - break; - } - case kDoubleList: { - if (GetArenaForAllocation() == nullptr) { - delete value_.double_list_; - } - 
break; - } - case kSint64List: { - if (GetArenaForAllocation() == nullptr) { - delete value_.sint64_list_; - } - break; - } - case kBoolList: { - if (GetArenaForAllocation() == nullptr) { - delete value_.bool_list_; - } - break; - } - case kStringList: { - if (GetArenaForAllocation() == nullptr) { - delete value_.string_list_; - } - break; - } - case kBytesList: { - if (GetArenaForAllocation() == nullptr) { - delete value_.bytes_list_; - } - break; - } - case VALUE_NOT_SET: { - break; - } - } - _oneof_case_[0] = VALUE_NOT_SET; -} - - -void Value::Clear() { -// @@protoc_insertion_point(message_clear_start:flwr.proto.Value) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - clear_value(); - _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -const char* Value::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { -#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure - while (!ctx->Done(&ptr)) { - ::PROTOBUF_NAMESPACE_ID::uint32 tag; - ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); - switch (tag >> 3) { - // double double = 1; - case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 9)) { - _internal_set_double_(::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr)); - ptr += sizeof(double); - } else - goto handle_unusual; - continue; - // sint64 sint64 = 2; - case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 16)) { - _internal_set_sint64(::PROTOBUF_NAMESPACE_ID::internal::ReadVarintZigZag64(&ptr)); - CHK_(ptr); - } else - goto handle_unusual; - continue; - // bool bool = 3; - case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) { - _internal_set_bool_(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr)); - CHK_(ptr); - } else - goto handle_unusual; - continue; - // string 
string = 4; - case 4: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 34)) { - auto str = _internal_mutable_string(); - ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); - CHK_(::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "flwr.proto.Value.string")); - CHK_(ptr); - } else - goto handle_unusual; - continue; - // bytes bytes = 5; - case 5: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 42)) { - auto str = _internal_mutable_bytes(); - ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); - CHK_(ptr); - } else - goto handle_unusual; - continue; - // .flwr.proto.Value.DoubleList double_list = 21; - case 21: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 170)) { - ptr = ctx->ParseMessage(_internal_mutable_double_list(), ptr); - CHK_(ptr); - } else - goto handle_unusual; - continue; - // .flwr.proto.Value.Sint64List sint64_list = 22; - case 22: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 178)) { - ptr = ctx->ParseMessage(_internal_mutable_sint64_list(), ptr); - CHK_(ptr); - } else - goto handle_unusual; - continue; - // .flwr.proto.Value.BoolList bool_list = 23; - case 23: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 186)) { - ptr = ctx->ParseMessage(_internal_mutable_bool_list(), ptr); - CHK_(ptr); - } else - goto handle_unusual; - continue; - // .flwr.proto.Value.StringList string_list = 24; - case 24: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 194)) { - ptr = ctx->ParseMessage(_internal_mutable_string_list(), ptr); - CHK_(ptr); - } else - goto handle_unusual; - continue; - // .flwr.proto.Value.BytesList bytes_list = 25; - case 25: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 202)) { - ptr = ctx->ParseMessage(_internal_mutable_bytes_list(), ptr); - CHK_(ptr); - } else 
- goto handle_unusual; - continue; - default: - goto handle_unusual; - } // switch - handle_unusual: - if ((tag == 0) || ((tag & 7) == 4)) { - CHK_(ptr); - ctx->SetLastTag(tag); - goto message_done; - } - ptr = UnknownFieldParse( - tag, - _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), - ptr, ctx); - CHK_(ptr != nullptr); - } // while -message_done: - return ptr; -failure: - ptr = nullptr; - goto message_done; -#undef CHK_ -} - -::PROTOBUF_NAMESPACE_ID::uint8* Value::_InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { - // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.Value) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - // double double = 1; - if (_internal_has_double_()) { - target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteDoubleToArray(1, this->_internal_double_(), target); - } - - // sint64 sint64 = 2; - if (_internal_has_sint64()) { - target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteSInt64ToArray(2, this->_internal_sint64(), target); - } - - // bool bool = 3; - if (_internal_has_bool_()) { - target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(3, this->_internal_bool_(), target); - } - - // string string = 4; - if (_internal_has_string()) { - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( - this->_internal_string().data(), static_cast(this->_internal_string().length()), - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, - "flwr.proto.Value.string"); - target = stream->WriteStringMaybeAliased( - 4, this->_internal_string(), target); - } - - // bytes bytes = 5; - if (_internal_has_bytes()) { - target = stream->WriteBytesMaybeAliased( - 5, this->_internal_bytes(), target); - } - - // 
.flwr.proto.Value.DoubleList double_list = 21; - if (_internal_has_double_list()) { - target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage( - 21, _Internal::double_list(this), target, stream); - } - - // .flwr.proto.Value.Sint64List sint64_list = 22; - if (_internal_has_sint64_list()) { - target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage( - 22, _Internal::sint64_list(this), target, stream); - } - - // .flwr.proto.Value.BoolList bool_list = 23; - if (_internal_has_bool_list()) { - target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage( - 23, _Internal::bool_list(this), target, stream); - } - - // .flwr.proto.Value.StringList string_list = 24; - if (_internal_has_string_list()) { - target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage( - 24, _Internal::string_list(this), target, stream); - } - - // .flwr.proto.Value.BytesList bytes_list = 25; - if (_internal_has_bytes_list()) { - target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage( - 25, _Internal::bytes_list(this), target, stream); - } - - if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( - _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); - } - // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.Value) - return target; -} - -size_t Value::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:flwr.proto.Value) - size_t total_size = 0; - - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler 
warnings about cached_has_bits being unused - (void) cached_has_bits; - - switch (value_case()) { - // double double = 1; - case kDouble: { - total_size += 1 + 8; - break; - } - // sint64 sint64 = 2; - case kSint64: { - total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SInt64SizePlusOne(this->_internal_sint64()); - break; - } - // bool bool = 3; - case kBool: { - total_size += 1 + 1; - break; - } - // string string = 4; - case kString: { - total_size += 1 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( - this->_internal_string()); - break; - } - // bytes bytes = 5; - case kBytes: { - total_size += 1 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::BytesSize( - this->_internal_bytes()); - break; - } - // .flwr.proto.Value.DoubleList double_list = 21; - case kDoubleList: { - total_size += 2 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( - *value_.double_list_); - break; - } - // .flwr.proto.Value.Sint64List sint64_list = 22; - case kSint64List: { - total_size += 2 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( - *value_.sint64_list_); - break; - } - // .flwr.proto.Value.BoolList bool_list = 23; - case kBoolList: { - total_size += 2 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( - *value_.bool_list_); - break; - } - // .flwr.proto.Value.StringList string_list = 24; - case kStringList: { - total_size += 2 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( - *value_.string_list_); - break; - } - // .flwr.proto.Value.BytesList bytes_list = 25; - case kBytesList: { - total_size += 2 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( - *value_.bytes_list_); - break; - } - case VALUE_NOT_SET: { - break; - } - } - return MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); -} - -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData Value::_class_data_ = { - ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, - Value::MergeImpl -}; -const 
::PROTOBUF_NAMESPACE_ID::Message::ClassData*Value::GetClassData() const { return &_class_data_; } - -void Value::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, - const ::PROTOBUF_NAMESPACE_ID::Message& from) { - static_cast(to)->MergeFrom( - static_cast(from)); -} - - -void Value::MergeFrom(const Value& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.Value) - GOOGLE_DCHECK_NE(&from, this); - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - switch (from.value_case()) { - case kDouble: { - _internal_set_double_(from._internal_double_()); - break; - } - case kSint64: { - _internal_set_sint64(from._internal_sint64()); - break; - } - case kBool: { - _internal_set_bool_(from._internal_bool_()); - break; - } - case kString: { - _internal_set_string(from._internal_string()); - break; - } - case kBytes: { - _internal_set_bytes(from._internal_bytes()); - break; - } - case kDoubleList: { - _internal_mutable_double_list()->::flwr::proto::Value_DoubleList::MergeFrom(from._internal_double_list()); - break; - } - case kSint64List: { - _internal_mutable_sint64_list()->::flwr::proto::Value_Sint64List::MergeFrom(from._internal_sint64_list()); - break; - } - case kBoolList: { - _internal_mutable_bool_list()->::flwr::proto::Value_BoolList::MergeFrom(from._internal_bool_list()); - break; - } - case kStringList: { - _internal_mutable_string_list()->::flwr::proto::Value_StringList::MergeFrom(from._internal_string_list()); - break; - } - case kBytesList: { - _internal_mutable_bytes_list()->::flwr::proto::Value_BytesList::MergeFrom(from._internal_bytes_list()); - break; - } - case VALUE_NOT_SET: { - break; - } - } - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); -} - -void Value::CopyFrom(const Value& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.Value) - if (&from == this) return; - Clear(); - MergeFrom(from); -} - -bool 
Value::IsInitialized() const { - return true; -} - -void Value::InternalSwap(Value* other) { - using std::swap; - _internal_metadata_.InternalSwap(&other->_internal_metadata_); - swap(value_, other->value_); - swap(_oneof_case_[0], other->_oneof_case_[0]); -} - -::PROTOBUF_NAMESPACE_ID::Metadata Value::GetMetadata() const { - return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( - &descriptor_table_flwr_2fproto_2ftask_2eproto_getter, &descriptor_table_flwr_2fproto_2ftask_2eproto_once, - file_level_metadata_flwr_2fproto_2ftask_2eproto[8]); -} - -// =================================================================== - -SecureAggregation_NamedValuesEntry_DoNotUse::SecureAggregation_NamedValuesEntry_DoNotUse() {} -SecureAggregation_NamedValuesEntry_DoNotUse::SecureAggregation_NamedValuesEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena) - : SuperType(arena) {} -void SecureAggregation_NamedValuesEntry_DoNotUse::MergeFrom(const SecureAggregation_NamedValuesEntry_DoNotUse& other) { - MergeFromInternal(other); -} -::PROTOBUF_NAMESPACE_ID::Metadata SecureAggregation_NamedValuesEntry_DoNotUse::GetMetadata() const { - return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( - &descriptor_table_flwr_2fproto_2ftask_2eproto_getter, &descriptor_table_flwr_2fproto_2ftask_2eproto_once, - file_level_metadata_flwr_2fproto_2ftask_2eproto[9]); -} - -// =================================================================== - -class SecureAggregation::_Internal { - public: -}; - -SecureAggregation::SecureAggregation(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned) - : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned), - named_values_(arena) { - SharedCtor(); - if (!is_message_owned) { - RegisterArenaDtor(arena); - } - // @@protoc_insertion_point(arena_constructor:flwr.proto.SecureAggregation) -} -SecureAggregation::SecureAggregation(const SecureAggregation& from) - : ::PROTOBUF_NAMESPACE_ID::Message() { - 
_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - named_values_.MergeFrom(from.named_values_); - // @@protoc_insertion_point(copy_constructor:flwr.proto.SecureAggregation) -} - -void SecureAggregation::SharedCtor() { -} - -SecureAggregation::~SecureAggregation() { - // @@protoc_insertion_point(destructor:flwr.proto.SecureAggregation) - if (GetArenaForAllocation() != nullptr) return; - SharedDtor(); - _internal_metadata_.Delete<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -inline void SecureAggregation::SharedDtor() { - GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); -} - -void SecureAggregation::ArenaDtor(void* object) { - SecureAggregation* _this = reinterpret_cast< SecureAggregation* >(object); - (void)_this; - _this->named_values_. ~MapField(); -} -inline void SecureAggregation::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena) { - if (arena != nullptr) { - arena->OwnCustomDestructor(this, &SecureAggregation::ArenaDtor); - } -} -void SecureAggregation::SetCachedSize(int size) const { - _cached_size_.Set(size); -} - -void SecureAggregation::Clear() { -// @@protoc_insertion_point(message_clear_start:flwr.proto.SecureAggregation) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - named_values_.Clear(); - _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); -} - -const char* SecureAggregation::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { -#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure - while (!ctx->Done(&ptr)) { - ::PROTOBUF_NAMESPACE_ID::uint32 tag; - ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); - switch (tag >> 3) { - // map named_values = 1; - case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { - ptr -= 1; - do { - ptr += 1; - ptr = ctx->ParseMessage(&named_values_, 
ptr); - CHK_(ptr); - if (!ctx->DataAvailable(ptr)) break; - } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); - } else - goto handle_unusual; - continue; - default: - goto handle_unusual; - } // switch - handle_unusual: - if ((tag == 0) || ((tag & 7) == 4)) { - CHK_(ptr); - ctx->SetLastTag(tag); - goto message_done; - } - ptr = UnknownFieldParse( - tag, - _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), - ptr, ctx); - CHK_(ptr != nullptr); - } // while -message_done: - return ptr; -failure: - ptr = nullptr; - goto message_done; -#undef CHK_ -} - -::PROTOBUF_NAMESPACE_ID::uint8* SecureAggregation::_InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { - // @@protoc_insertion_point(serialize_to_array_start:flwr.proto.SecureAggregation) - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - // map named_values = 1; - if (!this->_internal_named_values().empty()) { - typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >::const_pointer - ConstPtr; - typedef ConstPtr SortItem; - typedef ::PROTOBUF_NAMESPACE_ID::internal::CompareByDerefFirst Less; - struct Utf8Check { - static void Check(ConstPtr p) { - (void)p; - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( - p->first.data(), static_cast(p->first.length()), - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, - "flwr.proto.SecureAggregation.NamedValuesEntry.key"); - } - }; - - if (stream->IsSerializationDeterministic() && - this->_internal_named_values().size() > 1) { - ::std::unique_ptr items( - new SortItem[this->_internal_named_values().size()]); - typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >::size_type size_type; - size_type n = 0; - for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >::const_iterator - it = this->_internal_named_values().begin(); - it != 
this->_internal_named_values().end(); ++it, ++n) { - items[static_cast(n)] = SortItem(&*it); - } - ::std::sort(&items[0], &items[static_cast(n)], Less()); - for (size_type i = 0; i < n; i++) { - target = SecureAggregation_NamedValuesEntry_DoNotUse::Funcs::InternalSerialize(1, items[static_cast(i)]->first, items[static_cast(i)]->second, target, stream); - Utf8Check::Check(&(*items[static_cast(i)])); - } - } else { - for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >::const_iterator - it = this->_internal_named_values().begin(); - it != this->_internal_named_values().end(); ++it) { - target = SecureAggregation_NamedValuesEntry_DoNotUse::Funcs::InternalSerialize(1, it->first, it->second, target, stream); - Utf8Check::Check(&(*it)); - } - } - } - - if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( - _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); - } - // @@protoc_insertion_point(serialize_to_array_end:flwr.proto.SecureAggregation) - return target; -} - -size_t SecureAggregation::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:flwr.proto.SecureAggregation) - size_t total_size = 0; - - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; - - // map named_values = 1; - total_size += 1 * - ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->_internal_named_values_size()); - for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >::const_iterator - it = this->_internal_named_values().begin(); - it != this->_internal_named_values().end(); ++it) { - total_size += SecureAggregation_NamedValuesEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second); - } - - return 
MaybeComputeUnknownFieldsSize(total_size, &_cached_size_); -} - -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData SecureAggregation::_class_data_ = { - ::PROTOBUF_NAMESPACE_ID::Message::CopyWithSizeCheck, - SecureAggregation::MergeImpl -}; -const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*SecureAggregation::GetClassData() const { return &_class_data_; } - -void SecureAggregation::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, - const ::PROTOBUF_NAMESPACE_ID::Message& from) { - static_cast(to)->MergeFrom( - static_cast(from)); -} - - -void SecureAggregation::MergeFrom(const SecureAggregation& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:flwr.proto.SecureAggregation) - GOOGLE_DCHECK_NE(&from, this); - ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; - - named_values_.MergeFrom(from.named_values_); - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); -} - -void SecureAggregation::CopyFrom(const SecureAggregation& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:flwr.proto.SecureAggregation) - if (&from == this) return; - Clear(); - MergeFrom(from); -} - -bool SecureAggregation::IsInitialized() const { - return true; -} - -void SecureAggregation::InternalSwap(SecureAggregation* other) { - using std::swap; - _internal_metadata_.InternalSwap(&other->_internal_metadata_); - named_values_.InternalSwap(&other->named_values_); -} - -::PROTOBUF_NAMESPACE_ID::Metadata SecureAggregation::GetMetadata() const { - return ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors( - &descriptor_table_flwr_2fproto_2ftask_2eproto_getter, &descriptor_table_flwr_2fproto_2ftask_2eproto_once, - file_level_metadata_flwr_2fproto_2ftask_2eproto[10]); -} - -// @@protoc_insertion_point(namespace_scope) -} // namespace proto -} // namespace flwr -PROTOBUF_NAMESPACE_OPEN -template<> PROTOBUF_NOINLINE ::flwr::proto::Task* Arena::CreateMaybeMessage< ::flwr::proto::Task >(Arena* 
arena) { - return Arena::CreateMessageInternal< ::flwr::proto::Task >(arena); -} -template<> PROTOBUF_NOINLINE ::flwr::proto::TaskIns* Arena::CreateMaybeMessage< ::flwr::proto::TaskIns >(Arena* arena) { - return Arena::CreateMessageInternal< ::flwr::proto::TaskIns >(arena); +template<> PROTOBUF_NOINLINE ::flwr::proto::TaskIns* Arena::CreateMaybeMessage< ::flwr::proto::TaskIns >(Arena* arena) { + return Arena::CreateMessageInternal< ::flwr::proto::TaskIns >(arena); } template<> PROTOBUF_NOINLINE ::flwr::proto::TaskRes* Arena::CreateMaybeMessage< ::flwr::proto::TaskRes >(Arena* arena) { return Arena::CreateMessageInternal< ::flwr::proto::TaskRes >(arena); } -template<> PROTOBUF_NOINLINE ::flwr::proto::Value_DoubleList* Arena::CreateMaybeMessage< ::flwr::proto::Value_DoubleList >(Arena* arena) { - return Arena::CreateMessageInternal< ::flwr::proto::Value_DoubleList >(arena); -} -template<> PROTOBUF_NOINLINE ::flwr::proto::Value_Sint64List* Arena::CreateMaybeMessage< ::flwr::proto::Value_Sint64List >(Arena* arena) { - return Arena::CreateMessageInternal< ::flwr::proto::Value_Sint64List >(arena); -} -template<> PROTOBUF_NOINLINE ::flwr::proto::Value_BoolList* Arena::CreateMaybeMessage< ::flwr::proto::Value_BoolList >(Arena* arena) { - return Arena::CreateMessageInternal< ::flwr::proto::Value_BoolList >(arena); -} -template<> PROTOBUF_NOINLINE ::flwr::proto::Value_StringList* Arena::CreateMaybeMessage< ::flwr::proto::Value_StringList >(Arena* arena) { - return Arena::CreateMessageInternal< ::flwr::proto::Value_StringList >(arena); -} -template<> PROTOBUF_NOINLINE ::flwr::proto::Value_BytesList* Arena::CreateMaybeMessage< ::flwr::proto::Value_BytesList >(Arena* arena) { - return Arena::CreateMessageInternal< ::flwr::proto::Value_BytesList >(arena); -} -template<> PROTOBUF_NOINLINE ::flwr::proto::Value* Arena::CreateMaybeMessage< ::flwr::proto::Value >(Arena* arena) { - return Arena::CreateMessageInternal< ::flwr::proto::Value >(arena); -} -template<> PROTOBUF_NOINLINE 
::flwr::proto::SecureAggregation_NamedValuesEntry_DoNotUse* Arena::CreateMaybeMessage< ::flwr::proto::SecureAggregation_NamedValuesEntry_DoNotUse >(Arena* arena) { - return Arena::CreateMessageInternal< ::flwr::proto::SecureAggregation_NamedValuesEntry_DoNotUse >(arena); -} -template<> PROTOBUF_NOINLINE ::flwr::proto::SecureAggregation* Arena::CreateMaybeMessage< ::flwr::proto::SecureAggregation >(Arena* arena) { - return Arena::CreateMessageInternal< ::flwr::proto::SecureAggregation >(arena); -} PROTOBUF_NAMESPACE_CLOSE // @@protoc_insertion_point(global_scope) diff --git a/src/cc/flwr/include/flwr/proto/task.pb.h b/src/cc/flwr/include/flwr/proto/task.pb.h index 0c2c94c64938..3dc421e2f8ab 100644 --- a/src/cc/flwr/include/flwr/proto/task.pb.h +++ b/src/cc/flwr/include/flwr/proto/task.pb.h @@ -30,12 +30,11 @@ #include #include // IWYU pragma: export #include // IWYU pragma: export -#include // IWYU pragma: export -#include -#include #include #include "flwr/proto/node.pb.h" +#include "flwr/proto/recordset.pb.h" #include "flwr/proto/transport.pb.h" +#include "flwr/proto/error.pb.h" // @@protoc_insertion_point(includes) #include #define PROTOBUF_INTERNAL_EXPORT_flwr_2fproto_2ftask_2eproto @@ -51,7 +50,7 @@ struct TableStruct_flwr_2fproto_2ftask_2eproto { PROTOBUF_SECTION_VARIABLE(protodesc_cold); static const ::PROTOBUF_NAMESPACE_ID::internal::AuxiliaryParseTableField aux[] PROTOBUF_SECTION_VARIABLE(protodesc_cold); - static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[11] + static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[3] PROTOBUF_SECTION_VARIABLE(protodesc_cold); static const ::PROTOBUF_NAMESPACE_ID::internal::FieldMetadata field_metadata[]; static const ::PROTOBUF_NAMESPACE_ID::internal::SerializationTable serialization_table[]; @@ -60,12 +59,6 @@ struct TableStruct_flwr_2fproto_2ftask_2eproto { extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_flwr_2fproto_2ftask_2eproto; namespace flwr { namespace 
proto { -class SecureAggregation; -struct SecureAggregationDefaultTypeInternal; -extern SecureAggregationDefaultTypeInternal _SecureAggregation_default_instance_; -class SecureAggregation_NamedValuesEntry_DoNotUse; -struct SecureAggregation_NamedValuesEntry_DoNotUseDefaultTypeInternal; -extern SecureAggregation_NamedValuesEntry_DoNotUseDefaultTypeInternal _SecureAggregation_NamedValuesEntry_DoNotUse_default_instance_; class Task; struct TaskDefaultTypeInternal; extern TaskDefaultTypeInternal _Task_default_instance_; @@ -75,38 +68,12 @@ extern TaskInsDefaultTypeInternal _TaskIns_default_instance_; class TaskRes; struct TaskResDefaultTypeInternal; extern TaskResDefaultTypeInternal _TaskRes_default_instance_; -class Value; -struct ValueDefaultTypeInternal; -extern ValueDefaultTypeInternal _Value_default_instance_; -class Value_BoolList; -struct Value_BoolListDefaultTypeInternal; -extern Value_BoolListDefaultTypeInternal _Value_BoolList_default_instance_; -class Value_BytesList; -struct Value_BytesListDefaultTypeInternal; -extern Value_BytesListDefaultTypeInternal _Value_BytesList_default_instance_; -class Value_DoubleList; -struct Value_DoubleListDefaultTypeInternal; -extern Value_DoubleListDefaultTypeInternal _Value_DoubleList_default_instance_; -class Value_Sint64List; -struct Value_Sint64ListDefaultTypeInternal; -extern Value_Sint64ListDefaultTypeInternal _Value_Sint64List_default_instance_; -class Value_StringList; -struct Value_StringListDefaultTypeInternal; -extern Value_StringListDefaultTypeInternal _Value_StringList_default_instance_; } // namespace proto } // namespace flwr PROTOBUF_NAMESPACE_OPEN -template<> ::flwr::proto::SecureAggregation* Arena::CreateMaybeMessage<::flwr::proto::SecureAggregation>(Arena*); -template<> ::flwr::proto::SecureAggregation_NamedValuesEntry_DoNotUse* Arena::CreateMaybeMessage<::flwr::proto::SecureAggregation_NamedValuesEntry_DoNotUse>(Arena*); template<> ::flwr::proto::Task* 
Arena::CreateMaybeMessage<::flwr::proto::Task>(Arena*); template<> ::flwr::proto::TaskIns* Arena::CreateMaybeMessage<::flwr::proto::TaskIns>(Arena*); template<> ::flwr::proto::TaskRes* Arena::CreateMaybeMessage<::flwr::proto::TaskRes>(Arena*); -template<> ::flwr::proto::Value* Arena::CreateMaybeMessage<::flwr::proto::Value>(Arena*); -template<> ::flwr::proto::Value_BoolList* Arena::CreateMaybeMessage<::flwr::proto::Value_BoolList>(Arena*); -template<> ::flwr::proto::Value_BytesList* Arena::CreateMaybeMessage<::flwr::proto::Value_BytesList>(Arena*); -template<> ::flwr::proto::Value_DoubleList* Arena::CreateMaybeMessage<::flwr::proto::Value_DoubleList>(Arena*); -template<> ::flwr::proto::Value_Sint64List* Arena::CreateMaybeMessage<::flwr::proto::Value_Sint64List>(Arena*); -template<> ::flwr::proto::Value_StringList* Arena::CreateMaybeMessage<::flwr::proto::Value_StringList>(Arena*); PROTOBUF_NAMESPACE_CLOSE namespace flwr { namespace proto { @@ -232,17 +199,18 @@ class Task final : // accessors ------------------------------------------------------- enum : int { - kAncestryFieldNumber = 6, - kCreatedAtFieldNumber = 3, + kAncestryFieldNumber = 7, kDeliveredAtFieldNumber = 4, - kTtlFieldNumber = 5, + kTaskTypeFieldNumber = 8, kProducerFieldNumber = 1, kConsumerFieldNumber = 2, - kSaFieldNumber = 7, - kLegacyServerMessageFieldNumber = 101, - kLegacyClientMessageFieldNumber = 102, + kRecordsetFieldNumber = 9, + kErrorFieldNumber = 10, + kCreatedAtFieldNumber = 3, + kPushedAtFieldNumber = 5, + kTtlFieldNumber = 6, }; - // repeated string ancestry = 6; + // repeated string ancestry = 7; int ancestry_size() const; private: int _internal_ancestry_size() const; @@ -266,20 +234,6 @@ class Task final : std::string* _internal_add_ancestry(); public: - // string created_at = 3; - void clear_created_at(); - const std::string& created_at() const; - template - void set_created_at(ArgT0&& arg0, ArgT... 
args); - std::string* mutable_created_at(); - PROTOBUF_MUST_USE_RESULT std::string* release_created_at(); - void set_allocated_created_at(std::string* created_at); - private: - const std::string& _internal_created_at() const; - inline PROTOBUF_ALWAYS_INLINE void _internal_set_created_at(const std::string& value); - std::string* _internal_mutable_created_at(); - public: - // string delivered_at = 4; void clear_delivered_at(); const std::string& delivered_at() const; @@ -294,18 +248,18 @@ class Task final : std::string* _internal_mutable_delivered_at(); public: - // string ttl = 5; - void clear_ttl(); - const std::string& ttl() const; + // string task_type = 8; + void clear_task_type(); + const std::string& task_type() const; template - void set_ttl(ArgT0&& arg0, ArgT... args); - std::string* mutable_ttl(); - PROTOBUF_MUST_USE_RESULT std::string* release_ttl(); - void set_allocated_ttl(std::string* ttl); + void set_task_type(ArgT0&& arg0, ArgT... args); + std::string* mutable_task_type(); + PROTOBUF_MUST_USE_RESULT std::string* release_task_type(); + void set_allocated_task_type(std::string* task_type); private: - const std::string& _internal_ttl() const; - inline PROTOBUF_ALWAYS_INLINE void _internal_set_ttl(const std::string& value); - std::string* _internal_mutable_ttl(); + const std::string& _internal_task_type() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_task_type(const std::string& value); + std::string* _internal_mutable_task_type(); public: // .flwr.proto.Node producer = 1; @@ -344,59 +298,68 @@ class Task final : ::flwr::proto::Node* consumer); ::flwr::proto::Node* unsafe_arena_release_consumer(); - // .flwr.proto.SecureAggregation sa = 7; - bool has_sa() const; + // .flwr.proto.RecordSet recordset = 9; + bool has_recordset() const; private: - bool _internal_has_sa() const; + bool _internal_has_recordset() const; public: - void clear_sa(); - const ::flwr::proto::SecureAggregation& sa() const; - PROTOBUF_MUST_USE_RESULT 
::flwr::proto::SecureAggregation* release_sa(); - ::flwr::proto::SecureAggregation* mutable_sa(); - void set_allocated_sa(::flwr::proto::SecureAggregation* sa); + void clear_recordset(); + const ::flwr::proto::RecordSet& recordset() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::RecordSet* release_recordset(); + ::flwr::proto::RecordSet* mutable_recordset(); + void set_allocated_recordset(::flwr::proto::RecordSet* recordset); private: - const ::flwr::proto::SecureAggregation& _internal_sa() const; - ::flwr::proto::SecureAggregation* _internal_mutable_sa(); + const ::flwr::proto::RecordSet& _internal_recordset() const; + ::flwr::proto::RecordSet* _internal_mutable_recordset(); public: - void unsafe_arena_set_allocated_sa( - ::flwr::proto::SecureAggregation* sa); - ::flwr::proto::SecureAggregation* unsafe_arena_release_sa(); + void unsafe_arena_set_allocated_recordset( + ::flwr::proto::RecordSet* recordset); + ::flwr::proto::RecordSet* unsafe_arena_release_recordset(); - // .flwr.proto.ServerMessage legacy_server_message = 101 [deprecated = true]; - PROTOBUF_DEPRECATED bool has_legacy_server_message() const; + // .flwr.proto.Error error = 10; + bool has_error() const; private: - bool _internal_has_legacy_server_message() const; + bool _internal_has_error() const; public: - PROTOBUF_DEPRECATED void clear_legacy_server_message(); - PROTOBUF_DEPRECATED const ::flwr::proto::ServerMessage& legacy_server_message() const; - PROTOBUF_MUST_USE_RESULT PROTOBUF_DEPRECATED ::flwr::proto::ServerMessage* release_legacy_server_message(); - PROTOBUF_DEPRECATED ::flwr::proto::ServerMessage* mutable_legacy_server_message(); - PROTOBUF_DEPRECATED void set_allocated_legacy_server_message(::flwr::proto::ServerMessage* legacy_server_message); + void clear_error(); + const ::flwr::proto::Error& error() const; + PROTOBUF_MUST_USE_RESULT ::flwr::proto::Error* release_error(); + ::flwr::proto::Error* mutable_error(); + void set_allocated_error(::flwr::proto::Error* error); private: - const 
::flwr::proto::ServerMessage& _internal_legacy_server_message() const; - ::flwr::proto::ServerMessage* _internal_mutable_legacy_server_message(); + const ::flwr::proto::Error& _internal_error() const; + ::flwr::proto::Error* _internal_mutable_error(); public: - PROTOBUF_DEPRECATED void unsafe_arena_set_allocated_legacy_server_message( - ::flwr::proto::ServerMessage* legacy_server_message); - PROTOBUF_DEPRECATED ::flwr::proto::ServerMessage* unsafe_arena_release_legacy_server_message(); + void unsafe_arena_set_allocated_error( + ::flwr::proto::Error* error); + ::flwr::proto::Error* unsafe_arena_release_error(); - // .flwr.proto.ClientMessage legacy_client_message = 102 [deprecated = true]; - PROTOBUF_DEPRECATED bool has_legacy_client_message() const; + // double created_at = 3; + void clear_created_at(); + double created_at() const; + void set_created_at(double value); + private: + double _internal_created_at() const; + void _internal_set_created_at(double value); + public: + + // double pushed_at = 5; + void clear_pushed_at(); + double pushed_at() const; + void set_pushed_at(double value); private: - bool _internal_has_legacy_client_message() const; + double _internal_pushed_at() const; + void _internal_set_pushed_at(double value); public: - PROTOBUF_DEPRECATED void clear_legacy_client_message(); - PROTOBUF_DEPRECATED const ::flwr::proto::ClientMessage& legacy_client_message() const; - PROTOBUF_MUST_USE_RESULT PROTOBUF_DEPRECATED ::flwr::proto::ClientMessage* release_legacy_client_message(); - PROTOBUF_DEPRECATED ::flwr::proto::ClientMessage* mutable_legacy_client_message(); - PROTOBUF_DEPRECATED void set_allocated_legacy_client_message(::flwr::proto::ClientMessage* legacy_client_message); + + // double ttl = 6; + void clear_ttl(); + double ttl() const; + void set_ttl(double value); private: - const ::flwr::proto::ClientMessage& _internal_legacy_client_message() const; - ::flwr::proto::ClientMessage* _internal_mutable_legacy_client_message(); + double 
_internal_ttl() const; + void _internal_set_ttl(double value); public: - PROTOBUF_DEPRECATED void unsafe_arena_set_allocated_legacy_client_message( - ::flwr::proto::ClientMessage* legacy_client_message); - PROTOBUF_DEPRECATED ::flwr::proto::ClientMessage* unsafe_arena_release_legacy_client_message(); // @@protoc_insertion_point(class_scope:flwr.proto.Task) private: @@ -406,14 +369,15 @@ class Task final : typedef void InternalArenaConstructable_; typedef void DestructorSkippable_; ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField ancestry_; - ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr created_at_; ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr delivered_at_; - ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr ttl_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr task_type_; ::flwr::proto::Node* producer_; ::flwr::proto::Node* consumer_; - ::flwr::proto::SecureAggregation* sa_; - ::flwr::proto::ServerMessage* legacy_server_message_; - ::flwr::proto::ClientMessage* legacy_client_message_; + ::flwr::proto::RecordSet* recordset_; + ::flwr::proto::Error* error_; + double created_at_; + double pushed_at_; + double ttl_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; friend struct ::TableStruct_flwr_2fproto_2ftask_2eproto; }; @@ -541,7 +505,7 @@ class TaskIns final : kTaskIdFieldNumber = 1, kGroupIdFieldNumber = 2, kTaskFieldNumber = 4, - kWorkloadIdFieldNumber = 3, + kRunIdFieldNumber = 3, }; // string task_id = 1; void clear_task_id(); @@ -589,13 +553,13 @@ class TaskIns final : ::flwr::proto::Task* task); ::flwr::proto::Task* unsafe_arena_release_task(); - // sint64 workload_id = 3; - void clear_workload_id(); - ::PROTOBUF_NAMESPACE_ID::int64 workload_id() const; - void set_workload_id(::PROTOBUF_NAMESPACE_ID::int64 value); + // sint64 run_id = 3; + void clear_run_id(); + ::PROTOBUF_NAMESPACE_ID::int64 run_id() const; + void set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value); private: - ::PROTOBUF_NAMESPACE_ID::int64 _internal_workload_id() 
const; - void _internal_set_workload_id(::PROTOBUF_NAMESPACE_ID::int64 value); + ::PROTOBUF_NAMESPACE_ID::int64 _internal_run_id() const; + void _internal_set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value); public: // @@protoc_insertion_point(class_scope:flwr.proto.TaskIns) @@ -608,7 +572,7 @@ class TaskIns final : ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr task_id_; ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr group_id_; ::flwr::proto::Task* task_; - ::PROTOBUF_NAMESPACE_ID::int64 workload_id_; + ::PROTOBUF_NAMESPACE_ID::int64 run_id_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; friend struct ::TableStruct_flwr_2fproto_2ftask_2eproto; }; @@ -736,7 +700,7 @@ class TaskRes final : kTaskIdFieldNumber = 1, kGroupIdFieldNumber = 2, kTaskFieldNumber = 4, - kWorkloadIdFieldNumber = 3, + kRunIdFieldNumber = 3, }; // string task_id = 1; void clear_task_id(); @@ -784,13 +748,13 @@ class TaskRes final : ::flwr::proto::Task* task); ::flwr::proto::Task* unsafe_arena_release_task(); - // sint64 workload_id = 3; - void clear_workload_id(); - ::PROTOBUF_NAMESPACE_ID::int64 workload_id() const; - void set_workload_id(::PROTOBUF_NAMESPACE_ID::int64 value); + // sint64 run_id = 3; + void clear_run_id(); + ::PROTOBUF_NAMESPACE_ID::int64 run_id() const; + void set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value); private: - ::PROTOBUF_NAMESPACE_ID::int64 _internal_workload_id() const; - void _internal_set_workload_id(::PROTOBUF_NAMESPACE_ID::int64 value); + ::PROTOBUF_NAMESPACE_ID::int64 _internal_run_id() const; + void _internal_set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value); public: // @@protoc_insertion_point(class_scope:flwr.proto.TaskRes) @@ -803,1561 +767,211 @@ class TaskRes final : ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr task_id_; ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr group_id_; ::flwr::proto::Task* task_; - ::PROTOBUF_NAMESPACE_ID::int64 workload_id_; + ::PROTOBUF_NAMESPACE_ID::int64 run_id_; mutable 
::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; friend struct ::TableStruct_flwr_2fproto_2ftask_2eproto; }; -// ------------------------------------------------------------------- +// =================================================================== -class Value_DoubleList final : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.Value.DoubleList) */ { - public: - inline Value_DoubleList() : Value_DoubleList(nullptr) {} - ~Value_DoubleList() override; - explicit constexpr Value_DoubleList(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - Value_DoubleList(const Value_DoubleList& from); - Value_DoubleList(Value_DoubleList&& from) noexcept - : Value_DoubleList() { - *this = ::std::move(from); - } +// =================================================================== - inline Value_DoubleList& operator=(const Value_DoubleList& from) { - CopyFrom(from); - return *this; - } - inline Value_DoubleList& operator=(Value_DoubleList&& from) noexcept { - if (this == &from) return *this; - if (GetOwningArena() == from.GetOwningArena() - #ifdef PROTOBUF_FORCE_COPY_IN_MOVE - && GetOwningArena() != nullptr - #endif // !PROTOBUF_FORCE_COPY_IN_MOVE - ) { - InternalSwap(&from); - } else { - CopyFrom(from); - } - return *this; - } +#ifdef __GNUC__ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif // __GNUC__ +// Task - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { - return GetDescriptor(); +// .flwr.proto.Node producer = 1; +inline bool Task::_internal_has_producer() const { + return this != internal_default_instance() && producer_ != nullptr; +} +inline bool Task::has_producer() const { + return _internal_has_producer(); +} +inline const ::flwr::proto::Node& Task::_internal_producer() const { + const ::flwr::proto::Node* p = producer_; + return p != nullptr ? 
*p : reinterpret_cast( + ::flwr::proto::_Node_default_instance_); +} +inline const ::flwr::proto::Node& Task::producer() const { + // @@protoc_insertion_point(field_get:flwr.proto.Task.producer) + return _internal_producer(); +} +inline void Task::unsafe_arena_set_allocated_producer( + ::flwr::proto::Node* producer) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(producer_); } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { - return default_instance().GetMetadata().descriptor; + producer_ = producer; + if (producer) { + + } else { + } - static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { - return default_instance().GetMetadata().reflection; + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Task.producer) +} +inline ::flwr::proto::Node* Task::release_producer() { + + ::flwr::proto::Node* temp = producer_; + producer_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); } - static const Value_DoubleList& default_instance() { - return *internal_default_instance(); +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::flwr::proto::Node* Task::unsafe_arena_release_producer() { + // @@protoc_insertion_point(field_release:flwr.proto.Task.producer) + + ::flwr::proto::Node* temp = producer_; + producer_ = nullptr; + return temp; +} +inline ::flwr::proto::Node* Task::_internal_mutable_producer() { + + if (producer_ == nullptr) { + auto* p = CreateMaybeMessage<::flwr::proto::Node>(GetArenaForAllocation()); + producer_ = p; } - static inline const Value_DoubleList* 
internal_default_instance() { - return reinterpret_cast( - &_Value_DoubleList_default_instance_); + return producer_; +} +inline ::flwr::proto::Node* Task::mutable_producer() { + ::flwr::proto::Node* _msg = _internal_mutable_producer(); + // @@protoc_insertion_point(field_mutable:flwr.proto.Task.producer) + return _msg; +} +inline void Task::set_allocated_producer(::flwr::proto::Node* producer) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(producer_); } - static constexpr int kIndexInFileMessages = - 3; + if (producer) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper< + ::PROTOBUF_NAMESPACE_ID::MessageLite>::GetOwningArena( + reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(producer)); + if (message_arena != submessage_arena) { + producer = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, producer, submessage_arena); + } + + } else { + + } + producer_ = producer; + // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.producer) +} - friend void swap(Value_DoubleList& a, Value_DoubleList& b) { - a.Swap(&b); +// .flwr.proto.Node consumer = 2; +inline bool Task::_internal_has_consumer() const { + return this != internal_default_instance() && consumer_ != nullptr; +} +inline bool Task::has_consumer() const { + return _internal_has_consumer(); +} +inline const ::flwr::proto::Node& Task::_internal_consumer() const { + const ::flwr::proto::Node* p = consumer_; + return p != nullptr ? 
*p : reinterpret_cast( + ::flwr::proto::_Node_default_instance_); +} +inline const ::flwr::proto::Node& Task::consumer() const { + // @@protoc_insertion_point(field_get:flwr.proto.Task.consumer) + return _internal_consumer(); +} +inline void Task::unsafe_arena_set_allocated_consumer( + ::flwr::proto::Node* consumer) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(consumer_); } - inline void Swap(Value_DoubleList* other) { - if (other == this) return; - if (GetOwningArena() == other->GetOwningArena()) { - InternalSwap(other); - } else { - ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); - } + consumer_ = consumer; + if (consumer) { + + } else { + } - void UnsafeArenaSwap(Value_DoubleList* other) { - if (other == this) return; - GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); - InternalSwap(other); + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Task.consumer) +} +inline ::flwr::proto::Node* Task::release_consumer() { + + ::flwr::proto::Node* temp = consumer_; + consumer_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); } - - // implements Message ---------------------------------------------- - - inline Value_DoubleList* New() const final { - return new Value_DoubleList(); +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::flwr::proto::Node* Task::unsafe_arena_release_consumer() { + // @@protoc_insertion_point(field_release:flwr.proto.Task.consumer) + + ::flwr::proto::Node* temp = consumer_; + consumer_ = nullptr; + return temp; +} +inline ::flwr::proto::Node* 
Task::_internal_mutable_consumer() { + + if (consumer_ == nullptr) { + auto* p = CreateMaybeMessage<::flwr::proto::Node>(GetArenaForAllocation()); + consumer_ = p; } - - Value_DoubleList* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage(arena); + return consumer_; +} +inline ::flwr::proto::Node* Task::mutable_consumer() { + ::flwr::proto::Node* _msg = _internal_mutable_consumer(); + // @@protoc_insertion_point(field_mutable:flwr.proto.Task.consumer) + return _msg; +} +inline void Task::set_allocated_consumer(::flwr::proto::Node* consumer) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(consumer_); } - using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; - void CopyFrom(const Value_DoubleList& from); - using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - void MergeFrom(const Value_DoubleList& from); - private: - static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); - public: - PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; - bool IsInitialized() const final; - - size_t ByteSizeLong() const final; - const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; - int GetCachedSize() const final { return _cached_size_.Get(); } - - private: - void SharedCtor(); - void SharedDtor(); - void SetCachedSize(int size) const final; - void InternalSwap(Value_DoubleList* other); - friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "flwr.proto.Value.DoubleList"; + if (consumer) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + 
::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper< + ::PROTOBUF_NAMESPACE_ID::MessageLite>::GetOwningArena( + reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(consumer)); + if (message_arena != submessage_arena) { + consumer = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, consumer, submessage_arena); + } + + } else { + } - protected: - explicit Value_DoubleList(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned = false); - private: - static void ArenaDtor(void* object); - inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); - public: - - static const ClassData _class_data_; - const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; - - ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; - - // nested types ---------------------------------------------------- - - // accessors ------------------------------------------------------- + consumer_ = consumer; + // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.consumer) +} - enum : int { - kValsFieldNumber = 1, - }; - // repeated double vals = 1; - int vals_size() const; - private: - int _internal_vals_size() const; - public: - void clear_vals(); - private: - double _internal_vals(int index) const; - const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& - _internal_vals() const; - void _internal_add_vals(double value); - ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* - _internal_mutable_vals(); - public: - double vals(int index) const; - void set_vals(int index, double value); - void add_vals(double value); - const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& - vals() const; - ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* - mutable_vals(); - - // @@protoc_insertion_point(class_scope:flwr.proto.Value.DoubleList) - private: - class _Internal; - - template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; - typedef void InternalArenaConstructable_; - typedef void DestructorSkippable_; - 
::PROTOBUF_NAMESPACE_ID::RepeatedField< double > vals_; - mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; - friend struct ::TableStruct_flwr_2fproto_2ftask_2eproto; -}; -// ------------------------------------------------------------------- - -class Value_Sint64List final : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.Value.Sint64List) */ { - public: - inline Value_Sint64List() : Value_Sint64List(nullptr) {} - ~Value_Sint64List() override; - explicit constexpr Value_Sint64List(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - - Value_Sint64List(const Value_Sint64List& from); - Value_Sint64List(Value_Sint64List&& from) noexcept - : Value_Sint64List() { - *this = ::std::move(from); - } - - inline Value_Sint64List& operator=(const Value_Sint64List& from) { - CopyFrom(from); - return *this; - } - inline Value_Sint64List& operator=(Value_Sint64List&& from) noexcept { - if (this == &from) return *this; - if (GetOwningArena() == from.GetOwningArena() - #ifdef PROTOBUF_FORCE_COPY_IN_MOVE - && GetOwningArena() != nullptr - #endif // !PROTOBUF_FORCE_COPY_IN_MOVE - ) { - InternalSwap(&from); - } else { - CopyFrom(from); - } - return *this; - } - - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { - return GetDescriptor(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { - return default_instance().GetMetadata().descriptor; - } - static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { - return default_instance().GetMetadata().reflection; - } - static const Value_Sint64List& default_instance() { - return *internal_default_instance(); - } - static inline const Value_Sint64List* internal_default_instance() { - return reinterpret_cast( - &_Value_Sint64List_default_instance_); - } - static constexpr int kIndexInFileMessages = - 4; - - friend void swap(Value_Sint64List& a, Value_Sint64List& b) { - a.Swap(&b); - } - inline void Swap(Value_Sint64List* 
other) { - if (other == this) return; - if (GetOwningArena() == other->GetOwningArena()) { - InternalSwap(other); - } else { - ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); - } - } - void UnsafeArenaSwap(Value_Sint64List* other) { - if (other == this) return; - GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); - InternalSwap(other); - } - - // implements Message ---------------------------------------------- - - inline Value_Sint64List* New() const final { - return new Value_Sint64List(); - } - - Value_Sint64List* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage(arena); - } - using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; - void CopyFrom(const Value_Sint64List& from); - using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - void MergeFrom(const Value_Sint64List& from); - private: - static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); - public: - PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; - bool IsInitialized() const final; - - size_t ByteSizeLong() const final; - const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; - int GetCachedSize() const final { return _cached_size_.Get(); } - - private: - void SharedCtor(); - void SharedDtor(); - void SetCachedSize(int size) const final; - void InternalSwap(Value_Sint64List* other); - friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "flwr.proto.Value.Sint64List"; - } - protected: - explicit Value_Sint64List(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned = false); - private: - static void ArenaDtor(void* object); - inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); - 
public: - - static const ClassData _class_data_; - const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; - - ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; - - // nested types ---------------------------------------------------- - - // accessors ------------------------------------------------------- - - enum : int { - kValsFieldNumber = 1, - }; - // repeated sint64 vals = 1; - int vals_size() const; - private: - int _internal_vals_size() const; - public: - void clear_vals(); - private: - ::PROTOBUF_NAMESPACE_ID::int64 _internal_vals(int index) const; - const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >& - _internal_vals() const; - void _internal_add_vals(::PROTOBUF_NAMESPACE_ID::int64 value); - ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >* - _internal_mutable_vals(); - public: - ::PROTOBUF_NAMESPACE_ID::int64 vals(int index) const; - void set_vals(int index, ::PROTOBUF_NAMESPACE_ID::int64 value); - void add_vals(::PROTOBUF_NAMESPACE_ID::int64 value); - const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >& - vals() const; - ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >* - mutable_vals(); - - // @@protoc_insertion_point(class_scope:flwr.proto.Value.Sint64List) - private: - class _Internal; - - template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; - typedef void InternalArenaConstructable_; - typedef void DestructorSkippable_; - ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 > vals_; - mutable std::atomic _vals_cached_byte_size_; - mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; - friend struct ::TableStruct_flwr_2fproto_2ftask_2eproto; -}; -// ------------------------------------------------------------------- - -class Value_BoolList final : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.Value.BoolList) */ { - 
public: - inline Value_BoolList() : Value_BoolList(nullptr) {} - ~Value_BoolList() override; - explicit constexpr Value_BoolList(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - - Value_BoolList(const Value_BoolList& from); - Value_BoolList(Value_BoolList&& from) noexcept - : Value_BoolList() { - *this = ::std::move(from); - } - - inline Value_BoolList& operator=(const Value_BoolList& from) { - CopyFrom(from); - return *this; - } - inline Value_BoolList& operator=(Value_BoolList&& from) noexcept { - if (this == &from) return *this; - if (GetOwningArena() == from.GetOwningArena() - #ifdef PROTOBUF_FORCE_COPY_IN_MOVE - && GetOwningArena() != nullptr - #endif // !PROTOBUF_FORCE_COPY_IN_MOVE - ) { - InternalSwap(&from); - } else { - CopyFrom(from); - } - return *this; - } - - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { - return GetDescriptor(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { - return default_instance().GetMetadata().descriptor; - } - static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { - return default_instance().GetMetadata().reflection; - } - static const Value_BoolList& default_instance() { - return *internal_default_instance(); - } - static inline const Value_BoolList* internal_default_instance() { - return reinterpret_cast( - &_Value_BoolList_default_instance_); - } - static constexpr int kIndexInFileMessages = - 5; - - friend void swap(Value_BoolList& a, Value_BoolList& b) { - a.Swap(&b); - } - inline void Swap(Value_BoolList* other) { - if (other == this) return; - if (GetOwningArena() == other->GetOwningArena()) { - InternalSwap(other); - } else { - ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); - } - } - void UnsafeArenaSwap(Value_BoolList* other) { - if (other == this) return; - GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); - InternalSwap(other); - } - - // implements Message ---------------------------------------------- - - inline 
Value_BoolList* New() const final { - return new Value_BoolList(); - } - - Value_BoolList* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage(arena); - } - using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; - void CopyFrom(const Value_BoolList& from); - using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - void MergeFrom(const Value_BoolList& from); - private: - static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); - public: - PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; - bool IsInitialized() const final; - - size_t ByteSizeLong() const final; - const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; - int GetCachedSize() const final { return _cached_size_.Get(); } - - private: - void SharedCtor(); - void SharedDtor(); - void SetCachedSize(int size) const final; - void InternalSwap(Value_BoolList* other); - friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "flwr.proto.Value.BoolList"; - } - protected: - explicit Value_BoolList(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned = false); - private: - static void ArenaDtor(void* object); - inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); - public: - - static const ClassData _class_data_; - const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; - - ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; - - // nested types ---------------------------------------------------- - - // accessors ------------------------------------------------------- - - enum : int { - kValsFieldNumber = 1, - }; - // repeated bool vals = 1; - int vals_size() const; - private: - int 
_internal_vals_size() const; - public: - void clear_vals(); - private: - bool _internal_vals(int index) const; - const ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >& - _internal_vals() const; - void _internal_add_vals(bool value); - ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >* - _internal_mutable_vals(); - public: - bool vals(int index) const; - void set_vals(int index, bool value); - void add_vals(bool value); - const ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >& - vals() const; - ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >* - mutable_vals(); - - // @@protoc_insertion_point(class_scope:flwr.proto.Value.BoolList) - private: - class _Internal; - - template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; - typedef void InternalArenaConstructable_; - typedef void DestructorSkippable_; - ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool > vals_; - mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; - friend struct ::TableStruct_flwr_2fproto_2ftask_2eproto; -}; -// ------------------------------------------------------------------- - -class Value_StringList final : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.Value.StringList) */ { - public: - inline Value_StringList() : Value_StringList(nullptr) {} - ~Value_StringList() override; - explicit constexpr Value_StringList(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - - Value_StringList(const Value_StringList& from); - Value_StringList(Value_StringList&& from) noexcept - : Value_StringList() { - *this = ::std::move(from); - } - - inline Value_StringList& operator=(const Value_StringList& from) { - CopyFrom(from); - return *this; - } - inline Value_StringList& operator=(Value_StringList&& from) noexcept { - if (this == &from) return *this; - if (GetOwningArena() == from.GetOwningArena() - #ifdef PROTOBUF_FORCE_COPY_IN_MOVE - && GetOwningArena() != nullptr - #endif // !PROTOBUF_FORCE_COPY_IN_MOVE - ) { - InternalSwap(&from); - } 
else { - CopyFrom(from); - } - return *this; - } - - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { - return GetDescriptor(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { - return default_instance().GetMetadata().descriptor; - } - static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { - return default_instance().GetMetadata().reflection; - } - static const Value_StringList& default_instance() { - return *internal_default_instance(); - } - static inline const Value_StringList* internal_default_instance() { - return reinterpret_cast( - &_Value_StringList_default_instance_); - } - static constexpr int kIndexInFileMessages = - 6; - - friend void swap(Value_StringList& a, Value_StringList& b) { - a.Swap(&b); - } - inline void Swap(Value_StringList* other) { - if (other == this) return; - if (GetOwningArena() == other->GetOwningArena()) { - InternalSwap(other); - } else { - ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); - } - } - void UnsafeArenaSwap(Value_StringList* other) { - if (other == this) return; - GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); - InternalSwap(other); - } - - // implements Message ---------------------------------------------- - - inline Value_StringList* New() const final { - return new Value_StringList(); - } - - Value_StringList* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage(arena); - } - using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; - void CopyFrom(const Value_StringList& from); - using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - void MergeFrom(const Value_StringList& from); - private: - static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); - public: - PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; - bool IsInitialized() const final; - - size_t ByteSizeLong() const final; - const char* _InternalParse(const char* ptr, 
::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; - int GetCachedSize() const final { return _cached_size_.Get(); } - - private: - void SharedCtor(); - void SharedDtor(); - void SetCachedSize(int size) const final; - void InternalSwap(Value_StringList* other); - friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "flwr.proto.Value.StringList"; - } - protected: - explicit Value_StringList(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned = false); - private: - static void ArenaDtor(void* object); - inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); - public: - - static const ClassData _class_data_; - const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; - - ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; - - // nested types ---------------------------------------------------- - - // accessors ------------------------------------------------------- - - enum : int { - kValsFieldNumber = 1, - }; - // repeated string vals = 1; - int vals_size() const; - private: - int _internal_vals_size() const; - public: - void clear_vals(); - const std::string& vals(int index) const; - std::string* mutable_vals(int index); - void set_vals(int index, const std::string& value); - void set_vals(int index, std::string&& value); - void set_vals(int index, const char* value); - void set_vals(int index, const char* value, size_t size); - std::string* add_vals(); - void add_vals(const std::string& value); - void add_vals(std::string&& value); - void add_vals(const char* value); - void add_vals(const char* value, size_t size); - const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& vals() const; - ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* mutable_vals(); - private: - 
const std::string& _internal_vals(int index) const; - std::string* _internal_add_vals(); - public: - - // @@protoc_insertion_point(class_scope:flwr.proto.Value.StringList) - private: - class _Internal; - - template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; - typedef void InternalArenaConstructable_; - typedef void DestructorSkippable_; - ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField vals_; - mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; - friend struct ::TableStruct_flwr_2fproto_2ftask_2eproto; -}; -// ------------------------------------------------------------------- - -class Value_BytesList final : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.Value.BytesList) */ { - public: - inline Value_BytesList() : Value_BytesList(nullptr) {} - ~Value_BytesList() override; - explicit constexpr Value_BytesList(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - - Value_BytesList(const Value_BytesList& from); - Value_BytesList(Value_BytesList&& from) noexcept - : Value_BytesList() { - *this = ::std::move(from); - } - - inline Value_BytesList& operator=(const Value_BytesList& from) { - CopyFrom(from); - return *this; - } - inline Value_BytesList& operator=(Value_BytesList&& from) noexcept { - if (this == &from) return *this; - if (GetOwningArena() == from.GetOwningArena() - #ifdef PROTOBUF_FORCE_COPY_IN_MOVE - && GetOwningArena() != nullptr - #endif // !PROTOBUF_FORCE_COPY_IN_MOVE - ) { - InternalSwap(&from); - } else { - CopyFrom(from); - } - return *this; - } - - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { - return GetDescriptor(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { - return default_instance().GetMetadata().descriptor; - } - static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { - return default_instance().GetMetadata().reflection; - } - static const Value_BytesList& default_instance() { - return 
*internal_default_instance(); - } - static inline const Value_BytesList* internal_default_instance() { - return reinterpret_cast( - &_Value_BytesList_default_instance_); - } - static constexpr int kIndexInFileMessages = - 7; - - friend void swap(Value_BytesList& a, Value_BytesList& b) { - a.Swap(&b); - } - inline void Swap(Value_BytesList* other) { - if (other == this) return; - if (GetOwningArena() == other->GetOwningArena()) { - InternalSwap(other); - } else { - ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); - } - } - void UnsafeArenaSwap(Value_BytesList* other) { - if (other == this) return; - GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); - InternalSwap(other); - } - - // implements Message ---------------------------------------------- - - inline Value_BytesList* New() const final { - return new Value_BytesList(); - } - - Value_BytesList* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage(arena); - } - using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; - void CopyFrom(const Value_BytesList& from); - using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - void MergeFrom(const Value_BytesList& from); - private: - static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); - public: - PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; - bool IsInitialized() const final; - - size_t ByteSizeLong() const final; - const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; - int GetCachedSize() const final { return _cached_size_.Get(); } - - private: - void SharedCtor(); - void SharedDtor(); - void SetCachedSize(int size) const final; - void InternalSwap(Value_BytesList* other); - friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static 
::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "flwr.proto.Value.BytesList"; - } - protected: - explicit Value_BytesList(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned = false); - private: - static void ArenaDtor(void* object); - inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); - public: - - static const ClassData _class_data_; - const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; - - ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; - - // nested types ---------------------------------------------------- - - // accessors ------------------------------------------------------- - - enum : int { - kValsFieldNumber = 1, - }; - // repeated bytes vals = 1; - int vals_size() const; - private: - int _internal_vals_size() const; - public: - void clear_vals(); - const std::string& vals(int index) const; - std::string* mutable_vals(int index); - void set_vals(int index, const std::string& value); - void set_vals(int index, std::string&& value); - void set_vals(int index, const char* value); - void set_vals(int index, const void* value, size_t size); - std::string* add_vals(); - void add_vals(const std::string& value); - void add_vals(std::string&& value); - void add_vals(const char* value); - void add_vals(const void* value, size_t size); - const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& vals() const; - ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* mutable_vals(); - private: - const std::string& _internal_vals(int index) const; - std::string* _internal_add_vals(); - public: - - // @@protoc_insertion_point(class_scope:flwr.proto.Value.BytesList) - private: - class _Internal; - - template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; - typedef void InternalArenaConstructable_; - typedef void DestructorSkippable_; - ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField vals_; - mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; - friend struct 
::TableStruct_flwr_2fproto_2ftask_2eproto; -}; -// ------------------------------------------------------------------- - -class Value final : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.Value) */ { - public: - inline Value() : Value(nullptr) {} - ~Value() override; - explicit constexpr Value(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - - Value(const Value& from); - Value(Value&& from) noexcept - : Value() { - *this = ::std::move(from); - } - - inline Value& operator=(const Value& from) { - CopyFrom(from); - return *this; - } - inline Value& operator=(Value&& from) noexcept { - if (this == &from) return *this; - if (GetOwningArena() == from.GetOwningArena() - #ifdef PROTOBUF_FORCE_COPY_IN_MOVE - && GetOwningArena() != nullptr - #endif // !PROTOBUF_FORCE_COPY_IN_MOVE - ) { - InternalSwap(&from); - } else { - CopyFrom(from); - } - return *this; - } - - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { - return GetDescriptor(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { - return default_instance().GetMetadata().descriptor; - } - static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { - return default_instance().GetMetadata().reflection; - } - static const Value& default_instance() { - return *internal_default_instance(); - } - enum ValueCase { - kDouble = 1, - kSint64 = 2, - kBool = 3, - kString = 4, - kBytes = 5, - kDoubleList = 21, - kSint64List = 22, - kBoolList = 23, - kStringList = 24, - kBytesList = 25, - VALUE_NOT_SET = 0, - }; - - static inline const Value* internal_default_instance() { - return reinterpret_cast( - &_Value_default_instance_); - } - static constexpr int kIndexInFileMessages = - 8; - - friend void swap(Value& a, Value& b) { - a.Swap(&b); - } - inline void Swap(Value* other) { - if (other == this) return; - if (GetOwningArena() == other->GetOwningArena()) { - InternalSwap(other); - } else { - 
::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); - } - } - void UnsafeArenaSwap(Value* other) { - if (other == this) return; - GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); - InternalSwap(other); - } - - // implements Message ---------------------------------------------- - - inline Value* New() const final { - return new Value(); - } - - Value* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage(arena); - } - using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; - void CopyFrom(const Value& from); - using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - void MergeFrom(const Value& from); - private: - static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); - public: - PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; - bool IsInitialized() const final; - - size_t ByteSizeLong() const final; - const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; - int GetCachedSize() const final { return _cached_size_.Get(); } - - private: - void SharedCtor(); - void SharedDtor(); - void SetCachedSize(int size) const final; - void InternalSwap(Value* other); - friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "flwr.proto.Value"; - } - protected: - explicit Value(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned = false); - private: - static void ArenaDtor(void* object); - inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); - public: - - static const ClassData _class_data_; - const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; - - ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; - - // nested types 
---------------------------------------------------- - - typedef Value_DoubleList DoubleList; - typedef Value_Sint64List Sint64List; - typedef Value_BoolList BoolList; - typedef Value_StringList StringList; - typedef Value_BytesList BytesList; - - // accessors ------------------------------------------------------- - - enum : int { - kDoubleFieldNumber = 1, - kSint64FieldNumber = 2, - kBoolFieldNumber = 3, - kStringFieldNumber = 4, - kBytesFieldNumber = 5, - kDoubleListFieldNumber = 21, - kSint64ListFieldNumber = 22, - kBoolListFieldNumber = 23, - kStringListFieldNumber = 24, - kBytesListFieldNumber = 25, - }; - // double double = 1; - bool has_double_() const; - private: - bool _internal_has_double_() const; - public: - void clear_double_(); - double double_() const; - void set_double_(double value); - private: - double _internal_double_() const; - void _internal_set_double_(double value); - public: - - // sint64 sint64 = 2; - bool has_sint64() const; - private: - bool _internal_has_sint64() const; - public: - void clear_sint64(); - ::PROTOBUF_NAMESPACE_ID::int64 sint64() const; - void set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value); - private: - ::PROTOBUF_NAMESPACE_ID::int64 _internal_sint64() const; - void _internal_set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value); - public: - - // bool bool = 3; - bool has_bool_() const; - private: - bool _internal_has_bool_() const; - public: - void clear_bool_(); - bool bool_() const; - void set_bool_(bool value); - private: - bool _internal_bool_() const; - void _internal_set_bool_(bool value); - public: - - // string string = 4; - bool has_string() const; - private: - bool _internal_has_string() const; - public: - void clear_string(); - const std::string& string() const; - template - void set_string(ArgT0&& arg0, ArgT... 
args); - std::string* mutable_string(); - PROTOBUF_MUST_USE_RESULT std::string* release_string(); - void set_allocated_string(std::string* string); - private: - const std::string& _internal_string() const; - inline PROTOBUF_ALWAYS_INLINE void _internal_set_string(const std::string& value); - std::string* _internal_mutable_string(); - public: - - // bytes bytes = 5; - bool has_bytes() const; - private: - bool _internal_has_bytes() const; - public: - void clear_bytes(); - const std::string& bytes() const; - template - void set_bytes(ArgT0&& arg0, ArgT... args); - std::string* mutable_bytes(); - PROTOBUF_MUST_USE_RESULT std::string* release_bytes(); - void set_allocated_bytes(std::string* bytes); - private: - const std::string& _internal_bytes() const; - inline PROTOBUF_ALWAYS_INLINE void _internal_set_bytes(const std::string& value); - std::string* _internal_mutable_bytes(); - public: - - // .flwr.proto.Value.DoubleList double_list = 21; - bool has_double_list() const; - private: - bool _internal_has_double_list() const; - public: - void clear_double_list(); - const ::flwr::proto::Value_DoubleList& double_list() const; - PROTOBUF_MUST_USE_RESULT ::flwr::proto::Value_DoubleList* release_double_list(); - ::flwr::proto::Value_DoubleList* mutable_double_list(); - void set_allocated_double_list(::flwr::proto::Value_DoubleList* double_list); - private: - const ::flwr::proto::Value_DoubleList& _internal_double_list() const; - ::flwr::proto::Value_DoubleList* _internal_mutable_double_list(); - public: - void unsafe_arena_set_allocated_double_list( - ::flwr::proto::Value_DoubleList* double_list); - ::flwr::proto::Value_DoubleList* unsafe_arena_release_double_list(); - - // .flwr.proto.Value.Sint64List sint64_list = 22; - bool has_sint64_list() const; - private: - bool _internal_has_sint64_list() const; - public: - void clear_sint64_list(); - const ::flwr::proto::Value_Sint64List& sint64_list() const; - PROTOBUF_MUST_USE_RESULT ::flwr::proto::Value_Sint64List* 
release_sint64_list(); - ::flwr::proto::Value_Sint64List* mutable_sint64_list(); - void set_allocated_sint64_list(::flwr::proto::Value_Sint64List* sint64_list); - private: - const ::flwr::proto::Value_Sint64List& _internal_sint64_list() const; - ::flwr::proto::Value_Sint64List* _internal_mutable_sint64_list(); - public: - void unsafe_arena_set_allocated_sint64_list( - ::flwr::proto::Value_Sint64List* sint64_list); - ::flwr::proto::Value_Sint64List* unsafe_arena_release_sint64_list(); - - // .flwr.proto.Value.BoolList bool_list = 23; - bool has_bool_list() const; - private: - bool _internal_has_bool_list() const; - public: - void clear_bool_list(); - const ::flwr::proto::Value_BoolList& bool_list() const; - PROTOBUF_MUST_USE_RESULT ::flwr::proto::Value_BoolList* release_bool_list(); - ::flwr::proto::Value_BoolList* mutable_bool_list(); - void set_allocated_bool_list(::flwr::proto::Value_BoolList* bool_list); - private: - const ::flwr::proto::Value_BoolList& _internal_bool_list() const; - ::flwr::proto::Value_BoolList* _internal_mutable_bool_list(); - public: - void unsafe_arena_set_allocated_bool_list( - ::flwr::proto::Value_BoolList* bool_list); - ::flwr::proto::Value_BoolList* unsafe_arena_release_bool_list(); - - // .flwr.proto.Value.StringList string_list = 24; - bool has_string_list() const; - private: - bool _internal_has_string_list() const; - public: - void clear_string_list(); - const ::flwr::proto::Value_StringList& string_list() const; - PROTOBUF_MUST_USE_RESULT ::flwr::proto::Value_StringList* release_string_list(); - ::flwr::proto::Value_StringList* mutable_string_list(); - void set_allocated_string_list(::flwr::proto::Value_StringList* string_list); - private: - const ::flwr::proto::Value_StringList& _internal_string_list() const; - ::flwr::proto::Value_StringList* _internal_mutable_string_list(); - public: - void unsafe_arena_set_allocated_string_list( - ::flwr::proto::Value_StringList* string_list); - ::flwr::proto::Value_StringList* 
unsafe_arena_release_string_list(); - - // .flwr.proto.Value.BytesList bytes_list = 25; - bool has_bytes_list() const; - private: - bool _internal_has_bytes_list() const; - public: - void clear_bytes_list(); - const ::flwr::proto::Value_BytesList& bytes_list() const; - PROTOBUF_MUST_USE_RESULT ::flwr::proto::Value_BytesList* release_bytes_list(); - ::flwr::proto::Value_BytesList* mutable_bytes_list(); - void set_allocated_bytes_list(::flwr::proto::Value_BytesList* bytes_list); - private: - const ::flwr::proto::Value_BytesList& _internal_bytes_list() const; - ::flwr::proto::Value_BytesList* _internal_mutable_bytes_list(); - public: - void unsafe_arena_set_allocated_bytes_list( - ::flwr::proto::Value_BytesList* bytes_list); - ::flwr::proto::Value_BytesList* unsafe_arena_release_bytes_list(); - - void clear_value(); - ValueCase value_case() const; - // @@protoc_insertion_point(class_scope:flwr.proto.Value) - private: - class _Internal; - void set_has_double_(); - void set_has_sint64(); - void set_has_bool_(); - void set_has_string(); - void set_has_bytes(); - void set_has_double_list(); - void set_has_sint64_list(); - void set_has_bool_list(); - void set_has_string_list(); - void set_has_bytes_list(); - - inline bool has_value() const; - inline void clear_has_value(); - - template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; - typedef void InternalArenaConstructable_; - typedef void DestructorSkippable_; - union ValueUnion { - constexpr ValueUnion() : _constinit_{} {} - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_; - double double__; - ::PROTOBUF_NAMESPACE_ID::int64 sint64_; - bool bool__; - ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr string_; - ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr bytes_; - ::flwr::proto::Value_DoubleList* double_list_; - ::flwr::proto::Value_Sint64List* sint64_list_; - ::flwr::proto::Value_BoolList* bool_list_; - ::flwr::proto::Value_StringList* string_list_; - ::flwr::proto::Value_BytesList* 
bytes_list_; - } value_; - mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; - ::PROTOBUF_NAMESPACE_ID::uint32 _oneof_case_[1]; - - friend struct ::TableStruct_flwr_2fproto_2ftask_2eproto; -}; -// ------------------------------------------------------------------- - -class SecureAggregation_NamedValuesEntry_DoNotUse : public ::PROTOBUF_NAMESPACE_ID::internal::MapEntry { -public: - typedef ::PROTOBUF_NAMESPACE_ID::internal::MapEntry SuperType; - SecureAggregation_NamedValuesEntry_DoNotUse(); - explicit constexpr SecureAggregation_NamedValuesEntry_DoNotUse( - ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - explicit SecureAggregation_NamedValuesEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena); - void MergeFrom(const SecureAggregation_NamedValuesEntry_DoNotUse& other); - static const SecureAggregation_NamedValuesEntry_DoNotUse* internal_default_instance() { return reinterpret_cast(&_SecureAggregation_NamedValuesEntry_DoNotUse_default_instance_); } - static bool ValidateKey(std::string* s) { - return ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(s->data(), static_cast(s->size()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, "flwr.proto.SecureAggregation.NamedValuesEntry.key"); - } - static bool ValidateValue(void*) { return true; } - using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; -}; - -// ------------------------------------------------------------------- - -class SecureAggregation final : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:flwr.proto.SecureAggregation) */ { - public: - inline SecureAggregation() : SecureAggregation(nullptr) {} - ~SecureAggregation() override; - explicit constexpr SecureAggregation(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); - - SecureAggregation(const SecureAggregation& from); - SecureAggregation(SecureAggregation&& from) noexcept - : 
SecureAggregation() { - *this = ::std::move(from); - } - - inline SecureAggregation& operator=(const SecureAggregation& from) { - CopyFrom(from); - return *this; - } - inline SecureAggregation& operator=(SecureAggregation&& from) noexcept { - if (this == &from) return *this; - if (GetOwningArena() == from.GetOwningArena() - #ifdef PROTOBUF_FORCE_COPY_IN_MOVE - && GetOwningArena() != nullptr - #endif // !PROTOBUF_FORCE_COPY_IN_MOVE - ) { - InternalSwap(&from); - } else { - CopyFrom(from); - } - return *this; - } - - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { - return GetDescriptor(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { - return default_instance().GetMetadata().descriptor; - } - static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { - return default_instance().GetMetadata().reflection; - } - static const SecureAggregation& default_instance() { - return *internal_default_instance(); - } - static inline const SecureAggregation* internal_default_instance() { - return reinterpret_cast( - &_SecureAggregation_default_instance_); - } - static constexpr int kIndexInFileMessages = - 10; - - friend void swap(SecureAggregation& a, SecureAggregation& b) { - a.Swap(&b); - } - inline void Swap(SecureAggregation* other) { - if (other == this) return; - if (GetOwningArena() == other->GetOwningArena()) { - InternalSwap(other); - } else { - ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); - } - } - void UnsafeArenaSwap(SecureAggregation* other) { - if (other == this) return; - GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); - InternalSwap(other); - } - - // implements Message ---------------------------------------------- - - inline SecureAggregation* New() const final { - return new SecureAggregation(); - } - - SecureAggregation* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage(arena); - } - using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; - void 
CopyFrom(const SecureAggregation& from); - using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; - void MergeFrom(const SecureAggregation& from); - private: - static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message* to, const ::PROTOBUF_NAMESPACE_ID::Message& from); - public: - PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; - bool IsInitialized() const final; - - size_t ByteSizeLong() const final; - const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::PROTOBUF_NAMESPACE_ID::uint8* _InternalSerialize( - ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final; - int GetCachedSize() const final { return _cached_size_.Get(); } - - private: - void SharedCtor(); - void SharedDtor(); - void SetCachedSize(int size) const final; - void InternalSwap(SecureAggregation* other); - friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "flwr.proto.SecureAggregation"; - } - protected: - explicit SecureAggregation(::PROTOBUF_NAMESPACE_ID::Arena* arena, - bool is_message_owned = false); - private: - static void ArenaDtor(void* object); - inline void RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); - public: - - static const ClassData _class_data_; - const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; - - ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; - - // nested types ---------------------------------------------------- - - - // accessors ------------------------------------------------------- - - enum : int { - kNamedValuesFieldNumber = 1, - }; - // map named_values = 1; - int named_values_size() const; - private: - int _internal_named_values_size() const; - public: - void clear_named_values(); - private: - const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >& - _internal_named_values() const; - 
::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >* - _internal_mutable_named_values(); - public: - const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >& - named_values() const; - ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >* - mutable_named_values(); - - // @@protoc_insertion_point(class_scope:flwr.proto.SecureAggregation) - private: - class _Internal; - - template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; - typedef void InternalArenaConstructable_; - typedef void DestructorSkippable_; - ::PROTOBUF_NAMESPACE_ID::internal::MapField< - SecureAggregation_NamedValuesEntry_DoNotUse, - std::string, ::flwr::proto::Value, - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING, - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_MESSAGE> named_values_; - mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; - friend struct ::TableStruct_flwr_2fproto_2ftask_2eproto; -}; -// =================================================================== - - -// =================================================================== - -#ifdef __GNUC__ - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wstrict-aliasing" -#endif // __GNUC__ -// Task - -// .flwr.proto.Node producer = 1; -inline bool Task::_internal_has_producer() const { - return this != internal_default_instance() && producer_ != nullptr; -} -inline bool Task::has_producer() const { - return _internal_has_producer(); -} -inline const ::flwr::proto::Node& Task::_internal_producer() const { - const ::flwr::proto::Node* p = producer_; - return p != nullptr ? 
*p : reinterpret_cast( - ::flwr::proto::_Node_default_instance_); -} -inline const ::flwr::proto::Node& Task::producer() const { - // @@protoc_insertion_point(field_get:flwr.proto.Task.producer) - return _internal_producer(); -} -inline void Task::unsafe_arena_set_allocated_producer( - ::flwr::proto::Node* producer) { - if (GetArenaForAllocation() == nullptr) { - delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(producer_); - } - producer_ = producer; - if (producer) { - - } else { - - } - // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Task.producer) -} -inline ::flwr::proto::Node* Task::release_producer() { - - ::flwr::proto::Node* temp = producer_; - producer_ = nullptr; -#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE - auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - if (GetArenaForAllocation() == nullptr) { delete old; } -#else // PROTOBUF_FORCE_COPY_IN_RELEASE - if (GetArenaForAllocation() != nullptr) { - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - } -#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE - return temp; -} -inline ::flwr::proto::Node* Task::unsafe_arena_release_producer() { - // @@protoc_insertion_point(field_release:flwr.proto.Task.producer) - - ::flwr::proto::Node* temp = producer_; - producer_ = nullptr; - return temp; -} -inline ::flwr::proto::Node* Task::_internal_mutable_producer() { - - if (producer_ == nullptr) { - auto* p = CreateMaybeMessage<::flwr::proto::Node>(GetArenaForAllocation()); - producer_ = p; - } - return producer_; -} -inline ::flwr::proto::Node* Task::mutable_producer() { - ::flwr::proto::Node* _msg = _internal_mutable_producer(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Task.producer) - return _msg; -} -inline void Task::set_allocated_producer(::flwr::proto::Node* producer) { - ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); - if 
(message_arena == nullptr) { - delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(producer_); - } - if (producer) { - ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = - ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper< - ::PROTOBUF_NAMESPACE_ID::MessageLite>::GetOwningArena( - reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(producer)); - if (message_arena != submessage_arena) { - producer = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, producer, submessage_arena); - } - - } else { - - } - producer_ = producer; - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.producer) -} - -// .flwr.proto.Node consumer = 2; -inline bool Task::_internal_has_consumer() const { - return this != internal_default_instance() && consumer_ != nullptr; -} -inline bool Task::has_consumer() const { - return _internal_has_consumer(); -} -inline const ::flwr::proto::Node& Task::_internal_consumer() const { - const ::flwr::proto::Node* p = consumer_; - return p != nullptr ? 
*p : reinterpret_cast( - ::flwr::proto::_Node_default_instance_); -} -inline const ::flwr::proto::Node& Task::consumer() const { - // @@protoc_insertion_point(field_get:flwr.proto.Task.consumer) - return _internal_consumer(); -} -inline void Task::unsafe_arena_set_allocated_consumer( - ::flwr::proto::Node* consumer) { - if (GetArenaForAllocation() == nullptr) { - delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(consumer_); - } - consumer_ = consumer; - if (consumer) { - - } else { - - } - // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Task.consumer) -} -inline ::flwr::proto::Node* Task::release_consumer() { - - ::flwr::proto::Node* temp = consumer_; - consumer_ = nullptr; -#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE - auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - if (GetArenaForAllocation() == nullptr) { delete old; } -#else // PROTOBUF_FORCE_COPY_IN_RELEASE - if (GetArenaForAllocation() != nullptr) { - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - } -#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE - return temp; -} -inline ::flwr::proto::Node* Task::unsafe_arena_release_consumer() { - // @@protoc_insertion_point(field_release:flwr.proto.Task.consumer) - - ::flwr::proto::Node* temp = consumer_; - consumer_ = nullptr; - return temp; -} -inline ::flwr::proto::Node* Task::_internal_mutable_consumer() { - - if (consumer_ == nullptr) { - auto* p = CreateMaybeMessage<::flwr::proto::Node>(GetArenaForAllocation()); - consumer_ = p; - } - return consumer_; -} -inline ::flwr::proto::Node* Task::mutable_consumer() { - ::flwr::proto::Node* _msg = _internal_mutable_consumer(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Task.consumer) - return _msg; -} -inline void Task::set_allocated_consumer(::flwr::proto::Node* consumer) { - ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); - if 
(message_arena == nullptr) { - delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(consumer_); - } - if (consumer) { - ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = - ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper< - ::PROTOBUF_NAMESPACE_ID::MessageLite>::GetOwningArena( - reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(consumer)); - if (message_arena != submessage_arena) { - consumer = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, consumer, submessage_arena); - } - - } else { - - } - consumer_ = consumer; - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.consumer) -} - -// string created_at = 3; +// double created_at = 3; inline void Task::clear_created_at() { - created_at_.ClearToEmpty(); + created_at_ = 0; } -inline const std::string& Task::created_at() const { +inline double Task::_internal_created_at() const { + return created_at_; +} +inline double Task::created_at() const { // @@protoc_insertion_point(field_get:flwr.proto.Task.created_at) return _internal_created_at(); } -template -inline PROTOBUF_ALWAYS_INLINE -void Task::set_created_at(ArgT0&& arg0, ArgT... 
args) { - - created_at_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); - // @@protoc_insertion_point(field_set:flwr.proto.Task.created_at) -} -inline std::string* Task::mutable_created_at() { - std::string* _s = _internal_mutable_created_at(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Task.created_at) - return _s; -} -inline const std::string& Task::_internal_created_at() const { - return created_at_.Get(); -} -inline void Task::_internal_set_created_at(const std::string& value) { - - created_at_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); -} -inline std::string* Task::_internal_mutable_created_at() { +inline void Task::_internal_set_created_at(double value) { - return created_at_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); -} -inline std::string* Task::release_created_at() { - // @@protoc_insertion_point(field_release:flwr.proto.Task.created_at) - return created_at_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); + created_at_ = value; } -inline void Task::set_allocated_created_at(std::string* created_at) { - if (created_at != nullptr) { - - } else { - - } - created_at_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), created_at, - GetArenaForAllocation()); - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.created_at) +inline void Task::set_created_at(double value) { + _internal_set_created_at(value); + // @@protoc_insertion_point(field_set:flwr.proto.Task.created_at) } // string delivered_at = 4; @@ -2406,53 +1020,47 @@ inline void Task::set_allocated_delivered_at(std::string* delivered_at) { // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.delivered_at) } -// string ttl = 5; -inline void Task::clear_ttl() { - ttl_.ClearToEmpty(); +// double 
pushed_at = 5; +inline void Task::clear_pushed_at() { + pushed_at_ = 0; } -inline const std::string& Task::ttl() const { - // @@protoc_insertion_point(field_get:flwr.proto.Task.ttl) - return _internal_ttl(); +inline double Task::_internal_pushed_at() const { + return pushed_at_; } -template -inline PROTOBUF_ALWAYS_INLINE -void Task::set_ttl(ArgT0&& arg0, ArgT... args) { - - ttl_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); - // @@protoc_insertion_point(field_set:flwr.proto.Task.ttl) +inline double Task::pushed_at() const { + // @@protoc_insertion_point(field_get:flwr.proto.Task.pushed_at) + return _internal_pushed_at(); } -inline std::string* Task::mutable_ttl() { - std::string* _s = _internal_mutable_ttl(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Task.ttl) - return _s; +inline void Task::_internal_set_pushed_at(double value) { + + pushed_at_ = value; } -inline const std::string& Task::_internal_ttl() const { - return ttl_.Get(); +inline void Task::set_pushed_at(double value) { + _internal_set_pushed_at(value); + // @@protoc_insertion_point(field_set:flwr.proto.Task.pushed_at) } -inline void Task::_internal_set_ttl(const std::string& value) { - - ttl_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); + +// double ttl = 6; +inline void Task::clear_ttl() { + ttl_ = 0; } -inline std::string* Task::_internal_mutable_ttl() { - - return ttl_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); +inline double Task::_internal_ttl() const { + return ttl_; } -inline std::string* Task::release_ttl() { - // @@protoc_insertion_point(field_release:flwr.proto.Task.ttl) - return ttl_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); +inline double Task::ttl() const { + // @@protoc_insertion_point(field_get:flwr.proto.Task.ttl) + return 
_internal_ttl(); } -inline void Task::set_allocated_ttl(std::string* ttl) { - if (ttl != nullptr) { - - } else { - - } - ttl_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ttl, - GetArenaForAllocation()); - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.ttl) +inline void Task::_internal_set_ttl(double value) { + + ttl_ = value; +} +inline void Task::set_ttl(double value) { + _internal_set_ttl(value); + // @@protoc_insertion_point(field_set:flwr.proto.Task.ttl) } -// repeated string ancestry = 6; +// repeated string ancestry = 7; inline int Task::_internal_ancestry_size() const { return ancestry_.size(); } @@ -2527,129 +1135,85 @@ Task::mutable_ancestry() { return &ancestry_; } -// .flwr.proto.SecureAggregation sa = 7; -inline bool Task::_internal_has_sa() const { - return this != internal_default_instance() && sa_ != nullptr; -} -inline bool Task::has_sa() const { - return _internal_has_sa(); -} -inline void Task::clear_sa() { - if (GetArenaForAllocation() == nullptr && sa_ != nullptr) { - delete sa_; - } - sa_ = nullptr; +// string task_type = 8; +inline void Task::clear_task_type() { + task_type_.ClearToEmpty(); } -inline const ::flwr::proto::SecureAggregation& Task::_internal_sa() const { - const ::flwr::proto::SecureAggregation* p = sa_; - return p != nullptr ? *p : reinterpret_cast( - ::flwr::proto::_SecureAggregation_default_instance_); +inline const std::string& Task::task_type() const { + // @@protoc_insertion_point(field_get:flwr.proto.Task.task_type) + return _internal_task_type(); } -inline const ::flwr::proto::SecureAggregation& Task::sa() const { - // @@protoc_insertion_point(field_get:flwr.proto.Task.sa) - return _internal_sa(); +template +inline PROTOBUF_ALWAYS_INLINE +void Task::set_task_type(ArgT0&& arg0, ArgT... 
args) { + + task_type_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:flwr.proto.Task.task_type) } -inline void Task::unsafe_arena_set_allocated_sa( - ::flwr::proto::SecureAggregation* sa) { - if (GetArenaForAllocation() == nullptr) { - delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(sa_); - } - sa_ = sa; - if (sa) { - - } else { - - } - // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Task.sa) +inline std::string* Task::mutable_task_type() { + std::string* _s = _internal_mutable_task_type(); + // @@protoc_insertion_point(field_mutable:flwr.proto.Task.task_type) + return _s; } -inline ::flwr::proto::SecureAggregation* Task::release_sa() { - - ::flwr::proto::SecureAggregation* temp = sa_; - sa_ = nullptr; -#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE - auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - if (GetArenaForAllocation() == nullptr) { delete old; } -#else // PROTOBUF_FORCE_COPY_IN_RELEASE - if (GetArenaForAllocation() != nullptr) { - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - } -#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE - return temp; +inline const std::string& Task::_internal_task_type() const { + return task_type_.Get(); } -inline ::flwr::proto::SecureAggregation* Task::unsafe_arena_release_sa() { - // @@protoc_insertion_point(field_release:flwr.proto.Task.sa) +inline void Task::_internal_set_task_type(const std::string& value) { - ::flwr::proto::SecureAggregation* temp = sa_; - sa_ = nullptr; - return temp; + task_type_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); } -inline ::flwr::proto::SecureAggregation* Task::_internal_mutable_sa() { +inline std::string* Task::_internal_mutable_task_type() { - if (sa_ == nullptr) { - auto* p 
= CreateMaybeMessage<::flwr::proto::SecureAggregation>(GetArenaForAllocation()); - sa_ = p; - } - return sa_; + return task_type_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); } -inline ::flwr::proto::SecureAggregation* Task::mutable_sa() { - ::flwr::proto::SecureAggregation* _msg = _internal_mutable_sa(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Task.sa) - return _msg; +inline std::string* Task::release_task_type() { + // @@protoc_insertion_point(field_release:flwr.proto.Task.task_type) + return task_type_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); } -inline void Task::set_allocated_sa(::flwr::proto::SecureAggregation* sa) { - ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); - if (message_arena == nullptr) { - delete sa_; - } - if (sa) { - ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = - ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::SecureAggregation>::GetOwningArena(sa); - if (message_arena != submessage_arena) { - sa = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, sa, submessage_arena); - } +inline void Task::set_allocated_task_type(std::string* task_type) { + if (task_type != nullptr) { } else { } - sa_ = sa; - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.sa) + task_type_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), task_type, + GetArenaForAllocation()); + // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.task_type) } -// .flwr.proto.ServerMessage legacy_server_message = 101 [deprecated = true]; -inline bool Task::_internal_has_legacy_server_message() const { - return this != internal_default_instance() && legacy_server_message_ != nullptr; +// .flwr.proto.RecordSet recordset = 9; +inline bool Task::_internal_has_recordset() const { + return this != internal_default_instance() && recordset_ != nullptr; } 
-inline bool Task::has_legacy_server_message() const { - return _internal_has_legacy_server_message(); +inline bool Task::has_recordset() const { + return _internal_has_recordset(); } -inline const ::flwr::proto::ServerMessage& Task::_internal_legacy_server_message() const { - const ::flwr::proto::ServerMessage* p = legacy_server_message_; - return p != nullptr ? *p : reinterpret_cast( - ::flwr::proto::_ServerMessage_default_instance_); +inline const ::flwr::proto::RecordSet& Task::_internal_recordset() const { + const ::flwr::proto::RecordSet* p = recordset_; + return p != nullptr ? *p : reinterpret_cast( + ::flwr::proto::_RecordSet_default_instance_); } -inline const ::flwr::proto::ServerMessage& Task::legacy_server_message() const { - // @@protoc_insertion_point(field_get:flwr.proto.Task.legacy_server_message) - return _internal_legacy_server_message(); +inline const ::flwr::proto::RecordSet& Task::recordset() const { + // @@protoc_insertion_point(field_get:flwr.proto.Task.recordset) + return _internal_recordset(); } -inline void Task::unsafe_arena_set_allocated_legacy_server_message( - ::flwr::proto::ServerMessage* legacy_server_message) { +inline void Task::unsafe_arena_set_allocated_recordset( + ::flwr::proto::RecordSet* recordset) { if (GetArenaForAllocation() == nullptr) { - delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(legacy_server_message_); + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(recordset_); } - legacy_server_message_ = legacy_server_message; - if (legacy_server_message) { + recordset_ = recordset; + if (recordset) { } else { } - // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Task.legacy_server_message) + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Task.recordset) } -inline ::flwr::proto::ServerMessage* Task::release_legacy_server_message() { +inline ::flwr::proto::RecordSet* Task::release_recordset() { - ::flwr::proto::ServerMessage* temp = 
legacy_server_message_; - legacy_server_message_ = nullptr; + ::flwr::proto::RecordSet* temp = recordset_; + recordset_ = nullptr; #ifdef PROTOBUF_FORCE_COPY_IN_RELEASE auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); @@ -2661,81 +1225,81 @@ inline ::flwr::proto::ServerMessage* Task::release_legacy_server_message() { #endif // !PROTOBUF_FORCE_COPY_IN_RELEASE return temp; } -inline ::flwr::proto::ServerMessage* Task::unsafe_arena_release_legacy_server_message() { - // @@protoc_insertion_point(field_release:flwr.proto.Task.legacy_server_message) +inline ::flwr::proto::RecordSet* Task::unsafe_arena_release_recordset() { + // @@protoc_insertion_point(field_release:flwr.proto.Task.recordset) - ::flwr::proto::ServerMessage* temp = legacy_server_message_; - legacy_server_message_ = nullptr; + ::flwr::proto::RecordSet* temp = recordset_; + recordset_ = nullptr; return temp; } -inline ::flwr::proto::ServerMessage* Task::_internal_mutable_legacy_server_message() { +inline ::flwr::proto::RecordSet* Task::_internal_mutable_recordset() { - if (legacy_server_message_ == nullptr) { - auto* p = CreateMaybeMessage<::flwr::proto::ServerMessage>(GetArenaForAllocation()); - legacy_server_message_ = p; + if (recordset_ == nullptr) { + auto* p = CreateMaybeMessage<::flwr::proto::RecordSet>(GetArenaForAllocation()); + recordset_ = p; } - return legacy_server_message_; + return recordset_; } -inline ::flwr::proto::ServerMessage* Task::mutable_legacy_server_message() { - ::flwr::proto::ServerMessage* _msg = _internal_mutable_legacy_server_message(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Task.legacy_server_message) +inline ::flwr::proto::RecordSet* Task::mutable_recordset() { + ::flwr::proto::RecordSet* _msg = _internal_mutable_recordset(); + // @@protoc_insertion_point(field_mutable:flwr.proto.Task.recordset) return _msg; } -inline void 
Task::set_allocated_legacy_server_message(::flwr::proto::ServerMessage* legacy_server_message) { +inline void Task::set_allocated_recordset(::flwr::proto::RecordSet* recordset) { ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); if (message_arena == nullptr) { - delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(legacy_server_message_); + delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(recordset_); } - if (legacy_server_message) { + if (recordset) { ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper< ::PROTOBUF_NAMESPACE_ID::MessageLite>::GetOwningArena( - reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(legacy_server_message)); + reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(recordset)); if (message_arena != submessage_arena) { - legacy_server_message = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, legacy_server_message, submessage_arena); + recordset = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, recordset, submessage_arena); } } else { } - legacy_server_message_ = legacy_server_message; - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.legacy_server_message) + recordset_ = recordset; + // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.recordset) } -// .flwr.proto.ClientMessage legacy_client_message = 102 [deprecated = true]; -inline bool Task::_internal_has_legacy_client_message() const { - return this != internal_default_instance() && legacy_client_message_ != nullptr; +// .flwr.proto.Error error = 10; +inline bool Task::_internal_has_error() const { + return this != internal_default_instance() && error_ != nullptr; } -inline bool Task::has_legacy_client_message() const { - return _internal_has_legacy_client_message(); +inline bool Task::has_error() const { + return _internal_has_error(); } -inline const ::flwr::proto::ClientMessage& 
Task::_internal_legacy_client_message() const { - const ::flwr::proto::ClientMessage* p = legacy_client_message_; - return p != nullptr ? *p : reinterpret_cast( - ::flwr::proto::_ClientMessage_default_instance_); +inline const ::flwr::proto::Error& Task::_internal_error() const { + const ::flwr::proto::Error* p = error_; + return p != nullptr ? *p : reinterpret_cast( + ::flwr::proto::_Error_default_instance_); } -inline const ::flwr::proto::ClientMessage& Task::legacy_client_message() const { - // @@protoc_insertion_point(field_get:flwr.proto.Task.legacy_client_message) - return _internal_legacy_client_message(); +inline const ::flwr::proto::Error& Task::error() const { + // @@protoc_insertion_point(field_get:flwr.proto.Task.error) + return _internal_error(); } -inline void Task::unsafe_arena_set_allocated_legacy_client_message( - ::flwr::proto::ClientMessage* legacy_client_message) { +inline void Task::unsafe_arena_set_allocated_error( + ::flwr::proto::Error* error) { if (GetArenaForAllocation() == nullptr) { - delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(legacy_client_message_); + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(error_); } - legacy_client_message_ = legacy_client_message; - if (legacy_client_message) { + error_ = error; + if (error) { } else { } - // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Task.legacy_client_message) + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Task.error) } -inline ::flwr::proto::ClientMessage* Task::release_legacy_client_message() { +inline ::flwr::proto::Error* Task::release_error() { - ::flwr::proto::ClientMessage* temp = legacy_client_message_; - legacy_client_message_ = nullptr; + ::flwr::proto::Error* temp = error_; + error_ = nullptr; #ifdef PROTOBUF_FORCE_COPY_IN_RELEASE auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); @@ -2747,46 +1311,46 
@@ inline ::flwr::proto::ClientMessage* Task::release_legacy_client_message() { #endif // !PROTOBUF_FORCE_COPY_IN_RELEASE return temp; } -inline ::flwr::proto::ClientMessage* Task::unsafe_arena_release_legacy_client_message() { - // @@protoc_insertion_point(field_release:flwr.proto.Task.legacy_client_message) +inline ::flwr::proto::Error* Task::unsafe_arena_release_error() { + // @@protoc_insertion_point(field_release:flwr.proto.Task.error) - ::flwr::proto::ClientMessage* temp = legacy_client_message_; - legacy_client_message_ = nullptr; + ::flwr::proto::Error* temp = error_; + error_ = nullptr; return temp; } -inline ::flwr::proto::ClientMessage* Task::_internal_mutable_legacy_client_message() { +inline ::flwr::proto::Error* Task::_internal_mutable_error() { - if (legacy_client_message_ == nullptr) { - auto* p = CreateMaybeMessage<::flwr::proto::ClientMessage>(GetArenaForAllocation()); - legacy_client_message_ = p; + if (error_ == nullptr) { + auto* p = CreateMaybeMessage<::flwr::proto::Error>(GetArenaForAllocation()); + error_ = p; } - return legacy_client_message_; + return error_; } -inline ::flwr::proto::ClientMessage* Task::mutable_legacy_client_message() { - ::flwr::proto::ClientMessage* _msg = _internal_mutable_legacy_client_message(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Task.legacy_client_message) +inline ::flwr::proto::Error* Task::mutable_error() { + ::flwr::proto::Error* _msg = _internal_mutable_error(); + // @@protoc_insertion_point(field_mutable:flwr.proto.Task.error) return _msg; } -inline void Task::set_allocated_legacy_client_message(::flwr::proto::ClientMessage* legacy_client_message) { +inline void Task::set_allocated_error(::flwr::proto::Error* error) { ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); if (message_arena == nullptr) { - delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(legacy_client_message_); + delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(error_); } - if 
(legacy_client_message) { + if (error) { ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper< ::PROTOBUF_NAMESPACE_ID::MessageLite>::GetOwningArena( - reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(legacy_client_message)); + reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(error)); if (message_arena != submessage_arena) { - legacy_client_message = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, legacy_client_message, submessage_arena); + error = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, error, submessage_arena); } } else { } - legacy_client_message_ = legacy_client_message; - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.legacy_client_message) + error_ = error; + // @@protoc_insertion_point(field_set_allocated:flwr.proto.Task.error) } // ------------------------------------------------------------------- @@ -2808,233 +1372,27 @@ void TaskIns::set_task_id(ArgT0&& arg0, ArgT... 
args) { task_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); // @@protoc_insertion_point(field_set:flwr.proto.TaskIns.task_id) } -inline std::string* TaskIns::mutable_task_id() { - std::string* _s = _internal_mutable_task_id(); - // @@protoc_insertion_point(field_mutable:flwr.proto.TaskIns.task_id) - return _s; -} -inline const std::string& TaskIns::_internal_task_id() const { - return task_id_.Get(); -} -inline void TaskIns::_internal_set_task_id(const std::string& value) { - - task_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); -} -inline std::string* TaskIns::_internal_mutable_task_id() { - - return task_id_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); -} -inline std::string* TaskIns::release_task_id() { - // @@protoc_insertion_point(field_release:flwr.proto.TaskIns.task_id) - return task_id_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); -} -inline void TaskIns::set_allocated_task_id(std::string* task_id) { - if (task_id != nullptr) { - - } else { - - } - task_id_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), task_id, - GetArenaForAllocation()); - // @@protoc_insertion_point(field_set_allocated:flwr.proto.TaskIns.task_id) -} - -// string group_id = 2; -inline void TaskIns::clear_group_id() { - group_id_.ClearToEmpty(); -} -inline const std::string& TaskIns::group_id() const { - // @@protoc_insertion_point(field_get:flwr.proto.TaskIns.group_id) - return _internal_group_id(); -} -template -inline PROTOBUF_ALWAYS_INLINE -void TaskIns::set_group_id(ArgT0&& arg0, ArgT... 
args) { - - group_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); - // @@protoc_insertion_point(field_set:flwr.proto.TaskIns.group_id) -} -inline std::string* TaskIns::mutable_group_id() { - std::string* _s = _internal_mutable_group_id(); - // @@protoc_insertion_point(field_mutable:flwr.proto.TaskIns.group_id) - return _s; -} -inline const std::string& TaskIns::_internal_group_id() const { - return group_id_.Get(); -} -inline void TaskIns::_internal_set_group_id(const std::string& value) { - - group_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); -} -inline std::string* TaskIns::_internal_mutable_group_id() { - - return group_id_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); -} -inline std::string* TaskIns::release_group_id() { - // @@protoc_insertion_point(field_release:flwr.proto.TaskIns.group_id) - return group_id_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); -} -inline void TaskIns::set_allocated_group_id(std::string* group_id) { - if (group_id != nullptr) { - - } else { - - } - group_id_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), group_id, - GetArenaForAllocation()); - // @@protoc_insertion_point(field_set_allocated:flwr.proto.TaskIns.group_id) -} - -// sint64 workload_id = 3; -inline void TaskIns::clear_workload_id() { - workload_id_ = int64_t{0}; -} -inline ::PROTOBUF_NAMESPACE_ID::int64 TaskIns::_internal_workload_id() const { - return workload_id_; -} -inline ::PROTOBUF_NAMESPACE_ID::int64 TaskIns::workload_id() const { - // @@protoc_insertion_point(field_get:flwr.proto.TaskIns.workload_id) - return _internal_workload_id(); -} -inline void TaskIns::_internal_set_workload_id(::PROTOBUF_NAMESPACE_ID::int64 value) { - - workload_id_ = value; -} -inline void 
TaskIns::set_workload_id(::PROTOBUF_NAMESPACE_ID::int64 value) { - _internal_set_workload_id(value); - // @@protoc_insertion_point(field_set:flwr.proto.TaskIns.workload_id) -} - -// .flwr.proto.Task task = 4; -inline bool TaskIns::_internal_has_task() const { - return this != internal_default_instance() && task_ != nullptr; -} -inline bool TaskIns::has_task() const { - return _internal_has_task(); -} -inline void TaskIns::clear_task() { - if (GetArenaForAllocation() == nullptr && task_ != nullptr) { - delete task_; - } - task_ = nullptr; -} -inline const ::flwr::proto::Task& TaskIns::_internal_task() const { - const ::flwr::proto::Task* p = task_; - return p != nullptr ? *p : reinterpret_cast( - ::flwr::proto::_Task_default_instance_); -} -inline const ::flwr::proto::Task& TaskIns::task() const { - // @@protoc_insertion_point(field_get:flwr.proto.TaskIns.task) - return _internal_task(); -} -inline void TaskIns::unsafe_arena_set_allocated_task( - ::flwr::proto::Task* task) { - if (GetArenaForAllocation() == nullptr) { - delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(task_); - } - task_ = task; - if (task) { - - } else { - - } - // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.TaskIns.task) -} -inline ::flwr::proto::Task* TaskIns::release_task() { - - ::flwr::proto::Task* temp = task_; - task_ = nullptr; -#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE - auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - if (GetArenaForAllocation() == nullptr) { delete old; } -#else // PROTOBUF_FORCE_COPY_IN_RELEASE - if (GetArenaForAllocation() != nullptr) { - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - } -#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE - return temp; -} -inline ::flwr::proto::Task* TaskIns::unsafe_arena_release_task() { - // @@protoc_insertion_point(field_release:flwr.proto.TaskIns.task) - - ::flwr::proto::Task* temp = 
task_; - task_ = nullptr; - return temp; -} -inline ::flwr::proto::Task* TaskIns::_internal_mutable_task() { - - if (task_ == nullptr) { - auto* p = CreateMaybeMessage<::flwr::proto::Task>(GetArenaForAllocation()); - task_ = p; - } - return task_; -} -inline ::flwr::proto::Task* TaskIns::mutable_task() { - ::flwr::proto::Task* _msg = _internal_mutable_task(); - // @@protoc_insertion_point(field_mutable:flwr.proto.TaskIns.task) - return _msg; -} -inline void TaskIns::set_allocated_task(::flwr::proto::Task* task) { - ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); - if (message_arena == nullptr) { - delete task_; - } - if (task) { - ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = - ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Task>::GetOwningArena(task); - if (message_arena != submessage_arena) { - task = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, task, submessage_arena); - } - - } else { - - } - task_ = task; - // @@protoc_insertion_point(field_set_allocated:flwr.proto.TaskIns.task) -} - -// ------------------------------------------------------------------- - -// TaskRes - -// string task_id = 1; -inline void TaskRes::clear_task_id() { - task_id_.ClearToEmpty(); -} -inline const std::string& TaskRes::task_id() const { - // @@protoc_insertion_point(field_get:flwr.proto.TaskRes.task_id) - return _internal_task_id(); -} -template -inline PROTOBUF_ALWAYS_INLINE -void TaskRes::set_task_id(ArgT0&& arg0, ArgT... 
args) { - - task_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); - // @@protoc_insertion_point(field_set:flwr.proto.TaskRes.task_id) -} -inline std::string* TaskRes::mutable_task_id() { +inline std::string* TaskIns::mutable_task_id() { std::string* _s = _internal_mutable_task_id(); - // @@protoc_insertion_point(field_mutable:flwr.proto.TaskRes.task_id) + // @@protoc_insertion_point(field_mutable:flwr.proto.TaskIns.task_id) return _s; } -inline const std::string& TaskRes::_internal_task_id() const { +inline const std::string& TaskIns::_internal_task_id() const { return task_id_.Get(); } -inline void TaskRes::_internal_set_task_id(const std::string& value) { +inline void TaskIns::_internal_set_task_id(const std::string& value) { task_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); } -inline std::string* TaskRes::_internal_mutable_task_id() { +inline std::string* TaskIns::_internal_mutable_task_id() { return task_id_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); } -inline std::string* TaskRes::release_task_id() { - // @@protoc_insertion_point(field_release:flwr.proto.TaskRes.task_id) +inline std::string* TaskIns::release_task_id() { + // @@protoc_insertion_point(field_release:flwr.proto.TaskIns.task_id) return task_id_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); } -inline void TaskRes::set_allocated_task_id(std::string* task_id) { +inline void TaskIns::set_allocated_task_id(std::string* task_id) { if (task_id != nullptr) { } else { @@ -3042,45 +1400,45 @@ inline void TaskRes::set_allocated_task_id(std::string* task_id) { } task_id_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), task_id, GetArenaForAllocation()); - // 
@@protoc_insertion_point(field_set_allocated:flwr.proto.TaskRes.task_id) + // @@protoc_insertion_point(field_set_allocated:flwr.proto.TaskIns.task_id) } // string group_id = 2; -inline void TaskRes::clear_group_id() { +inline void TaskIns::clear_group_id() { group_id_.ClearToEmpty(); } -inline const std::string& TaskRes::group_id() const { - // @@protoc_insertion_point(field_get:flwr.proto.TaskRes.group_id) +inline const std::string& TaskIns::group_id() const { + // @@protoc_insertion_point(field_get:flwr.proto.TaskIns.group_id) return _internal_group_id(); } template inline PROTOBUF_ALWAYS_INLINE -void TaskRes::set_group_id(ArgT0&& arg0, ArgT... args) { +void TaskIns::set_group_id(ArgT0&& arg0, ArgT... args) { group_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); - // @@protoc_insertion_point(field_set:flwr.proto.TaskRes.group_id) + // @@protoc_insertion_point(field_set:flwr.proto.TaskIns.group_id) } -inline std::string* TaskRes::mutable_group_id() { +inline std::string* TaskIns::mutable_group_id() { std::string* _s = _internal_mutable_group_id(); - // @@protoc_insertion_point(field_mutable:flwr.proto.TaskRes.group_id) + // @@protoc_insertion_point(field_mutable:flwr.proto.TaskIns.group_id) return _s; } -inline const std::string& TaskRes::_internal_group_id() const { +inline const std::string& TaskIns::_internal_group_id() const { return group_id_.Get(); } -inline void TaskRes::_internal_set_group_id(const std::string& value) { +inline void TaskIns::_internal_set_group_id(const std::string& value) { group_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); } -inline std::string* TaskRes::_internal_mutable_group_id() { +inline std::string* TaskIns::_internal_mutable_group_id() { return group_id_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); } -inline std::string* 
TaskRes::release_group_id() { - // @@protoc_insertion_point(field_release:flwr.proto.TaskRes.group_id) +inline std::string* TaskIns::release_group_id() { + // @@protoc_insertion_point(field_release:flwr.proto.TaskIns.group_id) return group_id_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); } -inline void TaskRes::set_allocated_group_id(std::string* group_id) { +inline void TaskIns::set_allocated_group_id(std::string* group_id) { if (group_id != nullptr) { } else { @@ -3088,52 +1446,52 @@ inline void TaskRes::set_allocated_group_id(std::string* group_id) { } group_id_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), group_id, GetArenaForAllocation()); - // @@protoc_insertion_point(field_set_allocated:flwr.proto.TaskRes.group_id) + // @@protoc_insertion_point(field_set_allocated:flwr.proto.TaskIns.group_id) } -// sint64 workload_id = 3; -inline void TaskRes::clear_workload_id() { - workload_id_ = int64_t{0}; +// sint64 run_id = 3; +inline void TaskIns::clear_run_id() { + run_id_ = int64_t{0}; } -inline ::PROTOBUF_NAMESPACE_ID::int64 TaskRes::_internal_workload_id() const { - return workload_id_; +inline ::PROTOBUF_NAMESPACE_ID::int64 TaskIns::_internal_run_id() const { + return run_id_; } -inline ::PROTOBUF_NAMESPACE_ID::int64 TaskRes::workload_id() const { - // @@protoc_insertion_point(field_get:flwr.proto.TaskRes.workload_id) - return _internal_workload_id(); +inline ::PROTOBUF_NAMESPACE_ID::int64 TaskIns::run_id() const { + // @@protoc_insertion_point(field_get:flwr.proto.TaskIns.run_id) + return _internal_run_id(); } -inline void TaskRes::_internal_set_workload_id(::PROTOBUF_NAMESPACE_ID::int64 value) { +inline void TaskIns::_internal_set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value) { - workload_id_ = value; + run_id_ = value; } -inline void TaskRes::set_workload_id(::PROTOBUF_NAMESPACE_ID::int64 value) { - _internal_set_workload_id(value); - // 
@@protoc_insertion_point(field_set:flwr.proto.TaskRes.workload_id) +inline void TaskIns::set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value) { + _internal_set_run_id(value); + // @@protoc_insertion_point(field_set:flwr.proto.TaskIns.run_id) } // .flwr.proto.Task task = 4; -inline bool TaskRes::_internal_has_task() const { +inline bool TaskIns::_internal_has_task() const { return this != internal_default_instance() && task_ != nullptr; } -inline bool TaskRes::has_task() const { +inline bool TaskIns::has_task() const { return _internal_has_task(); } -inline void TaskRes::clear_task() { +inline void TaskIns::clear_task() { if (GetArenaForAllocation() == nullptr && task_ != nullptr) { delete task_; } task_ = nullptr; } -inline const ::flwr::proto::Task& TaskRes::_internal_task() const { +inline const ::flwr::proto::Task& TaskIns::_internal_task() const { const ::flwr::proto::Task* p = task_; return p != nullptr ? *p : reinterpret_cast( ::flwr::proto::_Task_default_instance_); } -inline const ::flwr::proto::Task& TaskRes::task() const { - // @@protoc_insertion_point(field_get:flwr.proto.TaskRes.task) +inline const ::flwr::proto::Task& TaskIns::task() const { + // @@protoc_insertion_point(field_get:flwr.proto.TaskIns.task) return _internal_task(); } -inline void TaskRes::unsafe_arena_set_allocated_task( +inline void TaskIns::unsafe_arena_set_allocated_task( ::flwr::proto::Task* task) { if (GetArenaForAllocation() == nullptr) { delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(task_); @@ -3144,9 +1502,9 @@ inline void TaskRes::unsafe_arena_set_allocated_task( } else { } - // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.TaskRes.task) + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.TaskIns.task) } -inline ::flwr::proto::Task* TaskRes::release_task() { +inline ::flwr::proto::Task* TaskIns::release_task() { ::flwr::proto::Task* temp = task_; task_ = nullptr; @@ -3161,14 +1519,14 @@ inline ::flwr::proto::Task* 
TaskRes::release_task() { #endif // !PROTOBUF_FORCE_COPY_IN_RELEASE return temp; } -inline ::flwr::proto::Task* TaskRes::unsafe_arena_release_task() { - // @@protoc_insertion_point(field_release:flwr.proto.TaskRes.task) +inline ::flwr::proto::Task* TaskIns::unsafe_arena_release_task() { + // @@protoc_insertion_point(field_release:flwr.proto.TaskIns.task) ::flwr::proto::Task* temp = task_; task_ = nullptr; return temp; } -inline ::flwr::proto::Task* TaskRes::_internal_mutable_task() { +inline ::flwr::proto::Task* TaskIns::_internal_mutable_task() { if (task_ == nullptr) { auto* p = CreateMaybeMessage<::flwr::proto::Task>(GetArenaForAllocation()); @@ -3176,1036 +1534,235 @@ inline ::flwr::proto::Task* TaskRes::_internal_mutable_task() { } return task_; } -inline ::flwr::proto::Task* TaskRes::mutable_task() { +inline ::flwr::proto::Task* TaskIns::mutable_task() { ::flwr::proto::Task* _msg = _internal_mutable_task(); - // @@protoc_insertion_point(field_mutable:flwr.proto.TaskRes.task) + // @@protoc_insertion_point(field_mutable:flwr.proto.TaskIns.task) return _msg; } -inline void TaskRes::set_allocated_task(::flwr::proto::Task* task) { +inline void TaskIns::set_allocated_task(::flwr::proto::Task* task) { ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); - if (message_arena == nullptr) { - delete task_; - } - if (task) { - ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = - ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Task>::GetOwningArena(task); - if (message_arena != submessage_arena) { - task = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( - message_arena, task, submessage_arena); - } - - } else { - - } - task_ = task; - // @@protoc_insertion_point(field_set_allocated:flwr.proto.TaskRes.task) -} - -// ------------------------------------------------------------------- - -// Value_DoubleList - -// repeated double vals = 1; -inline int Value_DoubleList::_internal_vals_size() const { - return vals_.size(); -} -inline int 
Value_DoubleList::vals_size() const { - return _internal_vals_size(); -} -inline void Value_DoubleList::clear_vals() { - vals_.Clear(); -} -inline double Value_DoubleList::_internal_vals(int index) const { - return vals_.Get(index); -} -inline double Value_DoubleList::vals(int index) const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.DoubleList.vals) - return _internal_vals(index); -} -inline void Value_DoubleList::set_vals(int index, double value) { - vals_.Set(index, value); - // @@protoc_insertion_point(field_set:flwr.proto.Value.DoubleList.vals) -} -inline void Value_DoubleList::_internal_add_vals(double value) { - vals_.Add(value); -} -inline void Value_DoubleList::add_vals(double value) { - _internal_add_vals(value); - // @@protoc_insertion_point(field_add:flwr.proto.Value.DoubleList.vals) -} -inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& -Value_DoubleList::_internal_vals() const { - return vals_; -} -inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >& -Value_DoubleList::vals() const { - // @@protoc_insertion_point(field_list:flwr.proto.Value.DoubleList.vals) - return _internal_vals(); -} -inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* -Value_DoubleList::_internal_mutable_vals() { - return &vals_; -} -inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< double >* -Value_DoubleList::mutable_vals() { - // @@protoc_insertion_point(field_mutable_list:flwr.proto.Value.DoubleList.vals) - return _internal_mutable_vals(); -} - -// ------------------------------------------------------------------- - -// Value_Sint64List - -// repeated sint64 vals = 1; -inline int Value_Sint64List::_internal_vals_size() const { - return vals_.size(); -} -inline int Value_Sint64List::vals_size() const { - return _internal_vals_size(); -} -inline void Value_Sint64List::clear_vals() { - vals_.Clear(); -} -inline ::PROTOBUF_NAMESPACE_ID::int64 Value_Sint64List::_internal_vals(int index) const { - return vals_.Get(index); -} -inline 
::PROTOBUF_NAMESPACE_ID::int64 Value_Sint64List::vals(int index) const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.Sint64List.vals) - return _internal_vals(index); -} -inline void Value_Sint64List::set_vals(int index, ::PROTOBUF_NAMESPACE_ID::int64 value) { - vals_.Set(index, value); - // @@protoc_insertion_point(field_set:flwr.proto.Value.Sint64List.vals) -} -inline void Value_Sint64List::_internal_add_vals(::PROTOBUF_NAMESPACE_ID::int64 value) { - vals_.Add(value); -} -inline void Value_Sint64List::add_vals(::PROTOBUF_NAMESPACE_ID::int64 value) { - _internal_add_vals(value); - // @@protoc_insertion_point(field_add:flwr.proto.Value.Sint64List.vals) -} -inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >& -Value_Sint64List::_internal_vals() const { - return vals_; -} -inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >& -Value_Sint64List::vals() const { - // @@protoc_insertion_point(field_list:flwr.proto.Value.Sint64List.vals) - return _internal_vals(); -} -inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >* -Value_Sint64List::_internal_mutable_vals() { - return &vals_; -} -inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >* -Value_Sint64List::mutable_vals() { - // @@protoc_insertion_point(field_mutable_list:flwr.proto.Value.Sint64List.vals) - return _internal_mutable_vals(); -} - -// ------------------------------------------------------------------- - -// Value_BoolList - -// repeated bool vals = 1; -inline int Value_BoolList::_internal_vals_size() const { - return vals_.size(); -} -inline int Value_BoolList::vals_size() const { - return _internal_vals_size(); -} -inline void Value_BoolList::clear_vals() { - vals_.Clear(); -} -inline bool Value_BoolList::_internal_vals(int index) const { - return vals_.Get(index); -} -inline bool Value_BoolList::vals(int index) const { - // 
@@protoc_insertion_point(field_get:flwr.proto.Value.BoolList.vals) - return _internal_vals(index); -} -inline void Value_BoolList::set_vals(int index, bool value) { - vals_.Set(index, value); - // @@protoc_insertion_point(field_set:flwr.proto.Value.BoolList.vals) -} -inline void Value_BoolList::_internal_add_vals(bool value) { - vals_.Add(value); -} -inline void Value_BoolList::add_vals(bool value) { - _internal_add_vals(value); - // @@protoc_insertion_point(field_add:flwr.proto.Value.BoolList.vals) -} -inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >& -Value_BoolList::_internal_vals() const { - return vals_; -} -inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >& -Value_BoolList::vals() const { - // @@protoc_insertion_point(field_list:flwr.proto.Value.BoolList.vals) - return _internal_vals(); -} -inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >* -Value_BoolList::_internal_mutable_vals() { - return &vals_; -} -inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< bool >* -Value_BoolList::mutable_vals() { - // @@protoc_insertion_point(field_mutable_list:flwr.proto.Value.BoolList.vals) - return _internal_mutable_vals(); -} - -// ------------------------------------------------------------------- - -// Value_StringList - -// repeated string vals = 1; -inline int Value_StringList::_internal_vals_size() const { - return vals_.size(); -} -inline int Value_StringList::vals_size() const { - return _internal_vals_size(); -} -inline void Value_StringList::clear_vals() { - vals_.Clear(); -} -inline std::string* Value_StringList::add_vals() { - std::string* _s = _internal_add_vals(); - // @@protoc_insertion_point(field_add_mutable:flwr.proto.Value.StringList.vals) - return _s; -} -inline const std::string& Value_StringList::_internal_vals(int index) const { - return vals_.Get(index); -} -inline const std::string& Value_StringList::vals(int index) const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.StringList.vals) - return _internal_vals(index); 
-} -inline std::string* Value_StringList::mutable_vals(int index) { - // @@protoc_insertion_point(field_mutable:flwr.proto.Value.StringList.vals) - return vals_.Mutable(index); -} -inline void Value_StringList::set_vals(int index, const std::string& value) { - vals_.Mutable(index)->assign(value); - // @@protoc_insertion_point(field_set:flwr.proto.Value.StringList.vals) -} -inline void Value_StringList::set_vals(int index, std::string&& value) { - vals_.Mutable(index)->assign(std::move(value)); - // @@protoc_insertion_point(field_set:flwr.proto.Value.StringList.vals) -} -inline void Value_StringList::set_vals(int index, const char* value) { - GOOGLE_DCHECK(value != nullptr); - vals_.Mutable(index)->assign(value); - // @@protoc_insertion_point(field_set_char:flwr.proto.Value.StringList.vals) -} -inline void Value_StringList::set_vals(int index, const char* value, size_t size) { - vals_.Mutable(index)->assign( - reinterpret_cast(value), size); - // @@protoc_insertion_point(field_set_pointer:flwr.proto.Value.StringList.vals) -} -inline std::string* Value_StringList::_internal_add_vals() { - return vals_.Add(); -} -inline void Value_StringList::add_vals(const std::string& value) { - vals_.Add()->assign(value); - // @@protoc_insertion_point(field_add:flwr.proto.Value.StringList.vals) -} -inline void Value_StringList::add_vals(std::string&& value) { - vals_.Add(std::move(value)); - // @@protoc_insertion_point(field_add:flwr.proto.Value.StringList.vals) -} -inline void Value_StringList::add_vals(const char* value) { - GOOGLE_DCHECK(value != nullptr); - vals_.Add()->assign(value); - // @@protoc_insertion_point(field_add_char:flwr.proto.Value.StringList.vals) -} -inline void Value_StringList::add_vals(const char* value, size_t size) { - vals_.Add()->assign(reinterpret_cast(value), size); - // @@protoc_insertion_point(field_add_pointer:flwr.proto.Value.StringList.vals) -} -inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& -Value_StringList::vals() const { - // 
@@protoc_insertion_point(field_list:flwr.proto.Value.StringList.vals) - return vals_; -} -inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* -Value_StringList::mutable_vals() { - // @@protoc_insertion_point(field_mutable_list:flwr.proto.Value.StringList.vals) - return &vals_; -} - -// ------------------------------------------------------------------- - -// Value_BytesList - -// repeated bytes vals = 1; -inline int Value_BytesList::_internal_vals_size() const { - return vals_.size(); -} -inline int Value_BytesList::vals_size() const { - return _internal_vals_size(); -} -inline void Value_BytesList::clear_vals() { - vals_.Clear(); -} -inline std::string* Value_BytesList::add_vals() { - std::string* _s = _internal_add_vals(); - // @@protoc_insertion_point(field_add_mutable:flwr.proto.Value.BytesList.vals) - return _s; -} -inline const std::string& Value_BytesList::_internal_vals(int index) const { - return vals_.Get(index); -} -inline const std::string& Value_BytesList::vals(int index) const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.BytesList.vals) - return _internal_vals(index); -} -inline std::string* Value_BytesList::mutable_vals(int index) { - // @@protoc_insertion_point(field_mutable:flwr.proto.Value.BytesList.vals) - return vals_.Mutable(index); -} -inline void Value_BytesList::set_vals(int index, const std::string& value) { - vals_.Mutable(index)->assign(value); - // @@protoc_insertion_point(field_set:flwr.proto.Value.BytesList.vals) -} -inline void Value_BytesList::set_vals(int index, std::string&& value) { - vals_.Mutable(index)->assign(std::move(value)); - // @@protoc_insertion_point(field_set:flwr.proto.Value.BytesList.vals) -} -inline void Value_BytesList::set_vals(int index, const char* value) { - GOOGLE_DCHECK(value != nullptr); - vals_.Mutable(index)->assign(value); - // @@protoc_insertion_point(field_set_char:flwr.proto.Value.BytesList.vals) -} -inline void Value_BytesList::set_vals(int index, const void* value, size_t size) { - 
vals_.Mutable(index)->assign( - reinterpret_cast(value), size); - // @@protoc_insertion_point(field_set_pointer:flwr.proto.Value.BytesList.vals) -} -inline std::string* Value_BytesList::_internal_add_vals() { - return vals_.Add(); -} -inline void Value_BytesList::add_vals(const std::string& value) { - vals_.Add()->assign(value); - // @@protoc_insertion_point(field_add:flwr.proto.Value.BytesList.vals) -} -inline void Value_BytesList::add_vals(std::string&& value) { - vals_.Add(std::move(value)); - // @@protoc_insertion_point(field_add:flwr.proto.Value.BytesList.vals) -} -inline void Value_BytesList::add_vals(const char* value) { - GOOGLE_DCHECK(value != nullptr); - vals_.Add()->assign(value); - // @@protoc_insertion_point(field_add_char:flwr.proto.Value.BytesList.vals) -} -inline void Value_BytesList::add_vals(const void* value, size_t size) { - vals_.Add()->assign(reinterpret_cast(value), size); - // @@protoc_insertion_point(field_add_pointer:flwr.proto.Value.BytesList.vals) -} -inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& -Value_BytesList::vals() const { - // @@protoc_insertion_point(field_list:flwr.proto.Value.BytesList.vals) - return vals_; -} -inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* -Value_BytesList::mutable_vals() { - // @@protoc_insertion_point(field_mutable_list:flwr.proto.Value.BytesList.vals) - return &vals_; -} - -// ------------------------------------------------------------------- - -// Value - -// double double = 1; -inline bool Value::_internal_has_double_() const { - return value_case() == kDouble; -} -inline bool Value::has_double_() const { - return _internal_has_double_(); -} -inline void Value::set_has_double_() { - _oneof_case_[0] = kDouble; -} -inline void Value::clear_double_() { - if (_internal_has_double_()) { - value_.double__ = 0; - clear_has_value(); - } -} -inline double Value::_internal_double_() const { - if (_internal_has_double_()) { - return value_.double__; - } - return 0; -} -inline void 
Value::_internal_set_double_(double value) { - if (!_internal_has_double_()) { - clear_value(); - set_has_double_(); - } - value_.double__ = value; -} -inline double Value::double_() const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.double) - return _internal_double_(); -} -inline void Value::set_double_(double value) { - _internal_set_double_(value); - // @@protoc_insertion_point(field_set:flwr.proto.Value.double) -} - -// sint64 sint64 = 2; -inline bool Value::_internal_has_sint64() const { - return value_case() == kSint64; -} -inline bool Value::has_sint64() const { - return _internal_has_sint64(); -} -inline void Value::set_has_sint64() { - _oneof_case_[0] = kSint64; -} -inline void Value::clear_sint64() { - if (_internal_has_sint64()) { - value_.sint64_ = int64_t{0}; - clear_has_value(); - } -} -inline ::PROTOBUF_NAMESPACE_ID::int64 Value::_internal_sint64() const { - if (_internal_has_sint64()) { - return value_.sint64_; - } - return int64_t{0}; -} -inline void Value::_internal_set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value) { - if (!_internal_has_sint64()) { - clear_value(); - set_has_sint64(); - } - value_.sint64_ = value; -} -inline ::PROTOBUF_NAMESPACE_ID::int64 Value::sint64() const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.sint64) - return _internal_sint64(); -} -inline void Value::set_sint64(::PROTOBUF_NAMESPACE_ID::int64 value) { - _internal_set_sint64(value); - // @@protoc_insertion_point(field_set:flwr.proto.Value.sint64) -} - -// bool bool = 3; -inline bool Value::_internal_has_bool_() const { - return value_case() == kBool; -} -inline bool Value::has_bool_() const { - return _internal_has_bool_(); -} -inline void Value::set_has_bool_() { - _oneof_case_[0] = kBool; -} -inline void Value::clear_bool_() { - if (_internal_has_bool_()) { - value_.bool__ = false; - clear_has_value(); - } -} -inline bool Value::_internal_bool_() const { - if (_internal_has_bool_()) { - return value_.bool__; - } - return false; -} -inline 
void Value::_internal_set_bool_(bool value) { - if (!_internal_has_bool_()) { - clear_value(); - set_has_bool_(); - } - value_.bool__ = value; -} -inline bool Value::bool_() const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.bool) - return _internal_bool_(); -} -inline void Value::set_bool_(bool value) { - _internal_set_bool_(value); - // @@protoc_insertion_point(field_set:flwr.proto.Value.bool) -} - -// string string = 4; -inline bool Value::_internal_has_string() const { - return value_case() == kString; -} -inline bool Value::has_string() const { - return _internal_has_string(); -} -inline void Value::set_has_string() { - _oneof_case_[0] = kString; -} -inline void Value::clear_string() { - if (_internal_has_string()) { - value_.string_.Destroy(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); - clear_has_value(); - } -} -inline const std::string& Value::string() const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.string) - return _internal_string(); -} -template -inline void Value::set_string(ArgT0&& arg0, ArgT... 
args) { - if (!_internal_has_string()) { - clear_value(); - set_has_string(); - value_.string_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); - } - value_.string_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); - // @@protoc_insertion_point(field_set:flwr.proto.Value.string) -} -inline std::string* Value::mutable_string() { - std::string* _s = _internal_mutable_string(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Value.string) - return _s; -} -inline const std::string& Value::_internal_string() const { - if (_internal_has_string()) { - return value_.string_.Get(); - } - return ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(); -} -inline void Value::_internal_set_string(const std::string& value) { - if (!_internal_has_string()) { - clear_value(); - set_has_string(); - value_.string_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); - } - value_.string_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); -} -inline std::string* Value::_internal_mutable_string() { - if (!_internal_has_string()) { - clear_value(); - set_has_string(); - value_.string_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); - } - return value_.string_.Mutable( - ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); -} -inline std::string* Value::release_string() { - // @@protoc_insertion_point(field_release:flwr.proto.Value.string) - if (_internal_has_string()) { - clear_has_value(); - return value_.string_.ReleaseNonDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); - } else { - return nullptr; - } -} -inline void Value::set_allocated_string(std::string* string) { - if (has_value()) { - clear_value(); + if (message_arena == nullptr) { + delete 
task_; } - if (string != nullptr) { - set_has_string(); - value_.string_.UnsafeSetDefault(string); - ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaForAllocation(); - if (arena != nullptr) { - arena->Own(string); + if (task) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Task>::GetOwningArena(task); + if (message_arena != submessage_arena) { + task = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, task, submessage_arena); } + + } else { + } - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Value.string) + task_ = task; + // @@protoc_insertion_point(field_set_allocated:flwr.proto.TaskIns.task) } -// bytes bytes = 5; -inline bool Value::_internal_has_bytes() const { - return value_case() == kBytes; -} -inline bool Value::has_bytes() const { - return _internal_has_bytes(); -} -inline void Value::set_has_bytes() { - _oneof_case_[0] = kBytes; -} -inline void Value::clear_bytes() { - if (_internal_has_bytes()) { - value_.bytes_.Destroy(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); - clear_has_value(); - } +// ------------------------------------------------------------------- + +// TaskRes + +// string task_id = 1; +inline void TaskRes::clear_task_id() { + task_id_.ClearToEmpty(); } -inline const std::string& Value::bytes() const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.bytes) - return _internal_bytes(); +inline const std::string& TaskRes::task_id() const { + // @@protoc_insertion_point(field_get:flwr.proto.TaskRes.task_id) + return _internal_task_id(); } template -inline void Value::set_bytes(ArgT0&& arg0, ArgT... 
args) { - if (!_internal_has_bytes()) { - clear_value(); - set_has_bytes(); - value_.bytes_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); - } - value_.bytes_.SetBytes(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); - // @@protoc_insertion_point(field_set:flwr.proto.Value.bytes) +inline PROTOBUF_ALWAYS_INLINE +void TaskRes::set_task_id(ArgT0&& arg0, ArgT... args) { + + task_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:flwr.proto.TaskRes.task_id) } -inline std::string* Value::mutable_bytes() { - std::string* _s = _internal_mutable_bytes(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Value.bytes) +inline std::string* TaskRes::mutable_task_id() { + std::string* _s = _internal_mutable_task_id(); + // @@protoc_insertion_point(field_mutable:flwr.proto.TaskRes.task_id) return _s; } -inline const std::string& Value::_internal_bytes() const { - if (_internal_has_bytes()) { - return value_.bytes_.Get(); - } - return ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(); -} -inline void Value::_internal_set_bytes(const std::string& value) { - if (!_internal_has_bytes()) { - clear_value(); - set_has_bytes(); - value_.bytes_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); - } - value_.bytes_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); -} -inline std::string* Value::_internal_mutable_bytes() { - if (!_internal_has_bytes()) { - clear_value(); - set_has_bytes(); - value_.bytes_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); - } - return value_.bytes_.Mutable( - ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); -} -inline std::string* 
Value::release_bytes() { - // @@protoc_insertion_point(field_release:flwr.proto.Value.bytes) - if (_internal_has_bytes()) { - clear_has_value(); - return value_.bytes_.ReleaseNonDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); - } else { - return nullptr; - } -} -inline void Value::set_allocated_bytes(std::string* bytes) { - if (has_value()) { - clear_value(); - } - if (bytes != nullptr) { - set_has_bytes(); - value_.bytes_.UnsafeSetDefault(bytes); - ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaForAllocation(); - if (arena != nullptr) { - arena->Own(bytes); - } - } - // @@protoc_insertion_point(field_set_allocated:flwr.proto.Value.bytes) -} - -// .flwr.proto.Value.DoubleList double_list = 21; -inline bool Value::_internal_has_double_list() const { - return value_case() == kDoubleList; -} -inline bool Value::has_double_list() const { - return _internal_has_double_list(); +inline const std::string& TaskRes::_internal_task_id() const { + return task_id_.Get(); } -inline void Value::set_has_double_list() { - _oneof_case_[0] = kDoubleList; +inline void TaskRes::_internal_set_task_id(const std::string& value) { + + task_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); } -inline void Value::clear_double_list() { - if (_internal_has_double_list()) { - if (GetArenaForAllocation() == nullptr) { - delete value_.double_list_; - } - clear_has_value(); - } +inline std::string* TaskRes::_internal_mutable_task_id() { + + return task_id_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); } -inline ::flwr::proto::Value_DoubleList* Value::release_double_list() { - // @@protoc_insertion_point(field_release:flwr.proto.Value.double_list) - if (_internal_has_double_list()) { - clear_has_value(); - ::flwr::proto::Value_DoubleList* temp = value_.double_list_; - if (GetArenaForAllocation() != nullptr) { - temp = 
::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - } - value_.double_list_ = nullptr; - return temp; - } else { - return nullptr; - } +inline std::string* TaskRes::release_task_id() { + // @@protoc_insertion_point(field_release:flwr.proto.TaskRes.task_id) + return task_id_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); } -inline const ::flwr::proto::Value_DoubleList& Value::_internal_double_list() const { - return _internal_has_double_list() - ? *value_.double_list_ - : reinterpret_cast< ::flwr::proto::Value_DoubleList&>(::flwr::proto::_Value_DoubleList_default_instance_); -} -inline const ::flwr::proto::Value_DoubleList& Value::double_list() const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.double_list) - return _internal_double_list(); -} -inline ::flwr::proto::Value_DoubleList* Value::unsafe_arena_release_double_list() { - // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.Value.double_list) - if (_internal_has_double_list()) { - clear_has_value(); - ::flwr::proto::Value_DoubleList* temp = value_.double_list_; - value_.double_list_ = nullptr; - return temp; +inline void TaskRes::set_allocated_task_id(std::string* task_id) { + if (task_id != nullptr) { + } else { - return nullptr; + } + task_id_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), task_id, + GetArenaForAllocation()); + // @@protoc_insertion_point(field_set_allocated:flwr.proto.TaskRes.task_id) } -inline void Value::unsafe_arena_set_allocated_double_list(::flwr::proto::Value_DoubleList* double_list) { - clear_value(); - if (double_list) { - set_has_double_list(); - value_.double_list_ = double_list; - } - // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Value.double_list) + +// string group_id = 2; +inline void TaskRes::clear_group_id() { + group_id_.ClearToEmpty(); } -inline ::flwr::proto::Value_DoubleList* Value::_internal_mutable_double_list() { - if 
(!_internal_has_double_list()) { - clear_value(); - set_has_double_list(); - value_.double_list_ = CreateMaybeMessage< ::flwr::proto::Value_DoubleList >(GetArenaForAllocation()); - } - return value_.double_list_; +inline const std::string& TaskRes::group_id() const { + // @@protoc_insertion_point(field_get:flwr.proto.TaskRes.group_id) + return _internal_group_id(); } -inline ::flwr::proto::Value_DoubleList* Value::mutable_double_list() { - ::flwr::proto::Value_DoubleList* _msg = _internal_mutable_double_list(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Value.double_list) - return _msg; +template +inline PROTOBUF_ALWAYS_INLINE +void TaskRes::set_group_id(ArgT0&& arg0, ArgT... args) { + + group_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:flwr.proto.TaskRes.group_id) } - -// .flwr.proto.Value.Sint64List sint64_list = 22; -inline bool Value::_internal_has_sint64_list() const { - return value_case() == kSint64List; +inline std::string* TaskRes::mutable_group_id() { + std::string* _s = _internal_mutable_group_id(); + // @@protoc_insertion_point(field_mutable:flwr.proto.TaskRes.group_id) + return _s; } -inline bool Value::has_sint64_list() const { - return _internal_has_sint64_list(); +inline const std::string& TaskRes::_internal_group_id() const { + return group_id_.Get(); } -inline void Value::set_has_sint64_list() { - _oneof_case_[0] = kSint64List; +inline void TaskRes::_internal_set_group_id(const std::string& value) { + + group_id_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, value, GetArenaForAllocation()); } -inline void Value::clear_sint64_list() { - if (_internal_has_sint64_list()) { - if (GetArenaForAllocation() == nullptr) { - delete value_.sint64_list_; - } - clear_has_value(); - } +inline std::string* TaskRes::_internal_mutable_group_id() { + + return 
group_id_.Mutable(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, GetArenaForAllocation()); } -inline ::flwr::proto::Value_Sint64List* Value::release_sint64_list() { - // @@protoc_insertion_point(field_release:flwr.proto.Value.sint64_list) - if (_internal_has_sint64_list()) { - clear_has_value(); - ::flwr::proto::Value_Sint64List* temp = value_.sint64_list_; - if (GetArenaForAllocation() != nullptr) { - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - } - value_.sint64_list_ = nullptr; - return temp; - } else { - return nullptr; - } +inline std::string* TaskRes::release_group_id() { + // @@protoc_insertion_point(field_release:flwr.proto.TaskRes.group_id) + return group_id_.Release(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaForAllocation()); } -inline const ::flwr::proto::Value_Sint64List& Value::_internal_sint64_list() const { - return _internal_has_sint64_list() - ? *value_.sint64_list_ - : reinterpret_cast< ::flwr::proto::Value_Sint64List&>(::flwr::proto::_Value_Sint64List_default_instance_); -} -inline const ::flwr::proto::Value_Sint64List& Value::sint64_list() const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.sint64_list) - return _internal_sint64_list(); -} -inline ::flwr::proto::Value_Sint64List* Value::unsafe_arena_release_sint64_list() { - // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.Value.sint64_list) - if (_internal_has_sint64_list()) { - clear_has_value(); - ::flwr::proto::Value_Sint64List* temp = value_.sint64_list_; - value_.sint64_list_ = nullptr; - return temp; +inline void TaskRes::set_allocated_group_id(std::string* group_id) { + if (group_id != nullptr) { + } else { - return nullptr; - } -} -inline void Value::unsafe_arena_set_allocated_sint64_list(::flwr::proto::Value_Sint64List* sint64_list) { - clear_value(); - if (sint64_list) { - set_has_sint64_list(); - value_.sint64_list_ = sint64_list; - } - // 
@@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Value.sint64_list) -} -inline ::flwr::proto::Value_Sint64List* Value::_internal_mutable_sint64_list() { - if (!_internal_has_sint64_list()) { - clear_value(); - set_has_sint64_list(); - value_.sint64_list_ = CreateMaybeMessage< ::flwr::proto::Value_Sint64List >(GetArenaForAllocation()); + } - return value_.sint64_list_; -} -inline ::flwr::proto::Value_Sint64List* Value::mutable_sint64_list() { - ::flwr::proto::Value_Sint64List* _msg = _internal_mutable_sint64_list(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Value.sint64_list) - return _msg; + group_id_.SetAllocated(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), group_id, + GetArenaForAllocation()); + // @@protoc_insertion_point(field_set_allocated:flwr.proto.TaskRes.group_id) } -// .flwr.proto.Value.BoolList bool_list = 23; -inline bool Value::_internal_has_bool_list() const { - return value_case() == kBoolList; +// sint64 run_id = 3; +inline void TaskRes::clear_run_id() { + run_id_ = int64_t{0}; } -inline bool Value::has_bool_list() const { - return _internal_has_bool_list(); +inline ::PROTOBUF_NAMESPACE_ID::int64 TaskRes::_internal_run_id() const { + return run_id_; } -inline void Value::set_has_bool_list() { - _oneof_case_[0] = kBoolList; +inline ::PROTOBUF_NAMESPACE_ID::int64 TaskRes::run_id() const { + // @@protoc_insertion_point(field_get:flwr.proto.TaskRes.run_id) + return _internal_run_id(); } -inline void Value::clear_bool_list() { - if (_internal_has_bool_list()) { - if (GetArenaForAllocation() == nullptr) { - delete value_.bool_list_; - } - clear_has_value(); - } +inline void TaskRes::_internal_set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value) { + + run_id_ = value; } -inline ::flwr::proto::Value_BoolList* Value::release_bool_list() { - // @@protoc_insertion_point(field_release:flwr.proto.Value.bool_list) - if (_internal_has_bool_list()) { - clear_has_value(); - ::flwr::proto::Value_BoolList* temp = 
value_.bool_list_; - if (GetArenaForAllocation() != nullptr) { - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - } - value_.bool_list_ = nullptr; - return temp; - } else { - return nullptr; - } +inline void TaskRes::set_run_id(::PROTOBUF_NAMESPACE_ID::int64 value) { + _internal_set_run_id(value); + // @@protoc_insertion_point(field_set:flwr.proto.TaskRes.run_id) } -inline const ::flwr::proto::Value_BoolList& Value::_internal_bool_list() const { - return _internal_has_bool_list() - ? *value_.bool_list_ - : reinterpret_cast< ::flwr::proto::Value_BoolList&>(::flwr::proto::_Value_BoolList_default_instance_); -} -inline const ::flwr::proto::Value_BoolList& Value::bool_list() const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.bool_list) - return _internal_bool_list(); -} -inline ::flwr::proto::Value_BoolList* Value::unsafe_arena_release_bool_list() { - // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.Value.bool_list) - if (_internal_has_bool_list()) { - clear_has_value(); - ::flwr::proto::Value_BoolList* temp = value_.bool_list_; - value_.bool_list_ = nullptr; - return temp; - } else { - return nullptr; - } + +// .flwr.proto.Task task = 4; +inline bool TaskRes::_internal_has_task() const { + return this != internal_default_instance() && task_ != nullptr; } -inline void Value::unsafe_arena_set_allocated_bool_list(::flwr::proto::Value_BoolList* bool_list) { - clear_value(); - if (bool_list) { - set_has_bool_list(); - value_.bool_list_ = bool_list; - } - // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Value.bool_list) +inline bool TaskRes::has_task() const { + return _internal_has_task(); } -inline ::flwr::proto::Value_BoolList* Value::_internal_mutable_bool_list() { - if (!_internal_has_bool_list()) { - clear_value(); - set_has_bool_list(); - value_.bool_list_ = CreateMaybeMessage< ::flwr::proto::Value_BoolList >(GetArenaForAllocation()); +inline void TaskRes::clear_task() { + if 
(GetArenaForAllocation() == nullptr && task_ != nullptr) { + delete task_; } - return value_.bool_list_; -} -inline ::flwr::proto::Value_BoolList* Value::mutable_bool_list() { - ::flwr::proto::Value_BoolList* _msg = _internal_mutable_bool_list(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Value.bool_list) - return _msg; -} - -// .flwr.proto.Value.StringList string_list = 24; -inline bool Value::_internal_has_string_list() const { - return value_case() == kStringList; + task_ = nullptr; } -inline bool Value::has_string_list() const { - return _internal_has_string_list(); +inline const ::flwr::proto::Task& TaskRes::_internal_task() const { + const ::flwr::proto::Task* p = task_; + return p != nullptr ? *p : reinterpret_cast( + ::flwr::proto::_Task_default_instance_); } -inline void Value::set_has_string_list() { - _oneof_case_[0] = kStringList; +inline const ::flwr::proto::Task& TaskRes::task() const { + // @@protoc_insertion_point(field_get:flwr.proto.TaskRes.task) + return _internal_task(); } -inline void Value::clear_string_list() { - if (_internal_has_string_list()) { - if (GetArenaForAllocation() == nullptr) { - delete value_.string_list_; - } - clear_has_value(); +inline void TaskRes::unsafe_arena_set_allocated_task( + ::flwr::proto::Task* task) { + if (GetArenaForAllocation() == nullptr) { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(task_); } -} -inline ::flwr::proto::Value_StringList* Value::release_string_list() { - // @@protoc_insertion_point(field_release:flwr.proto.Value.string_list) - if (_internal_has_string_list()) { - clear_has_value(); - ::flwr::proto::Value_StringList* temp = value_.string_list_; - if (GetArenaForAllocation() != nullptr) { - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); - } - value_.string_list_ = nullptr; - return temp; + task_ = task; + if (task) { + } else { - return nullptr; + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.TaskRes.task) } -inline 
const ::flwr::proto::Value_StringList& Value::_internal_string_list() const { - return _internal_has_string_list() - ? *value_.string_list_ - : reinterpret_cast< ::flwr::proto::Value_StringList&>(::flwr::proto::_Value_StringList_default_instance_); -} -inline const ::flwr::proto::Value_StringList& Value::string_list() const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.string_list) - return _internal_string_list(); -} -inline ::flwr::proto::Value_StringList* Value::unsafe_arena_release_string_list() { - // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.Value.string_list) - if (_internal_has_string_list()) { - clear_has_value(); - ::flwr::proto::Value_StringList* temp = value_.string_list_; - value_.string_list_ = nullptr; - return temp; - } else { - return nullptr; +inline ::flwr::proto::Task* TaskRes::release_task() { + + ::flwr::proto::Task* temp = task_; + task_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) { delete old; } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; } -inline void Value::unsafe_arena_set_allocated_string_list(::flwr::proto::Value_StringList* string_list) { - clear_value(); - if (string_list) { - set_has_string_list(); - value_.string_list_ = string_list; - } - // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Value.string_list) +inline ::flwr::proto::Task* TaskRes::unsafe_arena_release_task() { + // @@protoc_insertion_point(field_release:flwr.proto.TaskRes.task) + + ::flwr::proto::Task* temp = task_; + task_ = nullptr; + return temp; } -inline ::flwr::proto::Value_StringList* Value::_internal_mutable_string_list() { - if 
(!_internal_has_string_list()) { - clear_value(); - set_has_string_list(); - value_.string_list_ = CreateMaybeMessage< ::flwr::proto::Value_StringList >(GetArenaForAllocation()); +inline ::flwr::proto::Task* TaskRes::_internal_mutable_task() { + + if (task_ == nullptr) { + auto* p = CreateMaybeMessage<::flwr::proto::Task>(GetArenaForAllocation()); + task_ = p; } - return value_.string_list_; + return task_; } -inline ::flwr::proto::Value_StringList* Value::mutable_string_list() { - ::flwr::proto::Value_StringList* _msg = _internal_mutable_string_list(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Value.string_list) +inline ::flwr::proto::Task* TaskRes::mutable_task() { + ::flwr::proto::Task* _msg = _internal_mutable_task(); + // @@protoc_insertion_point(field_mutable:flwr.proto.TaskRes.task) return _msg; } - -// .flwr.proto.Value.BytesList bytes_list = 25; -inline bool Value::_internal_has_bytes_list() const { - return value_case() == kBytesList; -} -inline bool Value::has_bytes_list() const { - return _internal_has_bytes_list(); -} -inline void Value::set_has_bytes_list() { - _oneof_case_[0] = kBytesList; -} -inline void Value::clear_bytes_list() { - if (_internal_has_bytes_list()) { - if (GetArenaForAllocation() == nullptr) { - delete value_.bytes_list_; - } - clear_has_value(); +inline void TaskRes::set_allocated_task(::flwr::proto::Task* task) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) { + delete task_; } -} -inline ::flwr::proto::Value_BytesList* Value::release_bytes_list() { - // @@protoc_insertion_point(field_release:flwr.proto.Value.bytes_list) - if (_internal_has_bytes_list()) { - clear_has_value(); - ::flwr::proto::Value_BytesList* temp = value_.bytes_list_; - if (GetArenaForAllocation() != nullptr) { - temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (task) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + 
::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper<::flwr::proto::Task>::GetOwningArena(task); + if (message_arena != submessage_arena) { + task = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, task, submessage_arena); } - value_.bytes_list_ = nullptr; - return temp; - } else { - return nullptr; - } -} -inline const ::flwr::proto::Value_BytesList& Value::_internal_bytes_list() const { - return _internal_has_bytes_list() - ? *value_.bytes_list_ - : reinterpret_cast< ::flwr::proto::Value_BytesList&>(::flwr::proto::_Value_BytesList_default_instance_); -} -inline const ::flwr::proto::Value_BytesList& Value::bytes_list() const { - // @@protoc_insertion_point(field_get:flwr.proto.Value.bytes_list) - return _internal_bytes_list(); -} -inline ::flwr::proto::Value_BytesList* Value::unsafe_arena_release_bytes_list() { - // @@protoc_insertion_point(field_unsafe_arena_release:flwr.proto.Value.bytes_list) - if (_internal_has_bytes_list()) { - clear_has_value(); - ::flwr::proto::Value_BytesList* temp = value_.bytes_list_; - value_.bytes_list_ = nullptr; - return temp; + } else { - return nullptr; - } -} -inline void Value::unsafe_arena_set_allocated_bytes_list(::flwr::proto::Value_BytesList* bytes_list) { - clear_value(); - if (bytes_list) { - set_has_bytes_list(); - value_.bytes_list_ = bytes_list; - } - // @@protoc_insertion_point(field_unsafe_arena_set_allocated:flwr.proto.Value.bytes_list) -} -inline ::flwr::proto::Value_BytesList* Value::_internal_mutable_bytes_list() { - if (!_internal_has_bytes_list()) { - clear_value(); - set_has_bytes_list(); - value_.bytes_list_ = CreateMaybeMessage< ::flwr::proto::Value_BytesList >(GetArenaForAllocation()); + } - return value_.bytes_list_; -} -inline ::flwr::proto::Value_BytesList* Value::mutable_bytes_list() { - ::flwr::proto::Value_BytesList* _msg = _internal_mutable_bytes_list(); - // @@protoc_insertion_point(field_mutable:flwr.proto.Value.bytes_list) - return _msg; -} - -inline bool Value::has_value() const { - 
return value_case() != VALUE_NOT_SET; -} -inline void Value::clear_has_value() { - _oneof_case_[0] = VALUE_NOT_SET; -} -inline Value::ValueCase Value::value_case() const { - return Value::ValueCase(_oneof_case_[0]); -} -// ------------------------------------------------------------------- - -// ------------------------------------------------------------------- - -// SecureAggregation - -// map named_values = 1; -inline int SecureAggregation::_internal_named_values_size() const { - return named_values_.size(); -} -inline int SecureAggregation::named_values_size() const { - return _internal_named_values_size(); -} -inline void SecureAggregation::clear_named_values() { - named_values_.Clear(); -} -inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >& -SecureAggregation::_internal_named_values() const { - return named_values_.GetMap(); -} -inline const ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >& -SecureAggregation::named_values() const { - // @@protoc_insertion_point(field_map:flwr.proto.SecureAggregation.named_values) - return _internal_named_values(); -} -inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >* -SecureAggregation::_internal_mutable_named_values() { - return named_values_.MutableMap(); -} -inline ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::flwr::proto::Value >* -SecureAggregation::mutable_named_values() { - // @@protoc_insertion_point(field_mutable_map:flwr.proto.SecureAggregation.named_values) - return _internal_mutable_named_values(); + task_ = task; + // @@protoc_insertion_point(field_set_allocated:flwr.proto.TaskRes.task) } #ifdef __GNUC__ @@ -4215,22 +1772,6 @@ SecureAggregation::mutable_named_values() { // ------------------------------------------------------------------- -// ------------------------------------------------------------------- - -// ------------------------------------------------------------------- - -// 
------------------------------------------------------------------- - -// ------------------------------------------------------------------- - -// ------------------------------------------------------------------- - -// ------------------------------------------------------------------- - -// ------------------------------------------------------------------- - -// ------------------------------------------------------------------- - // @@protoc_insertion_point(namespace_scope) diff --git a/src/cc/flwr/include/grpc_rere.h b/src/cc/flwr/include/grpc_rere.h index 2a04b923f200..4f5a1895cbac 100644 --- a/src/cc/flwr/include/grpc_rere.h +++ b/src/cc/flwr/include/grpc_rere.h @@ -15,15 +15,29 @@ #ifndef GRPC_RERE_H #define GRPC_RERE_H #pragma once +#include "communicator.h" +#include "flwr/proto/fleet.grpc.pb.h" #include "message_handler.h" -#include "task_handler.h" #include -void create_node(const std::unique_ptr &stub); -void delete_node(const std::unique_ptr &stub); -void send(const std::unique_ptr &stub, - flwr::proto::TaskRes task_res); -std::optional -receive(const std::unique_ptr &stub); +class gRPCRereCommunicator : public Communicator { +public: + gRPCRereCommunicator(std::string server_address, int grpc_max_message_length); + + bool send_create_node(flwr::proto::CreateNodeRequest request, + flwr::proto::CreateNodeResponse *response); + + bool send_delete_node(flwr::proto::DeleteNodeRequest request, + flwr::proto::DeleteNodeResponse *response); + + bool send_pull_task_ins(flwr::proto::PullTaskInsRequest request, + flwr::proto::PullTaskInsResponse *response); + + bool send_push_task_res(flwr::proto::PushTaskResRequest request, + flwr::proto::PushTaskResResponse *response); + +private: + std::unique_ptr stub; +}; #endif diff --git a/src/cc/flwr/include/message_handler.h b/src/cc/flwr/include/message_handler.h index eb6df5cc1f9d..0c45ea485359 100644 --- a/src/cc/flwr/include/message_handler.h +++ b/src/cc/flwr/include/message_handler.h @@ -16,24 +16,8 @@ #include 
"client.h" #include "serde.h" -std::tuple -_reconnect(flwr::proto::ServerMessage_ReconnectIns reconnect_msg); - -flwr::proto::ClientMessage _get_parameters(flwr_local::Client *client); - -flwr::proto::ClientMessage _fit(flwr_local::Client *client, - flwr::proto::ServerMessage_FitIns fit_msg); - -flwr::proto::ClientMessage -_evaluate(flwr_local::Client *client, - flwr::proto::ServerMessage_EvaluateIns evaluate_msg); - std::tuple handle(flwr_local::Client *client, flwr::proto::ServerMessage server_msg); std::tuple handle_task(flwr_local::Client *client, const flwr::proto::TaskIns &task_ins); - -flwr::proto::TaskRes configure_task_res(const flwr::proto::TaskRes &task_res, - const flwr::proto::TaskIns &task_ins, - const flwr::proto::Node &node); diff --git a/src/cc/flwr/include/serde.h b/src/cc/flwr/include/serde.h index 8b9d809d7c8a..384f2b05c011 100644 --- a/src/cc/flwr/include/serde.h +++ b/src/cc/flwr/include/serde.h @@ -13,9 +13,7 @@ * ********************************************************************************************************/ #pragma once -#include "flwr/proto/fleet.grpc.pb.h" #include "flwr/proto/fleet.pb.h" -#include "flwr/proto/transport.grpc.pb.h" #include "flwr/proto/transport.pb.h" #include "typing.h" @@ -80,3 +78,24 @@ evaluate_ins_from_proto(flwr::proto::ServerMessage_EvaluateIns msg); */ flwr::proto::ClientMessage_EvaluateRes evaluate_res_to_proto(flwr_local::EvaluateRes res); + +flwr_local::RecordSet +recordset_from_proto(const flwr::proto::RecordSet &recordset); + +flwr_local::FitIns recordset_to_fit_ins(const flwr_local::RecordSet &recordset, + bool keep_input); + +flwr_local::EvaluateIns +recordset_to_evaluate_ins(const flwr_local::RecordSet &recordset, + bool keep_input); + +flwr_local::RecordSet +recordset_from_evaluate_res(const flwr_local::EvaluateRes &evaluate_res); + +flwr_local::RecordSet recordset_from_fit_res(const flwr_local::FitRes &fit_res); + +flwr_local::RecordSet recordset_from_get_parameters_res( + const 
flwr_local::ParametersRes ¶meters_res); + +flwr::proto::RecordSet +recordset_to_proto(const flwr_local::RecordSet &recordset); diff --git a/src/cc/flwr/include/start.h b/src/cc/flwr/include/start.h index 2c233be8249c..1a9033278df9 100644 --- a/src/cc/flwr/include/start.h +++ b/src/cc/flwr/include/start.h @@ -17,6 +17,8 @@ #define START_H #pragma once #include "client.h" +#include "communicator.h" +#include "flwr/proto/transport.grpc.pb.h" #include "grpc_rere.h" #include "message_handler.h" #include @@ -51,8 +53,5 @@ class start { static void start_client(std::string server_address, flwr_local::Client *client, int grpc_max_message_length = GRPC_MAX_MESSAGE_LENGTH); - static void - start_rere_client(std::string server_address, flwr_local::Client *client, - int grpc_max_message_length = GRPC_MAX_MESSAGE_LENGTH); }; #endif diff --git a/src/cc/flwr/include/task_handler.h b/src/cc/flwr/include/task_handler.h deleted file mode 100644 index 77fe5fef4d98..000000000000 --- a/src/cc/flwr/include/task_handler.h +++ /dev/null @@ -1,24 +0,0 @@ -/************************************************************************************************* - * - * @file task_handler.h - * - * @brief Handle incoming or outgoing tasks - * - * @author The Flower Authors - * - * @version 1.0 - * - * @date 06/11/2023 - * - *************************************************************************************************/ - -#pragma once -#include "client.h" -#include "serde.h" - -bool validate_task_ins(const flwr::proto::TaskIns &task_ins, - const bool discard_reconnect_ins); -bool validate_task_res(const flwr::proto::TaskRes &task_res); -flwr::proto::TaskRes configure_task_res(const flwr::proto::TaskRes &task_res, - const flwr::proto::TaskIns &task_ins, - const flwr::proto::Node &node); diff --git a/src/cc/flwr/include/typing.h b/src/cc/flwr/include/typing.h index 5aee90b6c215..39b78dc89ede 100644 --- a/src/cc/flwr/include/typing.h +++ b/src/cc/flwr/include/typing.h @@ -17,6 +17,8 @@ #include 
#include #include +#include +#include namespace flwr_local { /** @@ -66,8 +68,8 @@ class Parameters { : tensors(tensors), tensor_type(tensor_type) {} // Getters - std::list getTensors() { return tensors; } - std::string getTensor_type() { return tensor_type; } + const std::list getTensors() const { return tensors; } + const std::string getTensor_type() const { return tensor_type; } // Setters void setTensors(const std::list &tensors) { @@ -90,7 +92,7 @@ class ParametersRes { explicit ParametersRes(const Parameters ¶meters) : parameters(parameters) {} - Parameters getParameters() { return parameters; } + const Parameters getParameters() const { return parameters; } void setParameters(const Parameters &p) { parameters = p; } private: @@ -129,35 +131,26 @@ class FitRes { FitRes() {} FitRes(const Parameters ¶meters, int num_examples, int num_examples_ceil, float fit_duration, const Metrics &metrics) - : parameters(parameters), num_examples(num_examples), - fit_duration(fit_duration), metrics(metrics) {} + : _parameters(parameters), _num_examples(num_examples), + _fit_duration(fit_duration), _metrics(metrics) {} // Getters - Parameters getParameters() { return parameters; } - int getNum_example() { return num_examples; } - /*std::optional getNum_examples_ceil() - { - return num_examples_ceil; - }*/ - std::optional getFit_duration() { return fit_duration; } - std::optional getMetrics() { return metrics; } + const Parameters getParameters() const { return _parameters; } + const int getNum_example() const { return _num_examples; } + const std::optional getFit_duration() const { return _fit_duration; } + const std::optional getMetrics() const { return _metrics; } // Setters - void setParameters(const Parameters &p) { parameters = p; } - void setNum_example(int n) { num_examples = n; } - /*void setNum_examples_ceil(int n) - { - num_examples_ceil = n; - }*/ - void setFit_duration(float f) { fit_duration = f; } - void setMetrics(const flwr_local::Metrics &m) { metrics = m; } + 
void setParameters(const Parameters &p) { _parameters = p; } + void setNum_example(int n) { _num_examples = n; } + void setFit_duration(float f) { _fit_duration = f; } + void setMetrics(const flwr_local::Metrics &m) { _metrics = m; } private: - Parameters parameters; - int num_examples; - // std::optional num_examples_ceil = std::nullopt; - std::optional fit_duration = std::nullopt; - std::optional metrics = std::nullopt; + Parameters _parameters; + int _num_examples; + std::optional _fit_duration = std::nullopt; + std::optional _metrics = std::nullopt; }; /** @@ -195,9 +188,9 @@ class EvaluateRes { : loss(loss), num_examples(num_examples), metrics(metrics) {} // Getters - float getLoss() { return loss; } - int getNum_example() { return num_examples; } - std::optional getMetrics() { return metrics; } + const float getLoss() const { return loss; } + const int getNum_example() const { return num_examples; } + const std::optional getMetrics() const { return metrics; } // Setters void setLoss(float f) { loss = f; } @@ -239,4 +232,62 @@ class PropertiesRes { Properties properties; }; +struct Array { + std::string dtype; + std::vector shape; + std::string stype; + std::string data; // use string to represent bytes +}; + +using ParametersRecord = std::map; +using MetricsRecord = + std::map, std::vector>>; + +using ConfigsRecord = + std::map, + std::vector, std::vector, + std::vector>>; + +class RecordSet { +public: + RecordSet( + const std::map ¶metersRecords = {}, + const std::map &metricsRecords = {}, + const std::map &configsRecords = {}) + : _parametersRecords(parametersRecords), _metricsRecords(metricsRecords), + _configsRecords(configsRecords) {} + + const std::map &getParametersRecords() const { + return _parametersRecords; + } + const std::map &getMetricsRecords() const { + return _metricsRecords; + } + const std::map &getConfigsRecords() const { + return _configsRecords; + } + + void setParametersRecords( + const std::map ¶metersRecords) { + _parametersRecords = 
parametersRecords; + } + + void setMetricsRecords( + const std::map &metricsRecords) { + _metricsRecords = metricsRecords; + } + + void setConfigsRecords( + const std::map &configsRecords) { + _configsRecords = configsRecords; + } + +private: + std::map _parametersRecords; + std::map _metricsRecords; + std::map _configsRecords; +}; + } // namespace flwr_local diff --git a/src/cc/flwr/src/communicator.cc b/src/cc/flwr/src/communicator.cc new file mode 100644 index 000000000000..bcbea9de60ef --- /dev/null +++ b/src/cc/flwr/src/communicator.cc @@ -0,0 +1,187 @@ +#include "communicator.h" + +const std::string KEY_NODE = "node"; +const std::string KEY_TASK_INS = "current_task_ins"; + +std::map> node_store; +std::map> state; + +std::mutex node_store_mutex; +std::mutex state_mutex; + +std::optional get_node_from_store() { + std::lock_guard lock(node_store_mutex); + auto node = node_store.find(KEY_NODE); + if (node == node_store.end() || !node->second.has_value()) { + std::cerr << "Node instance missing" << std::endl; + return std::nullopt; + } + return node->second; +} + +bool validate_task_ins(const flwr::proto::TaskIns &task_ins, + const bool discard_reconnect_ins) { + return task_ins.has_task() && task_ins.task().has_recordset(); +} + +bool validate_task_res(const flwr::proto::TaskRes &task_res) { + // Retrieve initialized fields in TaskRes + return true; +} + +flwr::proto::TaskRes +configure_task_res(const flwr::proto::TaskRes &task_res, + const flwr::proto::TaskIns &ref_task_ins, + const flwr::proto::Node &producer) { + flwr::proto::TaskRes result_task_res; + + // Setting scalar fields + result_task_res.set_task_id(""); + result_task_res.set_group_id(ref_task_ins.group_id()); + result_task_res.set_run_id(ref_task_ins.run_id()); + + // Merge the task from the input task_res + *result_task_res.mutable_task() = task_res.task(); + + // Construct and set the producer and consumer for the task + std::unique_ptr new_producer = + std::make_unique(producer); + 
result_task_res.mutable_task()->set_allocated_producer( + new_producer.release()); + + std::unique_ptr new_consumer = + std::make_unique(ref_task_ins.task().producer()); + result_task_res.mutable_task()->set_allocated_consumer( + new_consumer.release()); + + // Set ancestry in the task + result_task_res.mutable_task()->add_ancestry(ref_task_ins.task_id()); + + return result_task_res; +} + +void delete_node_from_store() { + std::lock_guard lock(node_store_mutex); + auto node = node_store.find(KEY_NODE); + if (node == node_store.end() || !node->second.has_value()) { + node_store.erase(node); + } +} + +std::optional get_current_task_ins() { + std::lock_guard state_lock(state_mutex); + auto current_task_ins = state.find(KEY_TASK_INS); + if (current_task_ins == state.end() || + !current_task_ins->second.has_value()) { + std::cerr << "No current TaskIns" << std::endl; + return std::nullopt; + } + return current_task_ins->second; +} + +void create_node(Communicator *communicator) { + flwr::proto::CreateNodeRequest create_node_request; + flwr::proto::CreateNodeResponse create_node_response; + + create_node_request.set_ping_interval(300.0); + + communicator->send_create_node(create_node_request, &create_node_response); + + // Validate the response + if (!create_node_response.has_node()) { + std::cerr << "Received response does not contain a node." 
<< std::endl; + return; + } + + { + std::lock_guard lock(node_store_mutex); + node_store[KEY_NODE] = create_node_response.node(); + } +} + +void delete_node(Communicator *communicator) { + auto node = get_node_from_store(); + if (!node) { + return; + } + flwr::proto::DeleteNodeRequest delete_node_request; + flwr::proto::DeleteNodeResponse delete_node_response; + + auto heap_node = new flwr::proto::Node(*node); + delete_node_request.set_allocated_node(heap_node); + + if (!communicator->send_delete_node(delete_node_request, + &delete_node_response)) { + delete heap_node; // Make sure to delete if status is not ok + return; + } else { + delete_node_request.release_node(); // Release if status is ok + } + + delete_node_from_store(); +} + +std::optional receive(Communicator *communicator) { + auto node = get_node_from_store(); + if (!node) { + return std::nullopt; + } + flwr::proto::PullTaskInsResponse response; + flwr::proto::PullTaskInsRequest request; + + request.set_allocated_node(new flwr::proto::Node(*node)); + + bool success = communicator->send_pull_task_ins(request, &response); + + // Release ownership so that the heap_node won't be deleted when request + // goes out of scope. + request.release_node(); + + if (!success) { + return std::nullopt; + } + + if (response.task_ins_list_size() > 0) { + flwr::proto::TaskIns task_ins = response.task_ins_list().at(0); + if (validate_task_ins(task_ins, true)) { + std::lock_guard state_lock(state_mutex); + state[KEY_TASK_INS] = task_ins; + return task_ins; + } + } + std::cerr << "TaskIns list is empty." 
<< std::endl; + return std::nullopt; +} + +void send(Communicator *communicator, flwr::proto::TaskRes task_res) { + auto node = get_node_from_store(); + if (!node) { + return; + } + + auto task_ins = get_current_task_ins(); + if (!task_ins) { + return; + } + + if (!validate_task_res(task_res)) { + std::cerr << "TaskRes is invalid" << std::endl; + std::lock_guard state_lock(state_mutex); + state[KEY_TASK_INS].reset(); + return; + } + + flwr::proto::TaskRes new_task_res = + configure_task_res(task_res, *task_ins, *node); + + flwr::proto::PushTaskResRequest request; + *request.add_task_res_list() = new_task_res; + flwr::proto::PushTaskResResponse response; + + communicator->send_push_task_res(request, &response); + + { + std::lock_guard state_lock(state_mutex); + state[KEY_TASK_INS].reset(); + } +} diff --git a/src/cc/flwr/src/grpc_rere.cc b/src/cc/flwr/src/grpc_rere.cc index 267874a7a0e2..b8a04d9b9bf7 100644 --- a/src/cc/flwr/src/grpc_rere.cc +++ b/src/cc/flwr/src/grpc_rere.cc @@ -1,167 +1,75 @@ #include "grpc_rere.h" +#include "flwr/proto/fleet.grpc.pb.h" -const std::string KEY_NODE = "node"; -const std::string KEY_TASK_INS = "current_task_ins"; +gRPCRereCommunicator::gRPCRereCommunicator(std::string server_address, + int grpc_max_message_length) { + grpc::ChannelArguments args; + args.SetMaxReceiveMessageSize(grpc_max_message_length); + args.SetMaxSendMessageSize(grpc_max_message_length); -std::map> node_store; -std::map> state; + // Establish an insecure gRPC connection to a gRPC server + std::shared_ptr channel = grpc::CreateCustomChannel( + server_address, grpc::InsecureChannelCredentials(), args); -std::mutex node_store_mutex; -std::mutex state_mutex; - -std::optional get_node_from_store() { - std::lock_guard lock(node_store_mutex); - auto node = node_store.find(KEY_NODE); - if (node == node_store.end() || !node->second.has_value()) { - std::cerr << "Node instance missing" << std::endl; - return std::nullopt; - } - return node->second; -} - -void 
delete_node_from_store() { - std::lock_guard lock(node_store_mutex); - auto node = node_store.find(KEY_NODE); - if (node == node_store.end() || !node->second.has_value()) { - node_store.erase(node); - } -} - -std::optional get_current_task_ins() { - std::lock_guard state_lock(state_mutex); - auto current_task_ins = state.find(KEY_TASK_INS); - if (current_task_ins == state.end() || - !current_task_ins->second.has_value()) { - std::cerr << "No current TaskIns" << std::endl; - return std::nullopt; - } - return current_task_ins->second; + // Create stub + stub = flwr::proto::Fleet::NewStub(channel); } -void create_node(const std::unique_ptr &stub) { - flwr::proto::CreateNodeRequest create_node_request; - flwr::proto::CreateNodeResponse create_node_response; - +bool gRPCRereCommunicator::send_create_node( + flwr::proto::CreateNodeRequest request, + flwr::proto::CreateNodeResponse *response) { grpc::ClientContext context; - grpc::Status status = - stub->CreateNode(&context, create_node_request, &create_node_response); - + grpc::Status status = stub->CreateNode(&context, request, response); if (!status.ok()) { std::cerr << "CreateNode RPC failed: " << status.error_message() << std::endl; - return; - } - - // Validate the response - if (!create_node_response.has_node()) { - std::cerr << "Received response does not contain a node." 
<< std::endl; - return; + return false; } - { - std::lock_guard lock(node_store_mutex); - node_store[KEY_NODE] = create_node_response.node(); - } + return true; } -void delete_node(const std::unique_ptr &stub) { - auto node = get_node_from_store(); - if (!node) { - return; - } - flwr::proto::DeleteNodeRequest delete_node_request; - flwr::proto::DeleteNodeResponse delete_node_response; - - auto heap_node = new flwr::proto::Node(*node); - delete_node_request.set_allocated_node(heap_node); - +bool gRPCRereCommunicator::send_delete_node( + flwr::proto::DeleteNodeRequest request, + flwr::proto::DeleteNodeResponse *response) { grpc::ClientContext context; - grpc::Status status = - stub->DeleteNode(&context, delete_node_request, &delete_node_response); + grpc::Status status = stub->DeleteNode(&context, request, response); if (!status.ok()) { std::cerr << "DeleteNode RPC failed with status: " << status.error_message() << std::endl; - delete heap_node; // Make sure to delete if status is not ok - return; - } else { - delete_node_request.release_node(); // Release if status is ok + return false; } - delete_node_from_store(); + return true; } -std::optional -receive(const std::unique_ptr &stub) { - auto node = get_node_from_store(); - if (!node) { - return std::nullopt; - } - flwr::proto::PullTaskInsResponse response; - flwr::proto::PullTaskInsRequest request; - - request.set_allocated_node(new flwr::proto::Node(*node)); - +bool gRPCRereCommunicator::send_pull_task_ins( + flwr::proto::PullTaskInsRequest request, + flwr::proto::PullTaskInsResponse *response) { grpc::ClientContext context; - grpc::Status status = stub->PullTaskIns(&context, request, &response); - - // Release ownership so that the heap_node won't be deleted when request - // goes out of scope. 
- request.release_node(); + grpc::Status status = stub->PullTaskIns(&context, request, response); if (!status.ok()) { std::cerr << "PullTaskIns RPC failed with status: " << status.error_message() << std::endl; - return std::nullopt; + return false; } - if (response.task_ins_list_size() > 0) { - flwr::proto::TaskIns task_ins = response.task_ins_list().at(0); - if (validate_task_ins(task_ins, true)) { - std::lock_guard state_lock(state_mutex); - state[KEY_TASK_INS] = task_ins; - return task_ins; - } - } - std::cerr << "TaskIns list is empty." << std::endl; - return std::nullopt; + return true; } -void send(const std::unique_ptr &stub, - flwr::proto::TaskRes task_res) { - auto node = get_node_from_store(); - if (!node) { - return; - } - - auto task_ins = get_current_task_ins(); - if (!task_ins) { - return; - } - - if (!validate_task_res(task_res)) { - std::cerr << "TaskRes is invalid" << std::endl; - std::lock_guard state_lock(state_mutex); - state[KEY_TASK_INS].reset(); - return; - } - - flwr::proto::TaskRes new_task_res = - configure_task_res(task_res, *task_ins, *node); - - flwr::proto::PushTaskResRequest request; - *request.add_task_res_list() = new_task_res; - flwr::proto::PushTaskResResponse response; - +bool gRPCRereCommunicator::send_push_task_res( + flwr::proto::PushTaskResRequest request, + flwr::proto::PushTaskResResponse *response) { grpc::ClientContext context; - grpc::Status status = stub->PushTaskRes(&context, request, &response); + grpc::Status status = stub->PushTaskRes(&context, request, response); if (!status.ok()) { std::cerr << "PushTaskRes RPC failed with status: " << status.error_message() << std::endl; + return false; } - { - std::lock_guard state_lock(state_mutex); - state[KEY_TASK_INS].reset(); - } + + return true; } diff --git a/src/cc/flwr/src/message_handler.cc b/src/cc/flwr/src/message_handler.cc index 2c1e9ccbb49d..e1ce56f2cd96 100644 --- a/src/cc/flwr/src/message_handler.cc +++ b/src/cc/flwr/src/message_handler.cc @@ -1,109 +1,104 @@ 
#include "message_handler.h" +#include "flwr/proto/task.pb.h" +#include + +std::tuple +_reconnect(flwr::proto::RecordSet proto_recordset) { -std::tuple -_reconnect(flwr::proto::ServerMessage_ReconnectIns reconnect_msg) { // Determine the reason for sending Disconnect message flwr::proto::Reason reason = flwr::proto::Reason::ACK; int sleep_duration = 0; - if (reconnect_msg.seconds() != 0) { - reason = flwr::proto::Reason::RECONNECT; - sleep_duration = reconnect_msg.seconds(); - } // Build Disconnect message - flwr::proto::ClientMessage_DisconnectRes disconnect; - disconnect.set_reason(reason); - flwr::proto::ClientMessage cm; - *cm.mutable_disconnect_res() = disconnect; - - return std::make_tuple(cm, sleep_duration); + return std::make_tuple( + flwr_local::RecordSet({}, {}, {{"config", {{"reason", reason}}}}), + sleep_duration); } -flwr::proto::ClientMessage _get_parameters(flwr_local::Client *client) { - flwr::proto::ClientMessage cm; - *(cm.mutable_get_parameters_res()) = - parameters_res_to_proto(client->get_parameters()); - return cm; +flwr_local::RecordSet _get_parameters(flwr_local::Client *client) { + return recordset_from_get_parameters_res(client->get_parameters()); } -flwr::proto::ClientMessage _fit(flwr_local::Client *client, - flwr::proto::ServerMessage_FitIns fit_msg) { - // Deserialize fit instruction - flwr_local::FitIns fit_ins = fit_ins_from_proto(fit_msg); +flwr_local::RecordSet _fit(flwr_local::Client *client, + flwr::proto::RecordSet proto_recordset) { + flwr_local::RecordSet recordset = recordset_from_proto(proto_recordset); + flwr_local::FitIns fit_ins = recordset_to_fit_ins(recordset, true); // Perform fit flwr_local::FitRes fit_res = client->fit(fit_ins); - // Serialize fit result - flwr::proto::ClientMessage cm; - *cm.mutable_fit_res() = fit_res_to_proto(fit_res); - return cm; + + flwr_local::RecordSet out_recordset = recordset_from_fit_res(fit_res); + return out_recordset; } -flwr::proto::ClientMessage -_evaluate(flwr_local::Client *client, 
- flwr::proto::ServerMessage_EvaluateIns evaluate_msg) { - // Deserialize evaluate instruction - flwr_local::EvaluateIns evaluate_ins = evaluate_ins_from_proto(evaluate_msg); +flwr_local::RecordSet _evaluate(flwr_local::Client *client, + flwr::proto::RecordSet proto_recordset) { + flwr_local::RecordSet recordset = recordset_from_proto(proto_recordset); + flwr_local::EvaluateIns evaluate_ins = + recordset_to_evaluate_ins(recordset, true); // Perform evaluation flwr_local::EvaluateRes evaluate_res = client->evaluate(evaluate_ins); - // Serialize evaluate result - flwr::proto::ClientMessage cm; - *cm.mutable_evaluate_res() = evaluate_res_to_proto(evaluate_res); - return cm; + + flwr_local::RecordSet out_recordset = + recordset_from_evaluate_res(evaluate_res); + return out_recordset; } -std::tuple -handle(flwr_local::Client *client, flwr::proto::ServerMessage server_msg) { - if (server_msg.has_reconnect_ins()) { - std::tuple rec = - _reconnect(server_msg.reconnect_ins()); +std::tuple handle(flwr_local::Client *client, + flwr::proto::Task task) { + if (task.task_type() == "reconnect") { + std::tuple rec = _reconnect(task.recordset()); return std::make_tuple(std::get<0>(rec), std::get<1>(rec), false); } - if (server_msg.has_get_parameters_ins()) { + if (task.task_type() == "get_parameters") { return std::make_tuple(_get_parameters(client), 0, true); } - if (server_msg.has_fit_ins()) { - return std::make_tuple(_fit(client, server_msg.fit_ins()), 0, true); + if (task.task_type() == "train") { + return std::make_tuple(_fit(client, task.recordset()), 0, true); } - if (server_msg.has_evaluate_ins()) { - return std::make_tuple(_evaluate(client, server_msg.evaluate_ins()), 0, - true); + if (task.task_type() == "evaluate") { + return std::make_tuple(_evaluate(client, task.recordset()), 0, true); } throw "Unkown server message"; } std::tuple handle_task(flwr_local::Client *client, const flwr::proto::TaskIns &task_ins) { + flwr::proto::Task received_task = task_ins.task(); 
-#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" - if (!task_ins.task().has_legacy_server_message()) { - // TODO: Handle SecureAggregation - throw std::runtime_error("Task still needs legacy server message"); - } - flwr::proto::ServerMessage server_msg = - task_ins.task().legacy_server_message(); -#pragma GCC diagnostic pop - - std::tuple legacy_res = - handle(client, server_msg); - std::unique_ptr client_message = - std::make_unique(std::get<0>(legacy_res)); + std::tuple legacy_res = + handle(client, received_task); + auto conf_records = + recordset_from_proto(recordset_to_proto(std::get<0>(legacy_res))) + .getConfigsRecords(); flwr::proto::TaskRes task_res; + task_res.set_task_id(""); - task_res.set_group_id(""); - task_res.set_workload_id(0); + task_res.set_group_id(task_ins.group_id()); + task_res.set_run_id(task_ins.run_id()); std::unique_ptr task = std::make_unique(); -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" - task->set_allocated_legacy_client_message( - client_message.release()); // Ownership transferred to `task` -#pragma GCC diagnostic pop + std::unique_ptr proto_recordset_ptr = + std::make_unique( + recordset_to_proto(std::get<0>(legacy_res))); + + task->set_allocated_recordset(proto_recordset_ptr.release()); + task->set_task_type(received_task.task_type()); + task->set_ttl(3600); + task->set_created_at(std::chrono::duration_cast( + std::chrono::system_clock::now().time_since_epoch()) + .count()); + task->set_allocated_consumer( + std::make_unique(received_task.producer()).release()); + task->set_allocated_producer( + std::make_unique(received_task.consumer()).release()); task_res.set_allocated_task(task.release()); - return std::make_tuple(task_res, std::get<1>(legacy_res), - std::get<2>(legacy_res)); + + std::tuple tuple = std::make_tuple( + task_res, std::get<1>(legacy_res), std::get<2>(legacy_res)); + + return tuple; } diff --git a/src/cc/flwr/src/serde.cc 
b/src/cc/flwr/src/serde.cc index 2977915b57df..f3ad17a3386b 100644 --- a/src/cc/flwr/src/serde.cc +++ b/src/cc/flwr/src/serde.cc @@ -1,4 +1,6 @@ #include "serde.h" +#include "flwr/proto/recordset.pb.h" +#include "typing.h" /** * Serialize client parameters to protobuf parameters message @@ -185,3 +187,441 @@ evaluate_res_to_proto(flwr_local::EvaluateRes res) { return cres; } + +flwr::proto::Array array_to_proto(const flwr_local::Array &array) { + flwr::proto::Array protoArray; + protoArray.set_dtype(array.dtype); + for (int32_t dim : array.shape) { + protoArray.add_shape(dim); + } + protoArray.set_stype(array.stype); + protoArray.set_data({array.data.begin(), array.data.end()}); + return protoArray; +} + +flwr_local::Array array_from_proto(const flwr::proto::Array &protoArray) { + flwr_local::Array array; + array.dtype = protoArray.dtype(); + array.shape.assign(protoArray.shape().begin(), protoArray.shape().end()); + array.stype = protoArray.stype(); + + const std::string &protoData = protoArray.data(); + array.data.assign(protoData.begin(), protoData.end()); + + return array; +} + +flwr::proto::ParametersRecord +parameters_record_to_proto(const flwr_local::ParametersRecord &record) { + flwr::proto::ParametersRecord protoRecord; + for (const auto &[key, value] : record) { + *protoRecord.add_data_keys() = key; + *protoRecord.add_data_values() = array_to_proto(value); + } + return protoRecord; +} + +flwr_local::ParametersRecord +parameters_record_from_proto(const flwr::proto::ParametersRecord &protoRecord) { + flwr_local::ParametersRecord record; + + auto keys = protoRecord.data_keys(); + auto values = protoRecord.data_values(); + for (size_t i = 0; i < keys.size(); ++i) { + record[keys[i]] = array_from_proto(values[i]); + } + return record; +} + +flwr::proto::MetricsRecord +metrics_record_to_proto(const flwr_local::MetricsRecord &record) { + flwr::proto::MetricsRecord protoRecord; + + for (const auto &[key, value] : record) { + auto &data = 
(*protoRecord.mutable_data())[key]; + + if (std::holds_alternative(value)) { + data.set_sint64(std::get(value)); + } else if (std::holds_alternative(value)) { + data.set_double_(std::get(value)); + } else if (std::holds_alternative>(value)) { + auto &int_list = std::get>(value); + auto *list = data.mutable_sint64_list(); + for (int val : int_list) { + list->add_vals(val); + } + } else if (std::holds_alternative>(value)) { + auto &double_list = std::get>(value); + auto *list = data.mutable_double_list(); + for (double val : double_list) { + list->add_vals(val); + } + } + } + + return protoRecord; +} + +flwr_local::MetricsRecord +metrics_record_from_proto(const flwr::proto::MetricsRecord &protoRecord) { + flwr_local::MetricsRecord record; + + for (const auto &[key, value] : protoRecord.data()) { + if (value.has_sint64()) { + record[key] = (int)value.sint64(); + } else if (value.has_double_()) { + record[key] = (double)value.double_(); + } else if (value.has_sint64_list()) { + std::vector int_list; + for (const auto sint : value.sint64_list().vals()) { + int_list.push_back((int)sint); + } + record[key] = int_list; + } else if (value.has_double_list()) { + std::vector double_list; + for (const auto proto_double : value.double_list().vals()) { + double_list.push_back((double)proto_double); + } + record[key] = double_list; + } + } + return record; +} + +flwr::proto::ConfigsRecord +configs_record_to_proto(const flwr_local::ConfigsRecord &record) { + flwr::proto::ConfigsRecord protoRecord; + + for (const auto &[key, value] : record) { + auto &data = (*protoRecord.mutable_data())[key]; + + if (std::holds_alternative(value)) { + data.set_sint64(std::get(value)); + } else if (std::holds_alternative(value)) { + data.set_double_(std::get(value)); + } else if (std::holds_alternative(value)) { + data.set_bool_(std::get(value)); + } else if (std::holds_alternative(value)) { + data.set_string(std::get(value)); + } else if (std::holds_alternative>(value)) { + auto &list = 
*data.mutable_sint64_list(); + for (int val : std::get>(value)) { + list.add_vals(val); + } + } else if (std::holds_alternative>(value)) { + auto &list = *data.mutable_double_list(); + for (double val : std::get>(value)) { + list.add_vals(val); + } + } else if (std::holds_alternative>(value)) { + auto &list = *data.mutable_bool_list(); + for (bool val : std::get>(value)) { + list.add_vals(val); + } + } else if (std::holds_alternative>(value)) { + auto &list = *data.mutable_string_list(); + for (const auto &val : std::get>(value)) { + list.add_vals(val); + } + } + } + + return protoRecord; +} + +flwr_local::ConfigsRecord +configs_record_from_proto(const flwr::proto::ConfigsRecord &protoRecord) { + flwr_local::ConfigsRecord record; + + for (const auto &[key, value] : protoRecord.data()) { + if (value.has_sint64_list()) { + std::vector int_list; + for (const auto sint : value.sint64_list().vals()) { + int_list.push_back((int)sint); + } + record[key] = int_list; + } else if (value.has_double_list()) { + std::vector double_list; + for (const auto proto_double : value.double_list().vals()) { + double_list.push_back((double)proto_double); + } + record[key] = double_list; + } else if (value.has_bool_list()) { + std::vector tmp_list; + for (const auto proto_val : value.bool_list().vals()) { + tmp_list.push_back((bool)proto_val); + } + record[key] = tmp_list; + } else if (value.has_bytes_list()) { + std::vector tmp_list; + for (const auto proto_val : value.bytes_list().vals()) { + tmp_list.push_back(proto_val); + } + record[key] = tmp_list; + } else if (value.has_string_list()) { + std::vector tmp_list; + for (const auto proto_val : value.bytes_list().vals()) { + tmp_list.push_back(proto_val); + } + record[key] = tmp_list; + } else if (value.has_sint64()) { + record[key] = (int)value.sint64(); + } else if (value.has_double_()) { + record[key] = (double)value.double_(); + } else if (value.has_bool_()) { + record[key] = value.bool_(); + } else if (value.has_bytes()) { + 
record[key] = value.bytes(); + } else if (value.has_string()) { + record[key] = value.string(); + } + } + return record; +} + +flwr_local::Parameters +parametersrecord_to_parameters(const flwr_local::ParametersRecord &record, + bool keep_input) { + std::list tensors; + std::string tensor_type; + + for (const auto &[key, array] : record) { + tensors.push_back(array.data); + + if (tensor_type.empty()) { + tensor_type = array.stype; + } + } + + return flwr_local::Parameters(tensors, tensor_type); +} + +flwr_local::EvaluateIns +recordset_to_evaluate_ins(const flwr_local::RecordSet &recordset, + bool keep_input) { + auto parameters_record = + recordset.getParametersRecords().at("evaluateins.parameters"); + + flwr_local::Parameters params = + parametersrecord_to_parameters(parameters_record, keep_input); + + auto configs_record = recordset.getConfigsRecords().at("evaluateins.config"); + flwr_local::Config config_dict; + + for (const auto &[key, value] : configs_record) { + flwr_local::Scalar scalar; + + std::visit( + [&scalar](auto &&arg) { + using T = std::decay_t; + if constexpr (std::is_same_v) { + scalar.setInt(arg); + } else if constexpr (std::is_same_v) { + scalar.setDouble(arg); + } else if constexpr (std::is_same_v) { + scalar.setString(arg); + } else if constexpr (std::is_same_v) { + scalar.setBool(arg); + } else if constexpr (std::is_same_v>) { + } else if constexpr (std::is_same_v>) { + } else if constexpr (std::is_same_v>) { + } else if constexpr (std::is_same_v>) { + } + }, + value); + + config_dict[key] = scalar; + } + + return flwr_local::EvaluateIns(params, config_dict); +} + +flwr_local::ConfigsRecord +metrics_to_config_record(const flwr_local::Metrics metrics) { + flwr_local::ConfigsRecord config_record; + for (const auto &[key, value] : metrics) { + flwr_local::Scalar scalar_value = value; + if (scalar_value.getBool().has_value()) { + config_record[key] = scalar_value.getBool().value(); + } else if (scalar_value.getBytes().has_value()) { + 
config_record[key] = scalar_value.getBytes().value(); + } else if (scalar_value.getDouble().has_value()) { + config_record[key] = scalar_value.getDouble().value(); + } else if (scalar_value.getInt().has_value()) { + config_record[key] = scalar_value.getInt().value(); + } else if (scalar_value.getString().has_value()) { + config_record[key] = scalar_value.getString().value(); + } else { + config_record[key] = ""; + } + } + return config_record; +} + +flwr_local::FitIns recordset_to_fit_ins(const flwr_local::RecordSet &recordset, + bool keep_input) { + auto parameters_record = + recordset.getParametersRecords().at("fitins.parameters"); + + flwr_local::Parameters params = + parametersrecord_to_parameters(parameters_record, keep_input); + + auto configs_record = recordset.getConfigsRecords().at("fitins.config"); + flwr_local::Config config_dict; + + for (const auto &[key, value] : configs_record) { + flwr_local::Scalar scalar; + + std::visit( + [&scalar](auto &&arg) { + using T = std::decay_t; + if constexpr (std::is_same_v) { + scalar.setInt(arg); + } else if constexpr (std::is_same_v) { + scalar.setDouble(arg); + } else if constexpr (std::is_same_v) { + scalar.setString(arg); + } else if constexpr (std::is_same_v) { + scalar.setBool(arg); + } else if constexpr (std::is_same_v>) { + } else if constexpr (std::is_same_v>) { + } else if constexpr (std::is_same_v>) { + } else if constexpr (std::is_same_v>) { + } + }, + value); + + config_dict[key] = scalar; + } + + return flwr_local::FitIns(params, config_dict); +} + +flwr_local::ParametersRecord +parameters_to_parametersrecord(const flwr_local::Parameters ¶meters) { + flwr_local::ParametersRecord record; + const std::list tensors = parameters.getTensors(); + const std::string tensor_type = parameters.getTensor_type(); + + int idx = 0; + for (const auto &tensor : tensors) { + flwr_local::Array array{tensor_type, std::vector(), tensor_type, + tensor}; + record[std::to_string(idx++)] = array; + } + + return record; +} + 
+flwr_local::RecordSet recordset_from_get_parameters_res( + const flwr_local::ParametersRes &get_parameters_res) { + std::map parameters_record = { + {"getparametersres.parameters", + parameters_to_parametersrecord(get_parameters_res.getParameters())}}; + + std::map configs_record = { + {"getparametersres.status", {{"code", 0}, {"message", "Success"}}}}; + + flwr_local::RecordSet recordset = flwr_local::RecordSet(); + + recordset.setParametersRecords(parameters_record); + recordset.setConfigsRecords(configs_record); + + return recordset; +} + +flwr_local::RecordSet recordset_from_fit_res(const flwr_local::FitRes &fitres) { + std::map parameters_record = { + {"fitres.parameters", + parameters_to_parametersrecord(fitres.getParameters())}}; + + std::map metrics_record = { + {"fitres.num_examples", {{"num_examples", fitres.getNum_example()}}}}; + + std::map configs_record = { + {"fitres.status", {{"code", 0}, {"message", "Success"}}}}; + + if (fitres.getMetrics() != std::nullopt) { + configs_record["fitres.metrics"] = + metrics_to_config_record(fitres.getMetrics().value()); + } else { + configs_record["fitres.metrics"] = {}; + } + flwr_local::RecordSet recordset = flwr_local::RecordSet(); + + recordset.setParametersRecords(parameters_record); + recordset.setMetricsRecords(metrics_record); + recordset.setConfigsRecords(configs_record); + + return recordset; +} + +flwr_local::RecordSet +recordset_from_evaluate_res(const flwr_local::EvaluateRes &evaluate_res) { + std::map metrics_record = { + {"evaluateres.loss", {{"loss", evaluate_res.getLoss()}}}, + {"evaluateres.num_examples", + {{"num_examples", evaluate_res.getNum_example()}}}}; + + std::map configs_record = { + {"evaluateres.status", {{"code", 0}, {"message", "Success"}}}}; + + if (evaluate_res.getMetrics() != std::nullopt) { + configs_record["evaluateres.metrics"] = + metrics_to_config_record(evaluate_res.getMetrics().value()); + } else { + configs_record["evaluateres.metrics"] = {}; + } + + flwr_local::RecordSet 
recordset = flwr_local::RecordSet(); + + recordset.setMetricsRecords(metrics_record); + recordset.setConfigsRecords(configs_record); + + return recordset; +} + +flwr_local::RecordSet +recordset_from_proto(const flwr::proto::RecordSet &recordset) { + + std::map parametersRecords; + std::map metricsRecords; + std::map configsRecords; + + for (const auto &[key, param_record] : recordset.parameters()) { + parametersRecords[key] = parameters_record_from_proto(param_record); + } + + for (const auto &[key, metrics_record] : recordset.metrics()) { + metricsRecords[key] = metrics_record_from_proto(metrics_record); + } + + for (const auto &[key, configs_record] : recordset.configs()) { + configsRecords[key] = configs_record_from_proto(configs_record); + } + + return flwr_local::RecordSet(parametersRecords, metricsRecords, + configsRecords); +} + +flwr::proto::RecordSet +recordset_to_proto(const flwr_local::RecordSet &recordset) { + flwr::proto::RecordSet proto_recordset; + + for (const auto &[key, param_record] : recordset.getParametersRecords()) { + (*(proto_recordset.mutable_parameters()))[key] = + parameters_record_to_proto(param_record); + } + + for (const auto &[key, metrics_record] : recordset.getMetricsRecords()) { + (*(proto_recordset.mutable_metrics()))[key] = + metrics_record_to_proto(metrics_record); + } + + for (const auto &[key, configs_record] : recordset.getConfigsRecords()) { + (*(proto_recordset.mutable_configs()))[key] = + configs_record_to_proto(configs_record); + } + + return proto_recordset; +} diff --git a/src/cc/flwr/src/start.cc b/src/cc/flwr/src/start.cc index 52f193a09af4..06b520ba8a06 100644 --- a/src/cc/flwr/src/start.cc +++ b/src/cc/flwr/src/start.cc @@ -3,92 +3,31 @@ // cppcheck-suppress unusedFunction void start::start_client(std::string server_address, flwr_local::Client *client, int grpc_max_message_length) { - while (true) { - int sleep_duration = 0; - - // Set channel parameters - grpc::ChannelArguments args; - 
args.SetMaxReceiveMessageSize(grpc_max_message_length); - args.SetMaxSendMessageSize(grpc_max_message_length); - - // Establish an insecure gRPC connection to a gRPC server - std::shared_ptr channel = grpc::CreateCustomChannel( - server_address, grpc::InsecureChannelCredentials(), args); - - // Create stub - std::unique_ptr stub_ = - flwr::proto::FlowerService::NewStub(channel); - - // Read and write messages - grpc::ClientContext context; - std::shared_ptr> - reader_writer(stub_->Join(&context)); - flwr::proto::ServerMessage sm; - while (reader_writer->Read(&sm)) { - std::tuple receive = - handle(client, sm); - sleep_duration = std::get<1>(receive); - reader_writer->Write(std::get<0>(receive)); - if (std::get<2>(receive) == false) { - break; - } - } - reader_writer->WritesDone(); - - // Check connection status - grpc::Status status = reader_writer->Finish(); - - if (sleep_duration == 0) { - std::cout << "Disconnect and shut down." << std::endl; - break; - } - // Sleep and reconnect afterwards - // std::cout << "Disconnect, then re-establish connection after" << - // sleep_duration << "second(s)" << std::endl; Sleep(sleep_duration * 1000); - } -} + gRPCRereCommunicator communicator(server_address, grpc_max_message_length); -// cppcheck-suppress unusedFunction -void start::start_rere_client(std::string server_address, - flwr_local::Client *client, - int grpc_max_message_length) { while (true) { int sleep_duration = 0; - // Set channel parameters - grpc::ChannelArguments args; - args.SetMaxReceiveMessageSize(grpc_max_message_length); - args.SetMaxSendMessageSize(grpc_max_message_length); - - // Establish an insecure gRPC connection to a gRPC server - std::shared_ptr channel = grpc::CreateCustomChannel( - server_address, grpc::InsecureChannelCredentials(), args); - - // Create stub - std::unique_ptr stub_ = - flwr::proto::Fleet::NewStub(channel); - - // Read and write messages - - create_node(stub_); + create_node(&communicator); while (true) { - auto task_ins = 
receive(stub_); + auto task_ins = receive(&communicator); if (!task_ins) { std::this_thread::sleep_for(std::chrono::seconds(3)); continue; } + auto [task_res, sleep_duration, keep_going] = handle_task(client, task_ins.value()); - send(stub_, task_res); + + send(&communicator, task_res); if (!keep_going) { break; } } - delete_node(stub_); + delete_node(&communicator); if (sleep_duration == 0) { std::cout << "Disconnect and shut down." << std::endl; break; diff --git a/src/cc/flwr/src/task_handler.cc b/src/cc/flwr/src/task_handler.cc deleted file mode 100644 index b356ea12dd66..000000000000 --- a/src/cc/flwr/src/task_handler.cc +++ /dev/null @@ -1,52 +0,0 @@ -#include "task_handler.h" - -bool validate_task_ins(const flwr::proto::TaskIns &task_ins, - const bool discard_reconnect_ins) { -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" - return !(!task_ins.has_task() || - (!task_ins.task().has_legacy_server_message() && - !task_ins.task().has_sa()) || - (discard_reconnect_ins && - task_ins.task().legacy_server_message().has_reconnect_ins())); -#pragma GCC diagnostic pop -} - -bool validate_task_res(const flwr::proto::TaskRes &task_res) { - // Retrieve initialized fields in TaskRes - return (task_res.task_id().empty() && task_res.group_id().empty() && - task_res.workload_id() == 0 && !task_res.task().has_producer() && - !task_res.task().has_producer() && !task_res.task().has_consumer() && - task_res.task().ancestry_size() == 0); -} - -flwr::proto::TaskRes -configure_task_res(const flwr::proto::TaskRes &task_res, - const flwr::proto::TaskIns &ref_task_ins, - const flwr::proto::Node &producer) { - flwr::proto::TaskRes result_task_res; - - // Setting scalar fields - result_task_res.set_task_id(""); // This will be generated by the server - result_task_res.set_group_id(ref_task_ins.group_id()); - result_task_res.set_workload_id(ref_task_ins.workload_id()); - - // Merge the task from the input task_res - *result_task_res.mutable_task() 
= task_res.task(); - - // Construct and set the producer and consumer for the task - std::unique_ptr new_producer = - std::make_unique(producer); - result_task_res.mutable_task()->set_allocated_producer( - new_producer.release()); - - std::unique_ptr new_consumer = - std::make_unique(ref_task_ins.task().producer()); - result_task_res.mutable_task()->set_allocated_consumer( - new_consumer.release()); - - // Set ancestry in the task - result_task_res.mutable_task()->add_ancestry(ref_task_ins.task_id()); - - return result_task_res; -} diff --git a/src/docker/client/Dockerfile b/src/docker/client/Dockerfile deleted file mode 100644 index 0755a7989281..000000000000 --- a/src/docker/client/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright 2023 Flower Labs GmbH. All Rights Reserved. - -ARG BASE_REPOSITORY=flwr/base -ARG BASE_IMAGE_TAG -FROM $BASE_REPOSITORY:$BASE_IMAGE_TAG - -ARG FLWR_VERSION -RUN python -m pip install -U --no-cache-dir flwr[rest]==${FLWR_VERSION} diff --git a/src/docker/server/Dockerfile b/src/docker/server/Dockerfile deleted file mode 100644 index faa9cf2e56fe..000000000000 --- a/src/docker/server/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2023 Flower Labs GmbH. All Rights Reserved. 
- -ARG BASE_REPOSITORY=flwr/base -ARG BASE_IMAGE_TAG=py3.11-ubuntu22.04 -FROM $BASE_REPOSITORY:$BASE_IMAGE_TAG as server - -WORKDIR /app -ARG FLWR_VERSION -RUN python -m pip install -U --no-cache-dir flwr[rest]==${FLWR_VERSION} -ENTRYPOINT ["python", "-c", "from flwr.server import run_superlink; run_superlink()"] - -# Test if Flower can be successfully installed and imported -FROM server as test -RUN python -c "from flwr.server import run_superlink" diff --git a/src/py/flwr_example/pytorch_imagenet/run-clients.sh b/src/docker/superlink/Dockerfile old mode 100755 new mode 100644 similarity index 51% rename from src/py/flwr_example/pytorch_imagenet/run-clients.sh rename to src/docker/superlink/Dockerfile index f907ac67db12..acf06f66f2fb --- a/src/py/flwr_example/pytorch_imagenet/run-clients.sh +++ b/src/docker/superlink/Dockerfile @@ -1,6 +1,4 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,19 +13,20 @@ # limitations under the License. # ============================================================================== -set -e +ARG BASE_REPOSITORY=flwr/base +ARG PYTHON_VERSION=3.11 +ARG UBUNTU_VERSION=ubuntu22.04 +FROM $BASE_REPOSITORY:py${PYTHON_VERSION}-${UBUNTU_VERSION} -SERVER_ADDRESS="[::]:8080" -NUM_CLIENTS=40 -IMAGENET_PATH="~/Downloads/imagenet-object-localization-challenge/" +ARG FLWR_PACKAGE=flwr +ARG FLWR_VERSION +RUN python -m pip install -U --no-cache-dir \ + ${FLWR_PACKAGE}==${FLWR_VERSION} && \ + # Without pyenv rehash the executable cannot be found. + # pyenv rehash is usually called via the shell by adding + # `pyenv init -` in the shell profile, but that doesn't work + # well in docker + pyenv rehash -echo "Starting $NUM_CLIENTS clients." 
-for ((i = 0; i < $NUM_CLIENTS; i++)) -do - echo "Starting client(cid=$i) with partition $i out of $NUM_CLIENTS clients." - python -m flwr_example.pytorch_imagenet.client \ - --cid=$i \ - --server_address=$SERVER_ADDRESS \ - --data_path=$IMAGENET_PATH & -done -echo "Started $NUM_CLIENTS clients." +WORKDIR /app +ENTRYPOINT ["flower-superlink"] diff --git a/src/py/flwr_example/pytorch_save_weights/run-clients.sh b/src/docker/supernode/Dockerfile old mode 100755 new mode 100644 similarity index 61% rename from src/py/flwr_example/pytorch_save_weights/run-clients.sh rename to src/docker/supernode/Dockerfile index 9065415148a0..8117dcc295df --- a/src/py/flwr_example/pytorch_save_weights/run-clients.sh +++ b/src/docker/supernode/Dockerfile @@ -1,6 +1,4 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,14 +13,16 @@ # limitations under the License. # ============================================================================== -set -e +ARG BASE_REPOSITORY=flwr/base +ARG PYTHON_VERSION=3.11 +ARG UBUNTU_VERSION=ubuntu22.04 +FROM $BASE_REPOSITORY:py${PYTHON_VERSION}-${UBUNTU_VERSION} -NUM_CLIENTS=2 +ARG FLWR_PACKAGE=flwr +ARG FLWR_VERSION +RUN python -m pip install -U --no-cache-dir \ + ${FLWR_PACKAGE}==${FLWR_VERSION} && \ + pyenv rehash -echo "Starting $NUM_CLIENTS clients." -for ((i = 0; i < $NUM_CLIENTS; i++)) -do - echo "Starting client(cid=$i) with partition $i out of $NUM_CLIENTS clients." - python -m flwr_example.pytorch_save_weights.client & -done -echo "Started $NUM_CLIENTS clients." 
+WORKDIR /app +ENTRYPOINT ["flower-client-app"] diff --git a/src/kotlin/gradle.properties b/src/kotlin/gradle.properties index 151a5a4112bb..c792dc1c822b 100644 --- a/src/kotlin/gradle.properties +++ b/src/kotlin/gradle.properties @@ -42,6 +42,6 @@ POM_SCM_URL=https://github.com/adap/flower/ POM_SCM_CONNECTION=scm:git:git://github.com/adap/flower.git POM_SCM_DEV_CONNECTION=scm:git:ssh://git@github.com/adap/flower.git -POM_DEVELOPER_ID=flower.dev +POM_DEVELOPER_ID=flower.ai POM_DEVELOPER_NAME=The Flower Authors POM_DEVELOPER_URL=https://github.com/adap/ diff --git a/src/proto/flwr/proto/driver.proto b/src/proto/flwr/proto/driver.proto index bc0062c4a51f..54e6b6b41b68 100644 --- a/src/proto/flwr/proto/driver.proto +++ b/src/proto/flwr/proto/driver.proto @@ -35,7 +35,10 @@ service Driver { } // CreateRun -message CreateRunRequest {} +message CreateRunRequest { + string fab_id = 1; + string fab_version = 2; +} message CreateRunResponse { sint64 run_id = 1; } // GetNodes messages diff --git a/src/proto/flwr/proto/fleet.proto b/src/proto/flwr/proto/fleet.proto index c900a3b1148d..df6b5843023d 100644 --- a/src/proto/flwr/proto/fleet.proto +++ b/src/proto/flwr/proto/fleet.proto @@ -23,6 +23,7 @@ import "flwr/proto/task.proto"; service Fleet { rpc CreateNode(CreateNodeRequest) returns (CreateNodeResponse) {} rpc DeleteNode(DeleteNodeRequest) returns (DeleteNodeResponse) {} + rpc Ping(PingRequest) returns (PingResponse) {} // Retrieve one or more tasks, if possible // @@ -33,16 +34,25 @@ service Fleet { // // HTTP API path: /api/v1/fleet/push-task-res rpc PushTaskRes(PushTaskResRequest) returns (PushTaskResResponse) {} + + rpc GetRun(GetRunRequest) returns (GetRunResponse) {} } // CreateNode messages -message CreateNodeRequest {} +message CreateNodeRequest { double ping_interval = 1; } message CreateNodeResponse { Node node = 1; } // DeleteNode messages message DeleteNodeRequest { Node node = 1; } message DeleteNodeResponse {} +// Ping messages +message PingRequest { + Node 
node = 1; + double ping_interval = 2; +} +message PingResponse { bool success = 1; } + // PullTaskIns messages message PullTaskInsRequest { Node node = 1; @@ -60,4 +70,13 @@ message PushTaskResResponse { map results = 2; } +// GetRun messages +message Run { + sint64 run_id = 1; + string fab_id = 2; + string fab_version = 3; +} +message GetRunRequest { sint64 run_id = 1; } +message GetRunResponse { Run run = 1; } + message Reconnect { uint64 reconnect = 1; } diff --git a/src/proto/flwr/proto/task.proto b/src/proto/flwr/proto/task.proto index 423df76f1335..cf77d110acab 100644 --- a/src/proto/flwr/proto/task.proto +++ b/src/proto/flwr/proto/task.proto @@ -25,13 +25,14 @@ import "flwr/proto/error.proto"; message Task { Node producer = 1; Node consumer = 2; - string created_at = 3; + double created_at = 3; string delivered_at = 4; - string ttl = 5; - repeated string ancestry = 6; - string task_type = 7; - RecordSet recordset = 8; - Error error = 9; + double pushed_at = 5; + double ttl = 6; + repeated string ancestry = 7; + string task_type = 8; + RecordSet recordset = 9; + Error error = 10; } message TaskIns { diff --git a/src/py/flwr/cli/flower_toml.py b/src/py/flwr/cli/config_utils.py similarity index 75% rename from src/py/flwr/cli/flower_toml.py rename to src/py/flwr/cli/config_utils.py index 103f83532054..bca35a51dde5 100644 --- a/src/py/flwr/cli/flower_toml.py +++ b/src/py/flwr/cli/config_utils.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""Utility to validate the `flower.toml` file.""" +"""Utility to validate the `pyproject.toml` file.""" -import os +from pathlib import Path from typing import Any, Dict, List, Optional, Tuple import tomli @@ -23,9 +23,9 @@ def load_and_validate_with_defaults( - path: Optional[str] = None, + path: Optional[Path] = None, ) -> Tuple[Optional[Dict[str, Any]], List[str], List[str]]: - """Load and validate flower.toml as dict. + """Load and validate pyproject.toml as dict. Returns ------- @@ -37,7 +37,8 @@ def load_and_validate_with_defaults( if config is None: errors = [ - "Project configuration could not be loaded. flower.toml does not exist." + "Project configuration could not be loaded. " + "`pyproject.toml` does not exist." ] return (None, errors, []) @@ -57,24 +58,25 @@ def load_and_validate_with_defaults( return (config, errors, warnings) -def load(path: Optional[str] = None) -> Optional[Dict[str, Any]]: - """Load flower.toml and return as dict.""" +def load(path: Optional[Path] = None) -> Optional[Dict[str, Any]]: + """Load pyproject.toml and return as dict.""" if path is None: - cur_dir = os.getcwd() - toml_path = os.path.join(cur_dir, "flower.toml") + cur_dir = Path.cwd() + toml_path = cur_dir / "pyproject.toml" else: toml_path = path - if not os.path.isfile(toml_path): + if not toml_path.is_file(): return None - with open(toml_path, encoding="utf-8") as toml_file: + with toml_path.open(encoding="utf-8") as toml_file: data = tomli.loads(toml_file.read()) return data +# pylint: disable=too-many-branches def validate_fields(config: Dict[str, Any]) -> Tuple[bool, List[str], List[str]]: - """Validate flower.toml fields.""" + """Validate pyproject.toml fields.""" errors = [] warnings = [] @@ -94,19 +96,22 @@ def validate_fields(config: Dict[str, Any]) -> Tuple[bool, List[str], List[str]] if "flower" not in config: errors.append("Missing [flower] section") - elif "components" not in 
config["flower"]: - errors.append("Missing [flower.components] section") else: - if "serverapp" not in config["flower"]["components"]: - errors.append('Property "serverapp" missing in [flower.components]') - if "clientapp" not in config["flower"]["components"]: - errors.append('Property "clientapp" missing in [flower.components]') + if "publisher" not in config["flower"]: + errors.append('Property "publisher" missing in [flower]') + if "components" not in config["flower"]: + errors.append("Missing [flower.components] section") + else: + if "serverapp" not in config["flower"]["components"]: + errors.append('Property "serverapp" missing in [flower.components]') + if "clientapp" not in config["flower"]["components"]: + errors.append('Property "clientapp" missing in [flower.components]') return len(errors) == 0, errors, warnings def validate(config: Dict[str, Any]) -> Tuple[bool, List[str], List[str]]: - """Validate flower.toml.""" + """Validate pyproject.toml.""" is_valid, errors, warnings = validate_fields(config) if not is_valid: diff --git a/src/py/flwr/cli/flower_toml_test.py b/src/py/flwr/cli/config_utils_test.py similarity index 61% rename from src/py/flwr/cli/flower_toml_test.py rename to src/py/flwr/cli/config_utils_test.py index 72a52e4e8b9b..b47206249dfc 100644 --- a/src/py/flwr/cli/flower_toml_test.py +++ b/src/py/flwr/cli/config_utils_test.py @@ -16,17 +16,35 @@ import os import textwrap +from pathlib import Path from typing import Any, Dict -from .flower_toml import load, validate, validate_fields +from .config_utils import load, validate, validate_fields -def test_load_flower_toml_load_from_cwd(tmp_path: str) -> None: +def test_load_pyproject_toml_load_from_cwd(tmp_path: Path) -> None: """Test if load_template returns a string.""" # Prepare - flower_toml_content = """ + pyproject_toml_content = """ + [build-system] + requires = ["hatchling"] + build-backend = "hatchling.build" + [project] name = "fedgpt" + version = "1.0.0" + description = "" + authors = 
[ + { name = "The Flower Authors", email = "hello@flower.ai" }, + ] + license = {text = "Apache License (2.0)"} + dependencies = [ + "flwr[simulation]>=1.8.0,<2.0", + "numpy>=1.21.0", + ] + + [flower] + publisher = "flwrlabs" [flower.components] serverapp = "fedgpt.server:app" @@ -39,10 +57,17 @@ def test_load_flower_toml_load_from_cwd(tmp_path: str) -> None: count = 10 # optional """ expected_config = { + "build-system": {"build-backend": "hatchling.build", "requires": ["hatchling"]}, "project": { "name": "fedgpt", + "version": "1.0.0", + "description": "", + "authors": [{"email": "hello@flower.ai", "name": "The Flower Authors"}], + "license": {"text": "Apache License (2.0)"}, + "dependencies": ["flwr[simulation]>=1.8.0,<2.0", "numpy>=1.21.0"], }, "flower": { + "publisher": "flwrlabs", "components": { "serverapp": "fedgpt.server:app", "clientapp": "fedgpt.client:app", @@ -55,13 +80,13 @@ def test_load_flower_toml_load_from_cwd(tmp_path: str) -> None: } # Current directory - origin = os.getcwd() + origin = Path.cwd() try: # Change into the temporary directory os.chdir(tmp_path) - with open("flower.toml", "w", encoding="utf-8") as f: - f.write(textwrap.dedent(flower_toml_content)) + with open("pyproject.toml", "w", encoding="utf-8") as f: + f.write(textwrap.dedent(pyproject_toml_content)) # Execute config = load() @@ -72,12 +97,29 @@ def test_load_flower_toml_load_from_cwd(tmp_path: str) -> None: os.chdir(origin) -def test_load_flower_toml_from_path(tmp_path: str) -> None: +def test_load_pyproject_toml_from_path(tmp_path: Path) -> None: """Test if load_template returns a string.""" # Prepare - flower_toml_content = """ + pyproject_toml_content = """ + [build-system] + requires = ["hatchling"] + build-backend = "hatchling.build" + [project] name = "fedgpt" + version = "1.0.0" + description = "" + authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, + ] + license = {text = "Apache License (2.0)"} + dependencies = [ + 
"flwr[simulation]>=1.8.0,<2.0", + "numpy>=1.21.0", + ] + + [flower] + publisher = "flwrlabs" [flower.components] serverapp = "fedgpt.server:app" @@ -90,10 +132,17 @@ def test_load_flower_toml_from_path(tmp_path: str) -> None: count = 10 # optional """ expected_config = { + "build-system": {"build-backend": "hatchling.build", "requires": ["hatchling"]}, "project": { "name": "fedgpt", + "version": "1.0.0", + "description": "", + "authors": [{"email": "hello@flower.ai", "name": "The Flower Authors"}], + "license": {"text": "Apache License (2.0)"}, + "dependencies": ["flwr[simulation]>=1.8.0,<2.0", "numpy>=1.21.0"], }, "flower": { + "publisher": "flwrlabs", "components": { "serverapp": "fedgpt.server:app", "clientapp": "fedgpt.client:app", @@ -111,11 +160,11 @@ def test_load_flower_toml_from_path(tmp_path: str) -> None: try: # Change into the temporary directory os.chdir(tmp_path) - with open("flower.toml", "w", encoding="utf-8") as f: - f.write(textwrap.dedent(flower_toml_content)) + with open("pyproject.toml", "w", encoding="utf-8") as f: + f.write(textwrap.dedent(pyproject_toml_content)) # Execute - config = load(path=os.path.join(tmp_path, "flower.toml")) + config = load(path=tmp_path / "pyproject.toml") # Assert assert config == expected_config @@ -123,8 +172,8 @@ def test_load_flower_toml_from_path(tmp_path: str) -> None: os.chdir(origin) -def test_validate_flower_toml_fields_empty() -> None: - """Test that validate_flower_toml_fields fails correctly.""" +def test_validate_pyproject_toml_fields_empty() -> None: + """Test that validate_pyproject_toml_fields fails correctly.""" # Prepare config: Dict[str, Any] = {} @@ -137,8 +186,8 @@ def test_validate_flower_toml_fields_empty() -> None: assert len(warnings) == 0 -def test_validate_flower_toml_fields_no_flower() -> None: - """Test that validate_flower_toml_fields fails correctly.""" +def test_validate_pyproject_toml_fields_no_flower() -> None: + """Test that validate_pyproject_toml_fields fails correctly.""" # 
Prepare config = { "project": { @@ -159,8 +208,8 @@ def test_validate_flower_toml_fields_no_flower() -> None: assert len(warnings) == 0 -def test_validate_flower_toml_fields_no_flower_components() -> None: - """Test that validate_flower_toml_fields fails correctly.""" +def test_validate_pyproject_toml_fields_no_flower_components() -> None: + """Test that validate_pyproject_toml_fields fails correctly.""" # Prepare config = { "project": { @@ -178,12 +227,12 @@ def test_validate_flower_toml_fields_no_flower_components() -> None: # Assert assert not is_valid - assert len(errors) == 1 + assert len(errors) == 2 assert len(warnings) == 0 -def test_validate_flower_toml_fields_no_server_and_client_app() -> None: - """Test that validate_flower_toml_fields fails correctly.""" +def test_validate_pyproject_toml_fields_no_server_and_client_app() -> None: + """Test that validate_pyproject_toml_fields fails correctly.""" # Prepare config = { "project": { @@ -201,12 +250,12 @@ def test_validate_flower_toml_fields_no_server_and_client_app() -> None: # Assert assert not is_valid - assert len(errors) == 2 + assert len(errors) == 3 assert len(warnings) == 0 -def test_validate_flower_toml_fields() -> None: - """Test that validate_flower_toml_fields succeeds correctly.""" +def test_validate_pyproject_toml_fields() -> None: + """Test that validate_pyproject_toml_fields succeeds correctly.""" # Prepare config = { "project": { @@ -216,7 +265,10 @@ def test_validate_flower_toml_fields() -> None: "license": "", "authors": [], }, - "flower": {"components": {"serverapp": "", "clientapp": ""}}, + "flower": { + "publisher": "flwrlabs", + "components": {"serverapp": "", "clientapp": ""}, + }, } # Execute @@ -228,8 +280,8 @@ def test_validate_flower_toml_fields() -> None: assert len(warnings) == 0 -def test_validate_flower_toml() -> None: - """Test that validate_flower_toml succeeds correctly.""" +def test_validate_pyproject_toml() -> None: + """Test that validate_pyproject_toml succeeds 
correctly.""" # Prepare config = { "project": { @@ -240,10 +292,11 @@ def test_validate_flower_toml() -> None: "authors": [], }, "flower": { + "publisher": "flwrlabs", "components": { "serverapp": "flwr.cli.run:run", "clientapp": "flwr.cli.run:run", - } + }, }, } @@ -256,8 +309,8 @@ def test_validate_flower_toml() -> None: assert not warnings -def test_validate_flower_toml_fail() -> None: - """Test that validate_flower_toml fails correctly.""" +def test_validate_pyproject_toml_fail() -> None: + """Test that validate_pyproject_toml fails correctly.""" # Prepare config = { "project": { @@ -268,10 +321,11 @@ def test_validate_flower_toml_fail() -> None: "authors": [], }, "flower": { + "publisher": "flwrlabs", "components": { "serverapp": "flwr.cli.run:run", "clientapp": "flwr.cli.run:runa", - } + }, }, } diff --git a/src/py/flwr/cli/example.py b/src/py/flwr/cli/example.py index 625ca8729640..4790e72d48bf 100644 --- a/src/py/flwr/cli/example.py +++ b/src/py/flwr/cli/example.py @@ -39,7 +39,9 @@ def example() -> None: with urllib.request.urlopen(examples_directory_url) as res: data = json.load(res) example_names = [ - item["path"] for item in data["tree"] if item["path"] not in [".gitignore"] + item["path"] + for item in data["tree"] + if item["path"] not in [".gitignore", "doc"] ] example_name = prompt_options( diff --git a/src/py/flwr/cli/new/new.py b/src/py/flwr/cli/new/new.py index 6cb44d84c4d4..f008435a0601 100644 --- a/src/py/flwr/cli/new/new.py +++ b/src/py/flwr/cli/new/new.py @@ -15,6 +15,7 @@ """Flower command line interface `new` command.""" import os +import re from enum import Enum from string import Template from typing import Dict, Optional @@ -22,7 +23,12 @@ import typer from typing_extensions import Annotated -from ..utils import prompt_options, prompt_text +from ..utils import ( + is_valid_project_name, + prompt_options, + prompt_text, + sanitize_project_name, +) class MlFramework(str, Enum): @@ -31,6 +37,7 @@ class MlFramework(str, Enum): NUMPY = 
"NumPy" PYTORCH = "PyTorch" TENSORFLOW = "TensorFlow" + SKLEARN = "sklearn" class TemplateNotFound(Exception): @@ -53,8 +60,9 @@ def render_template(template: str, data: Dict[str, str]) -> str: """Render template.""" tpl_file = load_template(template) tpl = Template(tpl_file) - result = tpl.substitute(data) - return result + if ".gitignore" not in template: + return tpl.substitute(data) + return tpl.template def create_file(file_path: str, content: str) -> None: @@ -79,18 +87,24 @@ def new( Optional[MlFramework], typer.Option(case_sensitive=False, help="The ML framework to use"), ] = None, + username: Annotated[ + Optional[str], + typer.Option(case_sensitive=False, help="The Flower username of the author"), + ] = None, ) -> None: """Create new Flower project.""" - print( - typer.style( - f"🔨 Creating Flower project {project_name}...", - fg=typer.colors.GREEN, - bold=True, + if project_name is None: + project_name = prompt_text("Please provide the project name") + if not is_valid_project_name(project_name): + project_name = prompt_text( + "Please provide a name that only contains " + "characters in {'-', a-zA-Z', '0-9'}", + predicate=is_valid_project_name, + default=sanitize_project_name(project_name), ) - ) - if project_name is None: - project_name = prompt_text("Please provide project name") + if username is None: + username = prompt_text("Please provide your Flower username") if framework is not None: framework_str = str(framework.value) @@ -108,30 +122,50 @@ def new( framework_str = framework_str.lower() + print( + typer.style( + f"\n🔨 Creating Flower project {project_name}...", + fg=typer.colors.GREEN, + bold=True, + ) + ) + # Set project directory path cwd = os.getcwd() - pnl = project_name.lower() - project_dir = os.path.join(cwd, pnl) + package_name = re.sub(r"[-_.]+", "-", project_name).lower() + import_name = package_name.replace("-", "_") + project_dir = os.path.join(cwd, package_name) # List of files to render files = { + ".gitignore": {"template": 
"app/.gitignore.tpl"}, "README.md": {"template": "app/README.md.tpl"}, - "requirements.txt": {"template": f"app/requirements.{framework_str}.txt.tpl"}, - "flower.toml": {"template": "app/flower.toml.tpl"}, "pyproject.toml": {"template": f"app/pyproject.{framework_str}.toml.tpl"}, - f"{pnl}/__init__.py": {"template": "app/code/__init__.py.tpl"}, - f"{pnl}/server.py": {"template": f"app/code/server.{framework_str}.py.tpl"}, - f"{pnl}/client.py": {"template": f"app/code/client.{framework_str}.py.tpl"}, + f"{import_name}/__init__.py": {"template": "app/code/__init__.py.tpl"}, + f"{import_name}/server.py": { + "template": f"app/code/server.{framework_str}.py.tpl" + }, + f"{import_name}/client.py": { + "template": f"app/code/client.{framework_str}.py.tpl" + }, } - # In case framework is MlFramework.PYTORCH generate additionally the task.py file - if ( - framework_str == MlFramework.PYTORCH.value.lower() - or framework_str == MlFramework.TENSORFLOW.value.lower() - ): - files[f"{pnl}/task.py"] = {"template": f"app/code/task.{framework_str}.py.tpl"} - - context = {"project_name": project_name} + # Depending on the framework, generate task.py file + frameworks_with_tasks = [ + MlFramework.PYTORCH.value.lower(), + MlFramework.TENSORFLOW.value.lower(), + ] + if framework_str in frameworks_with_tasks: + files[f"{import_name}/task.py"] = { + "template": f"app/code/task.{framework_str}.py.tpl" + } + + context = { + "project_name": project_name, + "package_name": package_name, + "import_name": import_name.replace("-", "_"), + "username": username, + } for file_path, value in files.items(): render_and_create( diff --git a/src/py/flwr/cli/new/new_test.py b/src/py/flwr/cli/new/new_test.py index cedcb09b7755..33ad745efa93 100644 --- a/src/py/flwr/cli/new/new_test.py +++ b/src/py/flwr/cli/new/new_test.py @@ -16,6 +16,8 @@ import os +import pytest + from .new import MlFramework, create_file, load_template, new, render_template @@ -35,7 +37,12 @@ def test_render_template() -> None: 
"""Test if a string is correctly substituted.""" # Prepare filename = "app/README.md.tpl" - data = {"project_name": "FedGPT"} + data = { + "project_name": "FedGPT", + "package_name": "fedgpt", + "import_name": "fedgpt", + "username": "flwrlabs", + } # Execute result = render_template(filename, data) @@ -60,42 +67,74 @@ def test_create_file(tmp_path: str) -> None: assert text == "Foobar" -def test_new(tmp_path: str) -> None: - """Test if project is created for framework.""" +def test_new_correct_name(tmp_path: str) -> None: + """Test if project with correct name is created for framework.""" # Prepare - project_name = "FedGPT" framework = MlFramework.PYTORCH - expected_files_top_level = { - "requirements.txt", - "fedgpt", - "README.md", - "flower.toml", - "pyproject.toml", - } - expected_files_module = { - "__init__.py", - "server.py", - "client.py", - "task.py", - } + username = "flwrlabs" + expected_names = [ + ("FedGPT", "fedgpt", "fedgpt"), + ("My-Flower-App", "my-flower-app", "my_flower_app"), + ] + + for project_name, expected_top_level_dir, expected_module_dir in expected_names: + expected_files_top_level = { + expected_module_dir, + "README.md", + "pyproject.toml", + ".gitignore", + } + expected_files_module = { + "__init__.py", + "server.py", + "client.py", + "task.py", + } + + # Current directory + origin = os.getcwd() + + try: + # Change into the temprorary directory + os.chdir(tmp_path) + # Execute + new(project_name=project_name, framework=framework, username=username) + + # Assert + file_list = os.listdir(os.path.join(tmp_path, expected_top_level_dir)) + assert set(file_list) == expected_files_top_level + + file_list = os.listdir( + os.path.join(tmp_path, expected_top_level_dir, expected_module_dir) + ) + assert set(file_list) == expected_files_module + finally: + os.chdir(origin) + + +def test_new_incorrect_name(tmp_path: str) -> None: + """Test if project with incorrect name is created for framework.""" + framework = MlFramework.PYTORCH + username = 
"flwrlabs" + + for project_name in ["My_Flower_App", "My.Flower App"]: + # Current directory + origin = os.getcwd() - # Current directory - origin = os.getcwd() + try: + # Change into the temprorary directory + os.chdir(tmp_path) - try: - # Change into the temprorary directory - os.chdir(tmp_path) + with pytest.raises(OSError) as exc_info: - # Execute - new(project_name=project_name, framework=framework) + # Execute + new( + project_name=project_name, + framework=framework, + username=username, + ) - # Assert - file_list = os.listdir(os.path.join(tmp_path, project_name.lower())) - assert set(file_list) == expected_files_top_level + assert "Failed to read from stdin" in str(exc_info.value) - file_list = os.listdir( - os.path.join(tmp_path, project_name.lower(), project_name.lower()) - ) - assert set(file_list) == expected_files_module - finally: - os.chdir(origin) + finally: + os.chdir(origin) diff --git a/src/py/flwr/cli/new/templates/app/.gitignore.tpl b/src/py/flwr/cli/new/templates/app/.gitignore.tpl new file mode 100644 index 000000000000..68bc17f9ff21 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/.gitignore.tpl @@ -0,0 +1,160 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ diff --git a/src/py/flwr/cli/new/templates/app/README.md.tpl b/src/py/flwr/cli/new/templates/app/README.md.tpl index 516bed0f40c2..f61bd540ac14 100644 --- a/src/py/flwr/cli/new/templates/app/README.md.tpl +++ b/src/py/flwr/cli/new/templates/app/README.md.tpl @@ -3,16 +3,12 @@ ## Install dependencies ```bash -# Using pip pip install . 
- -# Or using Poetry -poetry install ``` ## Run (Simulation Engine) -In the `$project_name` directory, use `flwr run` to run a local simulation: +In the `$import_name` directory, use `flwr run` to run a local simulation: ```bash flwr run diff --git a/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl index 4f2b26ceddea..c68974efaadf 100644 --- a/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl @@ -2,7 +2,7 @@ from flwr.client import NumPyClient, ClientApp -from $project_name.task import ( +from $import_name.task import ( Net, DEVICE, load_data, @@ -15,7 +15,7 @@ from $project_name.task import ( # Define Flower Client and client_fn class FlowerClient(NumPyClient): - def __init__(self, net, trainloader, valloader) -> None: + def __init__(self, net, trainloader, valloader): self.net = net self.trainloader = trainloader self.valloader = valloader @@ -31,7 +31,7 @@ class FlowerClient(NumPyClient): return loss, len(self.valloader.dataset), {"accuracy": accuracy} -def client_fn(cid: str): +def client_fn(cid): # Load model and data net = Net().to(DEVICE) trainloader, valloader = load_data(int(cid), 2) diff --git a/src/py/flwr/cli/new/templates/app/code/client.sklearn.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.sklearn.py.tpl new file mode 100644 index 000000000000..9181389cad1c --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/client.sklearn.py.tpl @@ -0,0 +1,94 @@ +"""$project_name: A Flower / Scikit-Learn app.""" + +import warnings + +import numpy as np +from flwr.client import NumPyClient, ClientApp +from flwr_datasets import FederatedDataset +from sklearn.linear_model import LogisticRegression +from sklearn.metrics import log_loss + + +def get_model_parameters(model): + if model.fit_intercept: + params = [ + model.coef_, + model.intercept_, + ] + else: + params = [model.coef_] + return params + + 
+def set_model_params(model, params): + model.coef_ = params[0] + if model.fit_intercept: + model.intercept_ = params[1] + return model + + +def set_initial_params(model): + n_classes = 10 # MNIST has 10 classes + n_features = 784 # Number of features in dataset + model.classes_ = np.array([i for i in range(10)]) + + model.coef_ = np.zeros((n_classes, n_features)) + if model.fit_intercept: + model.intercept_ = np.zeros((n_classes,)) + + +class FlowerClient(NumPyClient): + def __init__(self, model, X_train, X_test, y_train, y_test): + self.model = model + self.X_train = X_train + self.X_test = X_test + self.y_train = y_train + self.y_test = y_test + + def get_parameters(self, config): + return get_model_parameters(self.model) + + def fit(self, parameters, config): + set_model_params(self.model, parameters) + + # Ignore convergence failure due to low local epochs + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + self.model.fit(self.X_train, self.y_train) + + return get_model_parameters(self.model), len(self.X_train), {} + + def evaluate(self, parameters, config): + set_model_params(self.model, parameters) + + loss = log_loss(self.y_test, self.model.predict_proba(self.X_test)) + accuracy = self.model.score(self.X_test, self.y_test) + + return loss, len(self.X_test), {"accuracy": accuracy} + +fds = FederatedDataset(dataset="mnist", partitioners={"train": 2}) + +def client_fn(cid: str): + dataset = fds.load_partition(int(cid), "train").with_format("numpy") + + X, y = dataset["image"].reshape((len(dataset), -1)), dataset["label"] + + # Split the on edge data: 80% train, 20% test + X_train, X_test = X[: int(0.8 * len(X))], X[int(0.8 * len(X)) :] + y_train, y_test = y[: int(0.8 * len(y))], y[int(0.8 * len(y)) :] + + # Create LogisticRegression Model + model = LogisticRegression( + penalty="l2", + max_iter=1, # local epoch + warm_start=True, # prevent refreshing weights when fitting + ) + + # Setting initial parameters, akin to model.compile for keras 
models + set_initial_params(model) + + return FlowerClient(model, X_train, X_test, y_train, y_test).to_client() + + +# Flower ClientApp +app = ClientApp(client_fn=client_fn) diff --git a/src/py/flwr/cli/new/templates/app/code/server.pytorch.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.pytorch.py.tpl index cb04c052b429..dc635f79a664 100644 --- a/src/py/flwr/cli/new/templates/app/code/server.pytorch.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/server.pytorch.py.tpl @@ -4,7 +4,7 @@ from flwr.common import ndarrays_to_parameters from flwr.server import ServerApp, ServerConfig from flwr.server.strategy import FedAvg -from $project_name.task import Net, get_weights +from $import_name.task import Net, get_weights # Initialize model parameters diff --git a/src/py/flwr/cli/new/templates/app/code/server.sklearn.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.sklearn.py.tpl new file mode 100644 index 000000000000..266a53ac5794 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/server.sklearn.py.tpl @@ -0,0 +1,17 @@ +"""$project_name: A Flower / Scikit-Learn app.""" + +from flwr.server import ServerApp, ServerConfig +from flwr.server.strategy import FedAvg + + +strategy = FedAvg( + fraction_fit=1.0, + fraction_evaluate=1.0, + min_available_clients=2, +) + +# Create ServerApp +app = ServerApp( + config=ServerConfig(num_rounds=3), + strategy=strategy, +) diff --git a/src/py/flwr/cli/new/templates/app/code/server.tensorflow.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.tensorflow.py.tpl index 29b2d09719c1..48a7a223a79d 100644 --- a/src/py/flwr/cli/new/templates/app/code/server.tensorflow.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/server.tensorflow.py.tpl @@ -22,4 +22,3 @@ app = ServerApp( config=ServerConfig(num_rounds=3), strategy=strategy, ) - diff --git a/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl index 82e57388fa3e..b30c65a285b5 100644 --- 
a/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl @@ -16,7 +16,7 @@ DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") class Net(nn.Module): """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')""" - def __init__(self) -> None: + def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) @@ -25,7 +25,7 @@ class Net(nn.Module): self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) - def forward(self, x: torch.Tensor) -> torch.Tensor: + def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) @@ -34,12 +34,12 @@ class Net(nn.Module): return self.fc3(x) -def load_data(partition_id: int, num_partitions: int): +def load_data(partition_id, num_partitions): """Load partition CIFAR10 data.""" fds = FederatedDataset(dataset="cifar10", partitioners={"train": num_partitions}) partition = fds.load_partition(partition_id) # Divide data on each node: 80% train, 20% test - partition_train_test = partition.train_test_split(test_size=0.2) + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) pytorch_transforms = Compose( [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] ) diff --git a/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl index 15d8211a1a25..ac81c02bf6ea 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl @@ -1,19 +1,26 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "$project_name" +[project] +name = "$package_name" version = "1.0.0" description = "" -license = "Apache-2.0" authors = [ - "The Flower Authors ", + { name = "The Flower 
Authors", email = "hello@flower.ai" }, ] -readme = "README.md" +license = {text = "Apache License (2.0)"} +dependencies = [ + "flwr[simulation]>=1.8.0,<2.0", + "numpy>=1.21.0", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[flower] +publisher = "$username" -[tool.poetry.dependencies] -python = "^3.9" -# Mandatory dependencies -numpy = "^1.21.0" -flwr = { version = "^1.8.0", extras = ["simulation"] } +[flower.components] +serverapp = "$import_name.server:app" +clientapp = "$import_name.client:app" diff --git a/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl index da0e15b903f8..cbc34fea6304 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl @@ -1,21 +1,28 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "$project_name" +[project] +name = "$package_name" version = "1.0.0" description = "" -license = "Apache-2.0" authors = [ - "The Flower Authors ", + { name = "The Flower Authors", email = "hello@flower.ai" }, ] -readme = "README.md" +license = {text = "Apache License (2.0)"} +dependencies = [ + "flwr[simulation]>=1.8.0,<2.0", + "flwr-datasets[vision]>=0.0.2,<1.0.0", + "torch==2.2.1", + "torchvision==0.17.1", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[flower] +publisher = "$username" -[tool.poetry.dependencies] -python = "^3.9" -# Mandatory dependencies -flwr-nightly = { version = "1.8.0.dev20240313", extras = ["simulation"] } -flwr-datasets = { version = "0.0.2", extras = ["vision"] } -torch = "2.2.1" -torchvision = "0.17.1" +[flower.components] +serverapp = "$import_name.server:app" +clientapp = "$import_name.client:app" diff --git a/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl 
b/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl new file mode 100644 index 000000000000..89d5b66d2382 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl @@ -0,0 +1,27 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "$package_name" +version = "1.0.0" +description = "" +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, +] +license = {text = "Apache License (2.0)"} +dependencies = [ + "flwr[simulation]>=1.8.0,<2.0", + "flwr-datasets[vision]>=0.0.2,<1.0.0", + "scikit-learn>=1.1.1", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[flower] +publisher = "$username" + +[flower.components] +serverapp = "$import_name.server:app" +clientapp = "$import_name.client:app" diff --git a/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl index f7383a78b7d5..dea76d951382 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl @@ -1,21 +1,27 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "$project_name" +[project] +name = "$package_name" version = "1.0.0" description = "" -license = "Apache-2.0" authors = [ - "The Flower Authors ", + { name = "The Flower Authors", email = "hello@flower.ai" }, ] -readme = "README.md" +license = {text = "Apache License (2.0)"} +dependencies = [ + "flwr[simulation]>=1.8.0,<2.0", + "flwr-datasets[vision]>=0.0.2,<1.0.0", + "tensorflow>=2.11.1", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[flower] +publisher = "$username" -[tool.poetry.dependencies] -python = ">=3.9,<3.11" -# Mandatory dependencies -flwr = { version = "^1.8.0", extras = ["simulation"] } -flwr-datasets = { version = "^0.0.2", extras = ["vision"] } 
-tensorflow-cpu = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "platform_machine == \"x86_64\"" } -tensorflow-macos = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "sys_platform == \"darwin\" and platform_machine == \"arm64\"" } +[flower.components] +serverapp = "$import_name.server:app" +clientapp = "$import_name.client:app" diff --git a/src/py/flwr/cli/new/templates/app/requirements.numpy.txt.tpl b/src/py/flwr/cli/new/templates/app/requirements.numpy.txt.tpl deleted file mode 100644 index 4b460798e96f..000000000000 --- a/src/py/flwr/cli/new/templates/app/requirements.numpy.txt.tpl +++ /dev/null @@ -1,2 +0,0 @@ -flwr>=1.8, <2.0 -numpy>=1.21.0 diff --git a/src/py/flwr/cli/new/templates/app/requirements.tensorflow.txt.tpl b/src/py/flwr/cli/new/templates/app/requirements.tensorflow.txt.tpl deleted file mode 100644 index b6fb49a4bbcb..000000000000 --- a/src/py/flwr/cli/new/templates/app/requirements.tensorflow.txt.tpl +++ /dev/null @@ -1,4 +0,0 @@ -flwr>=1.8, <2.0 -flwr-datasets[vision]>=0.0.2, <1.0.0 -tensorflow-macos>=2.9.1, !=2.11.1 ; sys_platform == "darwin" and platform_machine == "arm64" -tensorflow-cpu>=2.9.1, !=2.11.1 ; platform_machine == "x86_64" diff --git a/src/py/flwr/cli/run/run.py b/src/py/flwr/cli/run/run.py index 98b5da1843a6..9c50c8cb1980 100644 --- a/src/py/flwr/cli/run/run.py +++ b/src/py/flwr/cli/run/run.py @@ -18,7 +18,7 @@ import typer -from flwr.cli import flower_toml +from flwr.cli import config_utils from flwr.simulation.run_simulation import _run_simulation @@ -26,11 +26,11 @@ def run() -> None: """Run Flower project.""" typer.secho("Loading project configuration... 
", fg=typer.colors.BLUE) - config, errors, warnings = flower_toml.load_and_validate_with_defaults() + config, errors, warnings = config_utils.load_and_validate_with_defaults() if config is None: typer.secho( - "Project configuration could not be loaded.\nflower.toml is invalid:\n" + "Project configuration could not be loaded.\npyproject.toml is invalid:\n" + "\n".join([f"- {line}" for line in errors]), fg=typer.colors.RED, bold=True, diff --git a/src/py/flwr/cli/utils.py b/src/py/flwr/cli/utils.py index 4e86f0c3b8c8..6460b770b184 100644 --- a/src/py/flwr/cli/utils.py +++ b/src/py/flwr/cli/utils.py @@ -14,18 +14,24 @@ # ============================================================================== """Flower command line interface utils.""" -from typing import List, cast +import re +from typing import Callable, List, Optional, cast import typer -def prompt_text(text: str) -> str: +def prompt_text( + text: str, + predicate: Callable[[str], bool] = lambda _: True, + default: Optional[str] = None, +) -> str: """Ask user to enter text input.""" while True: result = typer.prompt( - typer.style(f"\n💬 {text}", fg=typer.colors.MAGENTA, bold=True) + typer.style(f"\n💬 {text}", fg=typer.colors.MAGENTA, bold=True), + default=default, ) - if len(result) > 0: + if predicate(result) and len(result) > 0: break print(typer.style("❌ Invalid entry", fg=typer.colors.RED, bold=True)) @@ -65,3 +71,54 @@ def prompt_options(text: str, options: List[str]) -> str: result = options[int(index)] return result + + +def is_valid_project_name(name: str) -> bool: + """Check if the given string is a valid Python project name. + + A valid project name must start with a letter and can only contain letters, digits, + and hyphens. 
+ """ + if not name: + return False + + # Check if the first character is a letter + if not name[0].isalpha(): + return False + + # Check if the rest of the characters are valid (letter, digit, or dash) + for char in name[1:]: + if not (char.isalnum() or char in "-"): + return False + + return True + + +def sanitize_project_name(name: str) -> str: + """Sanitize the given string to make it a valid Python project name. + + This version replaces spaces, dots, slashes, and underscores with dashes, removes + any characters not allowed in Python project names, makes the string lowercase, and + ensures it starts with a valid character. + """ + # Replace whitespace with '_' + name_with_hyphens = re.sub(r"[ ./_]", "-", name) + + # Allowed characters in a module name: letters, digits, underscore + allowed_chars = set( + "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-" + ) + + # Make the string lowercase + sanitized_name = name_with_hyphens.lower() + + # Remove any characters not allowed in Python module names + sanitized_name = "".join(c for c in sanitized_name if c in allowed_chars) + + # Ensure the first character is a letter or underscore + while sanitized_name and ( + sanitized_name[0].isdigit() or sanitized_name[0] not in allowed_chars + ): + sanitized_name = sanitized_name[1:] + + return sanitized_name diff --git a/src/py/flwr/client/__init__.py b/src/py/flwr/client/__init__.py index a721fb584164..fd8647dbaf2e 100644 --- a/src/py/flwr/client/__init__.py +++ b/src/py/flwr/client/__init__.py @@ -15,12 +15,13 @@ """Flower client.""" -from .app import run_client_app as run_client_app from .app import start_client as start_client from .app import start_numpy_client as start_numpy_client from .client import Client as Client from .client_app import ClientApp as ClientApp from .numpy_client import NumPyClient as NumPyClient +from .supernode import run_client_app as run_client_app +from .supernode import run_supernode as run_supernode from .typing import 
ClientFn as ClientFn __all__ = [ @@ -29,6 +30,7 @@ "ClientFn", "NumPyClient", "run_client_app", + "run_supernode", "start_client", "start_numpy_client", ] diff --git a/src/py/flwr/client/app.py b/src/py/flwr/client/app.py index c8287afc0fd0..d7c05d8afbb2 100644 --- a/src/py/flwr/client/app.py +++ b/src/py/flwr/client/app.py @@ -14,14 +14,12 @@ # ============================================================================== """Flower client app.""" - -import argparse import sys import time -from logging import DEBUG, INFO, WARN -from pathlib import Path +from logging import DEBUG, ERROR, INFO, WARN from typing import Callable, ContextManager, Optional, Tuple, Type, Union +from cryptography.hazmat.primitives.asymmetric import ec from grpc import RpcError from flwr.client.client import Client @@ -35,10 +33,10 @@ TRANSPORT_TYPE_GRPC_RERE, TRANSPORT_TYPE_REST, TRANSPORT_TYPES, + ErrorCode, ) -from flwr.common.exit_handlers import register_exit_handlers -from flwr.common.logger import log, warn_deprecated_feature, warn_experimental_feature -from flwr.common.object_ref import load_app, validate +from flwr.common.logger import log, warn_deprecated_feature +from flwr.common.message import Error from flwr.common.retry_invoker import RetryInvoker, exponential from .grpc_client.connection import grpc_connection @@ -48,142 +46,6 @@ from .numpy_client import NumPyClient -def run_client_app() -> None: - """Run Flower client app.""" - event(EventType.RUN_CLIENT_APP_ENTER) - - log(INFO, "Long-running Flower client starting") - - args = _parse_args_run_client_app().parse_args() - - # Obtain certificates - if args.insecure: - if args.root_certificates is not None: - sys.exit( - "Conflicting options: The '--insecure' flag disables HTTPS, " - "but '--root-certificates' was also specified. Please remove " - "the '--root-certificates' option when running in insecure mode, " - "or omit '--insecure' to use HTTPS." - ) - log( - WARN, - "Option `--insecure` was set. 
" - "Starting insecure HTTP client connected to %s.", - args.server, - ) - root_certificates = None - else: - # Load the certificates if provided, or load the system certificates - cert_path = args.root_certificates - if cert_path is None: - root_certificates = None - else: - root_certificates = Path(cert_path).read_bytes() - log( - DEBUG, - "Starting secure HTTPS client connected to %s " - "with the following certificates: %s.", - args.server, - cert_path, - ) - - log( - DEBUG, - "Flower will load ClientApp `%s`", - getattr(args, "client-app"), - ) - - client_app_dir = args.dir - if client_app_dir is not None: - sys.path.insert(0, client_app_dir) - - app_ref: str = getattr(args, "client-app") - valid, error_msg = validate(app_ref) - if not valid and error_msg: - raise LoadClientAppError(error_msg) from None - - def _load() -> ClientApp: - client_app = load_app(app_ref, LoadClientAppError) - - if not isinstance(client_app, ClientApp): - raise LoadClientAppError( - f"Attribute {app_ref} is not of type {ClientApp}", - ) from None - - return client_app - - _start_client_internal( - server_address=args.server, - load_client_app_fn=_load, - transport="rest" if args.rest else "grpc-rere", - root_certificates=root_certificates, - insecure=args.insecure, - max_retries=args.max_retries, - max_wait_time=args.max_wait_time, - ) - register_exit_handlers(event_type=EventType.RUN_CLIENT_APP_LEAVE) - - -def _parse_args_run_client_app() -> argparse.ArgumentParser: - """Parse flower-client-app command line arguments.""" - parser = argparse.ArgumentParser( - description="Start a Flower client app", - ) - - parser.add_argument( - "client-app", - help="For example: `client:app` or `project.package.module:wrapper.app`", - ) - parser.add_argument( - "--insecure", - action="store_true", - help="Run the client without HTTPS. By default, the client runs with " - "HTTPS enabled. 
Use this flag only if you understand the risks.", - ) - parser.add_argument( - "--rest", - action="store_true", - help="Use REST as a transport layer for the client.", - ) - parser.add_argument( - "--root-certificates", - metavar="ROOT_CERT", - type=str, - help="Specifies the path to the PEM-encoded root certificate file for " - "establishing secure HTTPS connections.", - ) - parser.add_argument( - "--server", - default="0.0.0.0:9092", - help="Server address", - ) - parser.add_argument( - "--max-retries", - type=int, - default=None, - help="The maximum number of times the client will try to connect to the" - "server before giving up in case of a connection error. By default," - "it is set to None, meaning there is no limit to the number of tries.", - ) - parser.add_argument( - "--max-wait-time", - type=float, - default=None, - help="The maximum duration before the client stops trying to" - "connect to the server in case of connection error. By default, it" - "is set to None, meaning there is no limit to the total time.", - ) - parser.add_argument( - "--dir", - default="", - help="Add specified directory to the PYTHONPATH and load Flower " - "app from there." 
- " Default: current working directory.", - ) - - return parser - - def _check_actionable_client( client: Optional[Client], client_fn: Optional[ClientFn] ) -> None: @@ -212,6 +74,9 @@ def start_client( root_certificates: Optional[Union[bytes, str]] = None, insecure: Optional[bool] = None, transport: Optional[str] = None, + authentication_keys: Optional[ + Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + ] = None, max_retries: Optional[int] = None, max_wait_time: Optional[float] = None, ) -> None: @@ -296,6 +161,7 @@ class `flwr.client.Client` (default: None) root_certificates=root_certificates, insecure=insecure, transport=transport, + authentication_keys=authentication_keys, max_retries=max_retries, max_wait_time=max_wait_time, ) @@ -316,6 +182,9 @@ def _start_client_internal( root_certificates: Optional[Union[bytes, str]] = None, insecure: Optional[bool] = None, transport: Optional[str] = None, + authentication_keys: Optional[ + Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + ] = None, max_retries: Optional[int] = None, max_wait_time: Optional[float] = None, ) -> None: @@ -385,8 +254,6 @@ def _load_client_app() -> ClientApp: return ClientApp(client_fn=client_fn) load_client_app_fn = _load_client_app - else: - warn_experimental_feature("`load_client_app_fn`") # At this point, only `load_client_app_fn` should be used # Both `client` and `client_fn` must not be used directly @@ -397,7 +264,7 @@ def _load_client_app() -> ClientApp: ) retry_invoker = RetryInvoker( - wait_factory=exponential, + wait_gen_factory=exponential, recoverable_exceptions=connection_error_type, max_tries=max_retries, max_time=max_wait_time, @@ -442,8 +309,10 @@ def _load_client_app() -> ClientApp: retry_invoker, grpc_max_message_length, root_certificates, + authentication_keys, ) as conn: - receive, send, create_node, delete_node = conn + # pylint: disable-next=W0612 + receive, send, create_node, delete_node, get_run = conn # Register node if create_node is not 
None: @@ -457,12 +326,13 @@ def _load_client_app() -> ClientApp: continue log(INFO, "") - log( - INFO, - "[RUN %s, ROUND %s]", - message.metadata.run_id, - message.metadata.group_id, - ) + if len(message.metadata.group_id) > 0: + log( + INFO, + "[RUN %s, ROUND %s]", + message.metadata.run_id, + message.metadata.group_id, + ) log( INFO, "Received: %s message %s", @@ -482,32 +352,57 @@ def _load_client_app() -> ClientApp: # Retrieve context for this run context = node_state.retrieve_context(run_id=message.metadata.run_id) - # Load ClientApp instance - client_app: ClientApp = load_client_app_fn() - - # Handle task message - out_message = client_app(message=message, context=context) - - # Update node state - node_state.update_context( - run_id=message.metadata.run_id, - context=context, + # Create an error reply message that will never be used to prevent + # the used-before-assignment linting error + reply_message = message.create_error_reply( + error=Error(code=ErrorCode.UNKNOWN, reason="Unknown") ) + # Handle app loading and task message + try: + # Load ClientApp instance + client_app: ClientApp = load_client_app_fn() + + # Execute ClientApp + reply_message = client_app(message=message, context=context) + except Exception as ex: # pylint: disable=broad-exception-caught + + # Legacy grpc-bidi + if transport in ["grpc-bidi", None]: + log(ERROR, "Client raised an exception.", exc_info=ex) + # Raise exception, crash process + raise ex + + # Don't update/change NodeState + + e_code = ErrorCode.CLIENT_APP_RAISED_EXCEPTION + # Reason example: ":<'division by zero'>" + reason = str(type(ex)) + ":<'" + str(ex) + "'>" + exc_entity = "ClientApp" + if isinstance(ex, LoadClientAppError): + reason = ( + "An exception was raised when attempting to load " + "`ClientApp`" + ) + e_code = ErrorCode.LOAD_CLIENT_APP_EXCEPTION + exc_entity = "SuperNode" + + log(ERROR, "%s raised an exception", exc_entity, exc_info=ex) + + # Create error message + reply_message = 
message.create_error_reply( + error=Error(code=e_code, reason=reason) + ) + else: + # No exception, update node state + node_state.update_context( + run_id=message.metadata.run_id, + context=context, + ) + # Send - send(out_message) - log( - INFO, - "[RUN %s, ROUND %s]", - out_message.metadata.run_id, - out_message.metadata.group_id, - ) - log( - INFO, - "Sent: %s reply to message %s", - out_message.metadata.message_type, - message.metadata.message_id, - ) + send(reply_message) + log(INFO, "Sent reply") # Unregister node if delete_node is not None: @@ -628,13 +523,21 @@ def start_numpy_client( def _init_connection(transport: Optional[str], server_address: str) -> Tuple[ Callable[ - [str, bool, RetryInvoker, int, Union[bytes, str, None]], + [ + str, + bool, + RetryInvoker, + int, + Union[bytes, str, None], + Optional[Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]], + ], ContextManager[ Tuple[ Callable[[], Optional[Message]], Callable[[Message], None], Optional[Callable[[], None]], Optional[Callable[[], None]], + Optional[Callable[[int], Tuple[str, str]]], ] ], ], diff --git a/src/py/flwr/client/client_app.py b/src/py/flwr/client/client_app.py index ad7a01326991..c9d337700147 100644 --- a/src/py/flwr/client/client_app.py +++ b/src/py/flwr/client/client_app.py @@ -23,10 +23,20 @@ from flwr.client.mod.utils import make_ffn from flwr.client.typing import ClientFn, Mod from flwr.common import Context, Message, MessageType +from flwr.common.logger import warn_preview_feature from .typing import ClientAppCallable +class ClientAppException(Exception): + """Exception raised when an exception is raised while executing a ClientApp.""" + + def __init__(self, message: str): + ex_name = self.__class__.__name__ + self.message = f"\nException {ex_name} occurred. Message: " + message + super().__init__(self.message) + + class ClientApp: """Flower ClientApp. 
@@ -115,7 +125,7 @@ def train(self) -> Callable[[ClientAppCallable], ClientAppCallable]: >>> def train(message: Message, context: Context) -> Message: >>> print("ClientApp training running") >>> # Create and return an echo reply message - >>> return message.create_reply(content=message.content(), ttl="") + >>> return message.create_reply(content=message.content()) """ def train_decorator(train_fn: ClientAppCallable) -> ClientAppCallable: @@ -123,6 +133,8 @@ def train_decorator(train_fn: ClientAppCallable) -> ClientAppCallable: if self._call: raise _registration_error(MessageType.TRAIN) + warn_preview_feature("ClientApp-register-train-function") + # Register provided function with the ClientApp object # Wrap mods around the wrapped step function self._train = make_ffn(train_fn, self._mods) @@ -143,7 +155,7 @@ def evaluate(self) -> Callable[[ClientAppCallable], ClientAppCallable]: >>> def evaluate(message: Message, context: Context) -> Message: >>> print("ClientApp evaluation running") >>> # Create and return an echo reply message - >>> return message.create_reply(content=message.content(), ttl="") + >>> return message.create_reply(content=message.content()) """ def evaluate_decorator(evaluate_fn: ClientAppCallable) -> ClientAppCallable: @@ -151,6 +163,8 @@ def evaluate_decorator(evaluate_fn: ClientAppCallable) -> ClientAppCallable: if self._call: raise _registration_error(MessageType.EVALUATE) + warn_preview_feature("ClientApp-register-evaluate-function") + # Register provided function with the ClientApp object # Wrap mods around the wrapped step function self._evaluate = make_ffn(evaluate_fn, self._mods) @@ -171,7 +185,7 @@ def query(self) -> Callable[[ClientAppCallable], ClientAppCallable]: >>> def query(message: Message, context: Context) -> Message: >>> print("ClientApp query running") >>> # Create and return an echo reply message - >>> return message.create_reply(content=message.content(), ttl="") + >>> return message.create_reply(content=message.content()) """ 
def query_decorator(query_fn: ClientAppCallable) -> ClientAppCallable: @@ -179,6 +193,8 @@ def query_decorator(query_fn: ClientAppCallable) -> ClientAppCallable: if self._call: raise _registration_error(MessageType.QUERY) + warn_preview_feature("ClientApp-register-query-function") + # Register provided function with the ClientApp object # Wrap mods around the wrapped step function self._query = make_ffn(query_fn, self._mods) @@ -218,7 +234,7 @@ def _registration_error(fn_name: str) -> ValueError: >>> print("ClientApp {fn_name} running") >>> # Create and return an echo reply message >>> return message.create_reply( - >>> content=message.content(), ttl="" + >>> content=message.content() >>> ) """, ) diff --git a/src/py/flwr/client/grpc_client/connection.py b/src/py/flwr/client/grpc_client/connection.py index 163a58542c9e..6e5227cf5e5f 100644 --- a/src/py/flwr/client/grpc_client/connection.py +++ b/src/py/flwr/client/grpc_client/connection.py @@ -22,7 +22,10 @@ from queue import Queue from typing import Callable, Iterator, Optional, Tuple, Union, cast +from cryptography.hazmat.primitives.asymmetric import ec + from flwr.common import ( + DEFAULT_TTL, GRPC_MAX_MESSAGE_LENGTH, ConfigsRecord, Message, @@ -55,18 +58,22 @@ def on_channel_state_change(channel_connectivity: str) -> None: @contextmanager -def grpc_connection( # pylint: disable=R0915 +def grpc_connection( # pylint: disable=R0913, R0915 server_address: str, insecure: bool, retry_invoker: RetryInvoker, # pylint: disable=unused-argument max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, root_certificates: Optional[Union[bytes, str]] = None, + authentication_keys: Optional[ # pylint: disable=unused-argument + Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + ] = None, ) -> Iterator[ Tuple[ Callable[[], Optional[Message]], Callable[[Message], None], Optional[Callable[[], None]], Optional[Callable[[], None]], + Optional[Callable[[int], Tuple[str, str]]], ] ]: """Establish a gRPC connection to a gRPC 
server. @@ -180,7 +187,7 @@ def receive() -> Message: dst_node_id=0, reply_to_message="", group_id="", - ttl="", + ttl=DEFAULT_TTL, message_type=message_type, ), content=recordset, @@ -223,7 +230,7 @@ def send(message: Message) -> None: try: # Yield methods - yield (receive, send, None, None) + yield (receive, send, None, None, None) finally: # Make sure to have a final channel.close() diff --git a/src/py/flwr/client/grpc_client/connection_test.py b/src/py/flwr/client/grpc_client/connection_test.py index b7737f511a2a..da7800b26639 100644 --- a/src/py/flwr/client/grpc_client/connection_test.py +++ b/src/py/flwr/client/grpc_client/connection_test.py @@ -23,7 +23,7 @@ import grpc -from flwr.common import ConfigsRecord, Message, Metadata, RecordSet +from flwr.common import DEFAULT_TTL, ConfigsRecord, Message, Metadata, RecordSet from flwr.common import recordset_compat as compat from flwr.common.constant import MessageTypeLegacy from flwr.common.retry_invoker import RetryInvoker, exponential @@ -50,7 +50,7 @@ dst_node_id=0, reply_to_message="", group_id="", - ttl="", + ttl=DEFAULT_TTL, message_type=MessageTypeLegacy.GET_PROPERTIES, ), content=compat.getpropertiesres_to_recordset( @@ -65,7 +65,7 @@ dst_node_id=0, reply_to_message="", group_id="", - ttl="", + ttl=DEFAULT_TTL, message_type="reconnect", ), content=RecordSet(configs_records={"config": ConfigsRecord({"reason": 0})}), @@ -132,13 +132,13 @@ def run_client() -> int: server_address=f"[::]:{port}", insecure=True, retry_invoker=RetryInvoker( - wait_factory=exponential, + wait_gen_factory=exponential, recoverable_exceptions=grpc.RpcError, max_tries=1, max_time=None, ), ) as conn: - receive, send, _, _ = conn + receive, send, _, _, _ = conn # Setup processing loop while True: diff --git a/src/py/flwr/client/grpc_rere_client/client_interceptor.py b/src/py/flwr/client/grpc_rere_client/client_interceptor.py new file mode 100644 index 000000000000..8bc55878971d --- /dev/null +++ 
b/src/py/flwr/client/grpc_rere_client/client_interceptor.py @@ -0,0 +1,158 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flower client interceptor.""" + + +import base64 +import collections +from typing import Any, Callable, Optional, Sequence, Tuple, Union + +import grpc +from cryptography.hazmat.primitives.asymmetric import ec + +from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( + bytes_to_public_key, + compute_hmac, + generate_shared_key, + public_key_to_bytes, +) +from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 + CreateNodeRequest, + DeleteNodeRequest, + GetRunRequest, + PingRequest, + PullTaskInsRequest, + PushTaskResRequest, +) + +_PUBLIC_KEY_HEADER = "public-key" +_AUTH_TOKEN_HEADER = "auth-token" + +Request = Union[ + CreateNodeRequest, + DeleteNodeRequest, + PullTaskInsRequest, + PushTaskResRequest, + GetRunRequest, + PingRequest, +] + + +def _get_value_from_tuples( + key_string: str, tuples: Sequence[Tuple[str, Union[str, bytes]]] +) -> bytes: + value = next((value for key, value in tuples if key == key_string), "") + if isinstance(value, str): + return value.encode() + + return value + + +class _ClientCallDetails( + collections.namedtuple( + "_ClientCallDetails", ("method", "timeout", "metadata", "credentials") + ), + grpc.ClientCallDetails, # type: ignore +): + """Details for 
each client call. + + The class will be passed on as the first argument in continuation function. + In our case, `AuthenticateClientInterceptor` adds new metadata to the construct. + """ + + +class AuthenticateClientInterceptor(grpc.UnaryUnaryClientInterceptor): # type: ignore + """Client interceptor for client authentication.""" + + def __init__( + self, + private_key: ec.EllipticCurvePrivateKey, + public_key: ec.EllipticCurvePublicKey, + ): + self.private_key = private_key + self.public_key = public_key + self.shared_secret: Optional[bytes] = None + self.server_public_key: Optional[ec.EllipticCurvePublicKey] = None + self.encoded_public_key = base64.urlsafe_b64encode( + public_key_to_bytes(self.public_key) + ) + + def intercept_unary_unary( + self, + continuation: Callable[[Any, Any], Any], + client_call_details: grpc.ClientCallDetails, + request: Request, + ) -> grpc.Call: + """Flower client interceptor. + + Intercept unary call from client and add necessary authentication header in the + RPC metadata. 
+ """ + metadata = [] + postprocess = False + if client_call_details.metadata is not None: + metadata = list(client_call_details.metadata) + + # Always add the public key header + metadata.append( + ( + _PUBLIC_KEY_HEADER, + self.encoded_public_key, + ) + ) + + if isinstance(request, CreateNodeRequest): + postprocess = True + elif isinstance( + request, + ( + DeleteNodeRequest, + PullTaskInsRequest, + PushTaskResRequest, + GetRunRequest, + PingRequest, + ), + ): + if self.shared_secret is None: + raise RuntimeError("Failure to compute hmac") + + metadata.append( + ( + _AUTH_TOKEN_HEADER, + base64.urlsafe_b64encode( + compute_hmac( + self.shared_secret, request.SerializeToString(True) + ) + ), + ) + ) + + client_call_details = _ClientCallDetails( + client_call_details.method, + client_call_details.timeout, + metadata, + client_call_details.credentials, + ) + + response = continuation(client_call_details, request) + if postprocess: + server_public_key_bytes = base64.urlsafe_b64decode( + _get_value_from_tuples(_PUBLIC_KEY_HEADER, response.initial_metadata()) + ) + self.server_public_key = bytes_to_public_key(server_public_key_bytes) + self.shared_secret = generate_shared_key( + self.private_key, self.server_public_key + ) + return response diff --git a/src/py/flwr/client/grpc_rere_client/client_interceptor_test.py b/src/py/flwr/client/grpc_rere_client/client_interceptor_test.py new file mode 100644 index 000000000000..487361a06026 --- /dev/null +++ b/src/py/flwr/client/grpc_rere_client/client_interceptor_test.py @@ -0,0 +1,376 @@ +# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flower client interceptor tests.""" + + +import base64 +import threading +import unittest +from concurrent import futures +from logging import DEBUG, INFO, WARN +from typing import Optional, Sequence, Tuple, Union + +import grpc + +from flwr.client.grpc_rere_client.connection import grpc_request_response +from flwr.common import GRPC_MAX_MESSAGE_LENGTH +from flwr.common.logger import log +from flwr.common.message import Message, Metadata +from flwr.common.record import RecordSet +from flwr.common.retry_invoker import RetryInvoker, exponential +from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( + compute_hmac, + generate_key_pairs, + generate_shared_key, + public_key_to_bytes, +) +from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 + CreateNodeRequest, + CreateNodeResponse, + DeleteNodeRequest, + DeleteNodeResponse, + GetRunRequest, + GetRunResponse, + PullTaskInsRequest, + PullTaskInsResponse, + PushTaskResRequest, + PushTaskResResponse, +) + +from .client_interceptor import _AUTH_TOKEN_HEADER, _PUBLIC_KEY_HEADER, Request + + +class _MockServicer: + """Mock Servicer for Flower clients.""" + + def __init__(self) -> None: + """Initialize mock servicer.""" + self._lock = threading.Lock() + self._received_client_metadata: Optional[ + Sequence[Tuple[str, Union[str, bytes]]] + ] = None + self.server_private_key, self.server_public_key = generate_key_pairs() + self._received_message_bytes: bytes = b"" + + def unary_unary( + self, request: Request, 
context: grpc.ServicerContext + ) -> Union[ + CreateNodeResponse, DeleteNodeResponse, PushTaskResResponse, PullTaskInsResponse + ]: + """Handle unary call.""" + with self._lock: + self._received_client_metadata = context.invocation_metadata() + self._received_message_bytes = request.SerializeToString(True) + + if isinstance(request, CreateNodeRequest): + context.send_initial_metadata( + ((_PUBLIC_KEY_HEADER, self.server_public_key),) + ) + return CreateNodeResponse() + if isinstance(request, DeleteNodeRequest): + return DeleteNodeResponse() + if isinstance(request, PushTaskResRequest): + return PushTaskResResponse() + + return PullTaskInsResponse() + + def received_client_metadata( + self, + ) -> Optional[Sequence[Tuple[str, Union[str, bytes]]]]: + """Return received client metadata.""" + with self._lock: + return self._received_client_metadata + + def received_message_bytes(self) -> bytes: + """Return received message bytes.""" + with self._lock: + return self._received_message_bytes + + +def _add_generic_handler(servicer: _MockServicer, server: grpc.Server) -> None: + rpc_method_handlers = { + "CreateNode": grpc.unary_unary_rpc_method_handler( + servicer.unary_unary, + request_deserializer=CreateNodeRequest.FromString, + response_serializer=CreateNodeResponse.SerializeToString, + ), + "DeleteNode": grpc.unary_unary_rpc_method_handler( + servicer.unary_unary, + request_deserializer=DeleteNodeRequest.FromString, + response_serializer=DeleteNodeResponse.SerializeToString, + ), + "PullTaskIns": grpc.unary_unary_rpc_method_handler( + servicer.unary_unary, + request_deserializer=PullTaskInsRequest.FromString, + response_serializer=PullTaskInsResponse.SerializeToString, + ), + "PushTaskRes": grpc.unary_unary_rpc_method_handler( + servicer.unary_unary, + request_deserializer=PushTaskResRequest.FromString, + response_serializer=PushTaskResResponse.SerializeToString, + ), + "GetRun": grpc.unary_unary_rpc_method_handler( + servicer.unary_unary, + 
request_deserializer=GetRunRequest.FromString, + response_serializer=GetRunResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + "flwr.proto.Fleet", rpc_method_handlers + ) + server.add_generic_rpc_handlers((generic_handler,)) + + +def _init_retry_invoker() -> RetryInvoker: + return RetryInvoker( + wait_gen_factory=exponential, + recoverable_exceptions=grpc.RpcError, + max_tries=None, + max_time=None, + on_giveup=lambda retry_state: ( + log( + WARN, + "Giving up reconnection after %.2f seconds and %s tries.", + retry_state.elapsed_time, + retry_state.tries, + ) + if retry_state.tries > 1 + else None + ), + on_success=lambda retry_state: ( + log( + INFO, + "Connection successful after %.2f seconds and %s tries.", + retry_state.elapsed_time, + retry_state.tries, + ) + if retry_state.tries > 1 + else None + ), + on_backoff=lambda retry_state: ( + log(WARN, "Connection attempt failed, retrying...") + if retry_state.tries == 1 + else log( + DEBUG, + "Connection attempt failed, retrying in %.2f seconds", + retry_state.actual_wait, + ) + ), + ) + + +class TestAuthenticateClientInterceptor(unittest.TestCase): + """Test for client interceptor client authentication.""" + + def setUp(self) -> None: + """Initialize mock server and client.""" + self._server = grpc.server( + futures.ThreadPoolExecutor(max_workers=10), + options=(("grpc.so_reuseport", int(False)),), + ) + self._servicer = _MockServicer() + _add_generic_handler(self._servicer, self._server) + port = self._server.add_insecure_port("[::]:0") + self._server.start() + self._client_private_key, self._client_public_key = generate_key_pairs() + + self._connection = grpc_request_response + self._address = f"localhost:{port}" + + def test_client_auth_create_node(self) -> None: + """Test client authentication during create node.""" + # Prepare + retry_invoker = _init_retry_invoker() + + # Execute + with self._connection( + self._address, + True, + retry_invoker, + 
GRPC_MAX_MESSAGE_LENGTH, + None, + (self._client_private_key, self._client_public_key), + ) as conn: + _, _, create_node, _, _ = conn + assert create_node is not None + create_node() + expected_client_metadata = ( + _PUBLIC_KEY_HEADER, + base64.urlsafe_b64encode(public_key_to_bytes(self._client_public_key)), + ) + + # Assert + assert self._servicer.received_client_metadata() == expected_client_metadata + + def test_client_auth_delete_node(self) -> None: + """Test client authentication during delete node.""" + # Prepare + retry_invoker = _init_retry_invoker() + + # Execute + with self._connection( + self._address, + True, + retry_invoker, + GRPC_MAX_MESSAGE_LENGTH, + None, + (self._client_private_key, self._client_public_key), + ) as conn: + _, _, _, delete_node, _ = conn + assert delete_node is not None + delete_node() + shared_secret = generate_shared_key( + self._servicer.server_private_key, self._client_public_key + ) + expected_hmac = compute_hmac( + shared_secret, self._servicer.received_message_bytes() + ) + expected_client_metadata = ( + ( + _PUBLIC_KEY_HEADER, + base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ), + ), + ( + _AUTH_TOKEN_HEADER, + base64.urlsafe_b64encode(expected_hmac), + ), + ) + + # Assert + assert self._servicer.received_client_metadata() == expected_client_metadata + + def test_client_auth_receive(self) -> None: + """Test client authentication during receive node.""" + # Prepare + retry_invoker = _init_retry_invoker() + + # Execute + with self._connection( + self._address, + True, + retry_invoker, + GRPC_MAX_MESSAGE_LENGTH, + None, + (self._client_private_key, self._client_public_key), + ) as conn: + receive, _, _, _, _ = conn + assert receive is not None + receive() + shared_secret = generate_shared_key( + self._servicer.server_private_key, self._client_public_key + ) + expected_hmac = compute_hmac( + shared_secret, self._servicer.received_message_bytes() + ) + expected_client_metadata = ( + ( + 
_PUBLIC_KEY_HEADER, + base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ), + ), + ( + _AUTH_TOKEN_HEADER, + base64.urlsafe_b64encode(expected_hmac), + ), + ) + + # Assert + assert self._servicer.received_client_metadata() == expected_client_metadata + + def test_client_auth_send(self) -> None: + """Test client authentication during send node.""" + # Prepare + retry_invoker = _init_retry_invoker() + message = Message(Metadata(0, "1", 0, 0, "", "", 0, ""), RecordSet()) + + # Execute + with self._connection( + self._address, + True, + retry_invoker, + GRPC_MAX_MESSAGE_LENGTH, + None, + (self._client_private_key, self._client_public_key), + ) as conn: + _, send, _, _, _ = conn + assert send is not None + send(message) + shared_secret = generate_shared_key( + self._servicer.server_private_key, self._client_public_key + ) + expected_hmac = compute_hmac( + shared_secret, self._servicer.received_message_bytes() + ) + expected_client_metadata = ( + ( + _PUBLIC_KEY_HEADER, + base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ), + ), + ( + _AUTH_TOKEN_HEADER, + base64.urlsafe_b64encode(expected_hmac), + ), + ) + + # Assert + assert self._servicer.received_client_metadata() == expected_client_metadata + + def test_client_auth_get_run(self) -> None: + """Test client authentication during send node.""" + # Prepare + retry_invoker = _init_retry_invoker() + + # Execute + with self._connection( + self._address, + True, + retry_invoker, + GRPC_MAX_MESSAGE_LENGTH, + None, + (self._client_private_key, self._client_public_key), + ) as conn: + _, _, _, _, get_run = conn + assert get_run is not None + get_run(0) + shared_secret = generate_shared_key( + self._servicer.server_private_key, self._client_public_key + ) + expected_hmac = compute_hmac( + shared_secret, self._servicer.received_message_bytes() + ) + expected_client_metadata = ( + ( + _PUBLIC_KEY_HEADER, + base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ), 
+ ), + ( + _AUTH_TOKEN_HEADER, + base64.urlsafe_b64encode(expected_hmac), + ), + ) + + # Assert + assert self._servicer.received_client_metadata() == expected_client_metadata + + +if __name__ == "__main__": + unittest.main(verbosity=2) diff --git a/src/py/flwr/client/grpc_rere_client/connection.py b/src/py/flwr/client/grpc_rere_client/connection.py index e6e22998b947..3778fd4061f9 100644 --- a/src/py/flwr/client/grpc_rere_client/connection.py +++ b/src/py/flwr/client/grpc_rere_client/connection.py @@ -15,23 +15,39 @@ """Contextmanager for a gRPC request-response channel to the Flower server.""" +import random +import threading from contextlib import contextmanager from copy import copy from logging import DEBUG, ERROR from pathlib import Path -from typing import Callable, Dict, Iterator, Optional, Tuple, Union, cast +from typing import Callable, Iterator, Optional, Sequence, Tuple, Union, cast +import grpc +from cryptography.hazmat.primitives.asymmetric import ec + +from flwr.client.heartbeat import start_ping_loop from flwr.client.message_handler.message_handler import validate_out_message from flwr.client.message_handler.task_handler import get_task_ins, validate_task_ins from flwr.common import GRPC_MAX_MESSAGE_LENGTH +from flwr.common.constant import ( + PING_BASE_MULTIPLIER, + PING_CALL_TIMEOUT, + PING_DEFAULT_INTERVAL, + PING_RANDOM_RANGE, +) from flwr.common.grpc import create_channel -from flwr.common.logger import log, warn_experimental_feature +from flwr.common.logger import log from flwr.common.message import Message, Metadata from flwr.common.retry_invoker import RetryInvoker from flwr.common.serde import message_from_taskins, message_to_taskres from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 CreateNodeRequest, DeleteNodeRequest, + GetRunRequest, + GetRunResponse, + PingRequest, + PingResponse, PullTaskInsRequest, PushTaskResRequest, ) @@ -39,8 +55,7 @@ from flwr.proto.node_pb2 import Node # pylint: disable=E0611 from flwr.proto.task_pb2 
import TaskIns # pylint: disable=E0611 -KEY_NODE = "node" -KEY_METADATA = "in_message_metadata" +from .client_interceptor import AuthenticateClientInterceptor def on_channel_state_change(channel_connectivity: str) -> None: @@ -49,18 +64,22 @@ def on_channel_state_change(channel_connectivity: str) -> None: @contextmanager -def grpc_request_response( +def grpc_request_response( # pylint: disable=R0913, R0914, R0915 server_address: str, insecure: bool, retry_invoker: RetryInvoker, max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, # pylint: disable=W0613 root_certificates: Optional[Union[bytes, str]] = None, + authentication_keys: Optional[ + Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + ] = None, ) -> Iterator[ Tuple[ Callable[[], Optional[Message]], Callable[[Message], None], Optional[Callable[[], None]], Optional[Callable[[], None]], + Optional[Callable[[int], Tuple[str, str]]], ] ]: """Primitives for request/response-based interaction with a server. @@ -95,59 +114,98 @@ def grpc_request_response( create_node : Optional[Callable] delete_node : Optional[Callable] """ - warn_experimental_feature("`grpc-rere`") - if isinstance(root_certificates, str): root_certificates = Path(root_certificates).read_bytes() + interceptors: Optional[Sequence[grpc.UnaryUnaryClientInterceptor]] = None + if authentication_keys is not None: + interceptors = AuthenticateClientInterceptor( + authentication_keys[0], authentication_keys[1] + ) + channel = create_channel( server_address=server_address, insecure=insecure, root_certificates=root_certificates, max_message_length=max_message_length, + interceptors=interceptors, ) channel.subscribe(on_channel_state_change) - stub = FleetStub(channel) - # Necessary state to validate messages to be sent - state: Dict[str, Optional[Metadata]] = {KEY_METADATA: None} - - # Enable create_node and delete_node to store node - node_store: Dict[str, Optional[Node]] = {KEY_NODE: None} + # Shared variables for inner functions + stub = 
FleetStub(channel) + metadata: Optional[Metadata] = None + node: Optional[Node] = None + ping_thread: Optional[threading.Thread] = None + ping_stop_event = threading.Event() ########################################################################### - # receive/send functions + # ping/create_node/delete_node/receive/send/get_run functions ########################################################################### + def ping() -> None: + # Get Node + if node is None: + log(ERROR, "Node instance missing") + return + + # Construct the ping request + req = PingRequest(node=node, ping_interval=PING_DEFAULT_INTERVAL) + + # Call FleetAPI + res: PingResponse = stub.Ping(req, timeout=PING_CALL_TIMEOUT) + + # Check if success + if not res.success: + raise RuntimeError("Ping failed unexpectedly.") + + # Wait + rd = random.uniform(*PING_RANDOM_RANGE) + next_interval: float = PING_DEFAULT_INTERVAL - PING_CALL_TIMEOUT + next_interval *= PING_BASE_MULTIPLIER + rd + if not ping_stop_event.is_set(): + ping_stop_event.wait(next_interval) + def create_node() -> None: """Set create_node.""" - create_node_request = CreateNodeRequest() + # Call FleetAPI + create_node_request = CreateNodeRequest(ping_interval=PING_DEFAULT_INTERVAL) create_node_response = retry_invoker.invoke( stub.CreateNode, request=create_node_request, ) - node_store[KEY_NODE] = create_node_response.node + + # Remember the node and the ping-loop thread + nonlocal node, ping_thread + node = cast(Node, create_node_response.node) + ping_thread = start_ping_loop(ping, ping_stop_event) def delete_node() -> None: """Set delete_node.""" # Get Node - if node_store[KEY_NODE] is None: + nonlocal node + if node is None: log(ERROR, "Node instance missing") return - node: Node = cast(Node, node_store[KEY_NODE]) + # Stop the ping-loop thread + ping_stop_event.set() + if ping_thread is not None: + ping_thread.join() + + # Call FleetAPI delete_node_request = DeleteNodeRequest(node=node) retry_invoker.invoke(stub.DeleteNode, 
request=delete_node_request) - del node_store[KEY_NODE] + # Cleanup + node = None def receive() -> Optional[Message]: """Receive next task from server.""" # Get Node - if node_store[KEY_NODE] is None: + if node is None: log(ERROR, "Node instance missing") return None - node: Node = cast(Node, node_store[KEY_NODE]) # Request instructions (task) from server request = PullTaskInsRequest(node=node) @@ -167,7 +225,8 @@ def receive() -> Optional[Message]: in_message = message_from_taskins(task_ins) if task_ins else None # Remember `metadata` of the in message - state[KEY_METADATA] = copy(in_message.metadata) if in_message else None + nonlocal metadata + metadata = copy(in_message.metadata) if in_message else None # Return the message if available return in_message @@ -175,18 +234,18 @@ def receive() -> Optional[Message]: def send(message: Message) -> None: """Send task result back to server.""" # Get Node - if node_store[KEY_NODE] is None: + if node is None: log(ERROR, "Node instance missing") return - # Get incoming message - in_metadata = state[KEY_METADATA] - if in_metadata is None: + # Get the metadata of the incoming message + nonlocal metadata + if metadata is None: log(ERROR, "No current message") return # Validate out message - if not validate_out_message(message, in_metadata): + if not validate_out_message(message, metadata): log(ERROR, "Invalid out message") return @@ -197,10 +256,22 @@ def send(message: Message) -> None: request = PushTaskResRequest(task_res_list=[task_res]) _ = retry_invoker.invoke(stub.PushTaskRes, request) - state[KEY_METADATA] = None + # Cleanup + metadata = None + + def get_run(run_id: int) -> Tuple[str, str]: + # Call FleetAPI + get_run_request = GetRunRequest(run_id=run_id) + get_run_response: GetRunResponse = retry_invoker.invoke( + stub.GetRun, + request=get_run_request, + ) + + # Return fab_id and fab_version + return get_run_response.run.fab_id, get_run_response.run.fab_version try: # Yield methods - yield (receive, send, 
create_node, delete_node) + yield (receive, send, create_node, delete_node, get_run) except Exception as exc: # pylint: disable=broad-except log(ERROR, exc) diff --git a/src/py/flwr/client/heartbeat.py b/src/py/flwr/client/heartbeat.py new file mode 100644 index 000000000000..b68e6163cc01 --- /dev/null +++ b/src/py/flwr/client/heartbeat.py @@ -0,0 +1,74 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Heartbeat utility functions.""" + + +import threading +from typing import Callable + +import grpc + +from flwr.common.constant import PING_CALL_TIMEOUT +from flwr.common.retry_invoker import RetryInvoker, RetryState, exponential + + +def _ping_loop(ping_fn: Callable[[], None], stop_event: threading.Event) -> None: + def wait_fn(wait_time: float) -> None: + if not stop_event.is_set(): + stop_event.wait(wait_time) + + def on_backoff(state: RetryState) -> None: + err = state.exception + if not isinstance(err, grpc.RpcError): + return + status_code = err.code() + # If ping call timeout is triggered + if status_code == grpc.StatusCode.DEADLINE_EXCEEDED: + # Avoid long wait time. 
+ if state.actual_wait is None: + return + state.actual_wait = max(state.actual_wait - PING_CALL_TIMEOUT, 0.0) + + def wrapped_ping() -> None: + if not stop_event.is_set(): + ping_fn() + + retrier = RetryInvoker( + exponential, + grpc.RpcError, + max_tries=None, + max_time=None, + on_backoff=on_backoff, + wait_function=wait_fn, + ) + while not stop_event.is_set(): + retrier.invoke(wrapped_ping) + + +def start_ping_loop( + ping_fn: Callable[[], None], stop_event: threading.Event +) -> threading.Thread: + """Start a ping loop in a separate thread. + + This function initializes a new thread that runs a ping loop, allowing for + asynchronous ping operations. The loop can be terminated through the provided stop + event. + """ + thread = threading.Thread( + target=_ping_loop, args=(ping_fn, stop_event), daemon=True + ) + thread.start() + + return thread diff --git a/src/py/flwr/client/heartbeat_test.py b/src/py/flwr/client/heartbeat_test.py new file mode 100644 index 000000000000..286429e075b1 --- /dev/null +++ b/src/py/flwr/client/heartbeat_test.py @@ -0,0 +1,59 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Unit tests for heartbeat utility functions.""" + + +import threading +import time +import unittest +from unittest.mock import MagicMock + +from .heartbeat import start_ping_loop + + +class TestStartPingLoopWithFailures(unittest.TestCase): + """Test heartbeat utility functions.""" + + def test_ping_loop_terminates(self) -> None: + """Test if the ping loop thread terminates when flagged.""" + # Prepare + ping_fn = MagicMock() + stop_event = threading.Event() + + # Execute + thread = start_ping_loop(ping_fn, stop_event) + time.sleep(1) + stop_event.set() + thread.join(timeout=1) + + # Assert + self.assertTrue(ping_fn.called) + self.assertFalse(thread.is_alive()) + + def test_ping_loop_with_failures_terminates(self) -> None: + """Test if the ping loop thread with failures terminates when flagged.""" + # Prepare + ping_fn = MagicMock(side_effect=RuntimeError()) + stop_event = threading.Event() + + # Execute + thread = start_ping_loop(ping_fn, stop_event) + time.sleep(1) + stop_event.set() + thread.join(timeout=1) + + # Assert + self.assertTrue(ping_fn.called) + self.assertFalse(thread.is_alive()) diff --git a/src/py/flwr/client/message_handler/message_handler.py b/src/py/flwr/client/message_handler/message_handler.py index 9a5d70b1ac4d..e5acbe0cc9d0 100644 --- a/src/py/flwr/client/message_handler/message_handler.py +++ b/src/py/flwr/client/message_handler/message_handler.py @@ -81,7 +81,7 @@ def handle_control_message(message: Message) -> Tuple[Optional[Message], int]: reason = cast(int, disconnect_msg.disconnect_res.reason) recordset = RecordSet() recordset.configs_records["config"] = ConfigsRecord({"reason": reason}) - out_message = message.create_reply(recordset, ttl="") + out_message = message.create_reply(recordset) # Return TaskRes and sleep duration return out_message, sleep_duration @@ -143,7 +143,7 @@ def handle_legacy_message_from_msgtype( raise ValueError(f"Invalid message 
type: {message_type}") # Return Message - return message.create_reply(out_recordset, ttl="") + return message.create_reply(out_recordset) def _reconnect( @@ -172,6 +172,7 @@ def validate_out_message(out_message: Message, in_message_metadata: Metadata) -> and out_meta.reply_to_message == in_meta.message_id and out_meta.group_id == in_meta.group_id and out_meta.message_type == in_meta.message_type + and out_meta.created_at > in_meta.created_at ): return True return False diff --git a/src/py/flwr/client/message_handler/message_handler_test.py b/src/py/flwr/client/message_handler/message_handler_test.py index eaf16f7dc993..8a2db1804e4a 100644 --- a/src/py/flwr/client/message_handler/message_handler_test.py +++ b/src/py/flwr/client/message_handler/message_handler_test.py @@ -15,6 +15,7 @@ """Client-side message handler tests.""" +import time import unittest import uuid from copy import copy @@ -23,6 +24,7 @@ from flwr.client import Client from flwr.client.typing import ClientFn from flwr.common import ( + DEFAULT_TTL, Code, Context, EvaluateIns, @@ -131,7 +133,7 @@ def test_client_without_get_properties() -> None: src_node_id=0, dst_node_id=1123, reply_to_message="", - ttl="", + ttl=DEFAULT_TTL, message_type=MessageTypeLegacy.GET_PROPERTIES, ), content=recordset, @@ -161,14 +163,25 @@ def test_client_without_get_properties() -> None: src_node_id=1123, dst_node_id=0, reply_to_message=message.metadata.message_id, - ttl="", + ttl=actual_msg.metadata.ttl, # computed based on [message].create_reply() message_type=MessageTypeLegacy.GET_PROPERTIES, ), content=expected_rs, ) assert actual_msg.content == expected_msg.content - assert actual_msg.metadata == expected_msg.metadata + # metadata.created_at will differ so let's exclude it from checks + attrs = vars(actual_msg.metadata) + attrs_keys = list(attrs.keys()) + attrs_keys.remove("_created_at") + # metadata.created_at will differ so let's exclude it from checks + for attr in attrs_keys: + assert getattr(actual_msg.metadata, 
attr) == getattr( + expected_msg.metadata, attr + ) + + # Ensure the message created last has a higher timestamp + assert actual_msg.metadata.created_at < expected_msg.metadata.created_at def test_client_with_get_properties() -> None: @@ -184,7 +197,7 @@ def test_client_with_get_properties() -> None: src_node_id=0, dst_node_id=1123, reply_to_message="", - ttl="", + ttl=DEFAULT_TTL, message_type=MessageTypeLegacy.GET_PROPERTIES, ), content=recordset, @@ -214,14 +227,24 @@ def test_client_with_get_properties() -> None: src_node_id=1123, dst_node_id=0, reply_to_message=message.metadata.message_id, - ttl="", + ttl=actual_msg.metadata.ttl, # computed based on [message].create_reply() message_type=MessageTypeLegacy.GET_PROPERTIES, ), content=expected_rs, ) assert actual_msg.content == expected_msg.content - assert actual_msg.metadata == expected_msg.metadata + attrs = vars(actual_msg.metadata) + attrs_keys = list(attrs.keys()) + attrs_keys.remove("_created_at") + # metadata.created_at will differ so let's exclude it from checks + for attr in attrs_keys: + assert getattr(actual_msg.metadata, attr) == getattr( + expected_msg.metadata, attr + ) + + # Ensure the message created last has a higher timestamp + assert actual_msg.metadata.created_at < expected_msg.metadata.created_at class TestMessageValidation(unittest.TestCase): @@ -237,9 +260,14 @@ def setUp(self) -> None: dst_node_id=20, reply_to_message="", group_id="group1", - ttl="60", + ttl=DEFAULT_TTL, message_type="mock", ) + # We need to set created_at in this way + # since this `self.in_metadata` is used for tests + # without it ever being part of a Message + self.in_metadata.created_at = time.time() + self.valid_out_metadata = Metadata( run_id=123, message_id="", @@ -247,7 +275,7 @@ def setUp(self) -> None: dst_node_id=10, reply_to_message="qwerty", group_id="group1", - ttl="60", + ttl=DEFAULT_TTL, message_type="mock", ) self.common_content = RecordSet() @@ -280,11 +308,15 @@ def test_invalid_message_run_id(self) -> 
None: value = 999 elif isinstance(value, str): value = "999" + elif isinstance(value, float): + if attr == "_created_at": + # make it be in 1h the past + value = value - 3600 setattr(invalid_metadata, attr, value) # Add to list invalid_metadata_list.append(invalid_metadata) # Assert for invalid_metadata in invalid_metadata_list: - msg._metadata = invalid_metadata # pylint: disable=protected-access + msg.__dict__["_metadata"] = invalid_metadata self.assertFalse(validate_out_message(msg, self.in_metadata)) diff --git a/src/py/flwr/client/mod/centraldp_mods.py b/src/py/flwr/client/mod/centraldp_mods.py index 4f4a595e8d9c..e6276ccf2245 100644 --- a/src/py/flwr/client/mod/centraldp_mods.py +++ b/src/py/flwr/client/mod/centraldp_mods.py @@ -82,7 +82,9 @@ def fixedclipping_mod( clipping_norm, ) - log(INFO, "fixedclipping_mod: parameters are clipped by value: %s.", clipping_norm) + log( + INFO, "fixedclipping_mod: parameters are clipped by value: %.4f.", clipping_norm + ) fit_res.parameters = ndarrays_to_parameters(client_to_server_params) out_msg.content = compat.fitres_to_recordset(fit_res, keep_input=True) @@ -146,7 +148,7 @@ def adaptiveclipping_mod( ) log( INFO, - "adaptiveclipping_mod: parameters are clipped by value: %s.", + "adaptiveclipping_mod: parameters are clipped by value: %.4f.", clipping_norm, ) diff --git a/src/py/flwr/client/mod/localdp_mod.py b/src/py/flwr/client/mod/localdp_mod.py index 3b0311a612b9..e70c86bc7d7c 100644 --- a/src/py/flwr/client/mod/localdp_mod.py +++ b/src/py/flwr/client/mod/localdp_mod.py @@ -128,7 +128,9 @@ def __call__( self.clipping_norm, ) log( - INFO, "LocalDpMod: parameters are clipped by value: %s.", self.clipping_norm + INFO, + "LocalDpMod: parameters are clipped by value: %.4f.", + self.clipping_norm, ) fit_res.parameters = ndarrays_to_parameters(client_to_server_params) @@ -137,11 +139,15 @@ def __call__( add_localdp_gaussian_noise_to_params( fit_res.parameters, self.sensitivity, self.epsilon, self.delta ) + + noise_value_sd 
= ( + self.sensitivity * np.sqrt(2 * np.log(1.25 / self.delta)) / self.epsilon + ) log( INFO, "LocalDpMod: local DP noise with " - "standard deviation: %s added to parameters.", - self.sensitivity * np.sqrt(2 * np.log(1.25 / self.delta)) / self.epsilon, + "standard deviation: %.4f added to parameters.", + noise_value_sd, ) out_msg.content = compat.fitres_to_recordset(fit_res, keep_input=True) diff --git a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py index 989d5f6e1361..5b196ad84321 100644 --- a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py +++ b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py @@ -187,7 +187,7 @@ def secaggplus_mod( # Return message out_content.configs_records[RECORD_KEY_CONFIGS] = ConfigsRecord(res, False) - return msg.create_reply(out_content, ttl="") + return msg.create_reply(out_content) def check_stage(current_stage: str, configs: ConfigsRecord) -> None: diff --git a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py index db5ed67c02a4..36844a2983a1 100644 --- a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py +++ b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py @@ -19,7 +19,14 @@ from typing import Callable, Dict, List from flwr.client.mod import make_ffn -from flwr.common import ConfigsRecord, Context, Message, Metadata, RecordSet +from flwr.common import ( + DEFAULT_TTL, + ConfigsRecord, + Context, + Message, + Metadata, + RecordSet, +) from flwr.common.constant import MessageType from flwr.common.secure_aggregation.secaggplus_constants import ( RECORD_KEY_CONFIGS, @@ -38,7 +45,7 @@ def get_test_handler( """.""" def empty_ffn(_msg: Message, _2: Context) -> Message: - return _msg.create_reply(RecordSet(), ttl="") + return _msg.create_reply(RecordSet()) app = make_ffn(empty_ffn, [secaggplus_mod]) @@ -51,7 +58,7 @@ def 
func(configs: Dict[str, ConfigsRecordValues]) -> ConfigsRecord: dst_node_id=123, reply_to_message="", group_id="", - ttl="", + ttl=DEFAULT_TTL, message_type=MessageType.TRAIN, ), content=RecordSet( diff --git a/src/py/flwr/client/mod/utils_test.py b/src/py/flwr/client/mod/utils_test.py index e588b8b53b3b..4676a2c02c4b 100644 --- a/src/py/flwr/client/mod/utils_test.py +++ b/src/py/flwr/client/mod/utils_test.py @@ -20,6 +20,7 @@ from flwr.client.typing import ClientAppCallable, Mod from flwr.common import ( + DEFAULT_TTL, ConfigsRecord, Context, Message, @@ -84,7 +85,7 @@ def _get_dummy_flower_message() -> Message: src_node_id=0, dst_node_id=0, reply_to_message="", - ttl="", + ttl=DEFAULT_TTL, message_type="mock", ), ) diff --git a/src/py/flwr/client/rest_client/connection.py b/src/py/flwr/client/rest_client/connection.py index d2cc71ba3b3f..da8fbd351ab1 100644 --- a/src/py/flwr/client/rest_client/connection.py +++ b/src/py/flwr/client/rest_client/connection.py @@ -15,16 +15,28 @@ """Contextmanager for a REST request-response channel to the Flower server.""" +import random import sys +import threading from contextlib import contextmanager from copy import copy from logging import ERROR, INFO, WARN -from typing import Callable, Dict, Iterator, Optional, Tuple, Union, cast +from typing import Callable, Iterator, Optional, Tuple, Type, TypeVar, Union +from cryptography.hazmat.primitives.asymmetric import ec +from google.protobuf.message import Message as GrpcMessage + +from flwr.client.heartbeat import start_ping_loop from flwr.client.message_handler.message_handler import validate_out_message from flwr.client.message_handler.task_handler import get_task_ins, validate_task_ins from flwr.common import GRPC_MAX_MESSAGE_LENGTH -from flwr.common.constant import MISSING_EXTRA_REST +from flwr.common.constant import ( + MISSING_EXTRA_REST, + PING_BASE_MULTIPLIER, + PING_CALL_TIMEOUT, + PING_DEFAULT_INTERVAL, + PING_RANDOM_RANGE, +) from flwr.common.logger import log from 
flwr.common.message import Message, Metadata from flwr.common.retry_invoker import RetryInvoker @@ -33,6 +45,11 @@ CreateNodeRequest, CreateNodeResponse, DeleteNodeRequest, + DeleteNodeResponse, + GetRunRequest, + GetRunResponse, + PingRequest, + PingResponse, PullTaskInsRequest, PullTaskInsResponse, PushTaskResRequest, @@ -47,19 +64,18 @@ sys.exit(MISSING_EXTRA_REST) -KEY_NODE = "node" -KEY_METADATA = "in_message_metadata" - - PATH_CREATE_NODE: str = "api/v0/fleet/create-node" PATH_DELETE_NODE: str = "api/v0/fleet/delete-node" PATH_PULL_TASK_INS: str = "api/v0/fleet/pull-task-ins" PATH_PUSH_TASK_RES: str = "api/v0/fleet/push-task-res" +PATH_PING: str = "api/v0/fleet/ping" +PATH_GET_RUN: str = "/api/v0/fleet/get-run" + +T = TypeVar("T", bound=GrpcMessage) @contextmanager -# pylint: disable-next=too-many-statements -def http_request_response( +def http_request_response( # pylint: disable=,R0913, R0914, R0915 server_address: str, insecure: bool, # pylint: disable=unused-argument retry_invoker: RetryInvoker, @@ -67,12 +83,16 @@ def http_request_response( root_certificates: Optional[ Union[bytes, str] ] = None, # pylint: disable=unused-argument + authentication_keys: Optional[ # pylint: disable=unused-argument + Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + ] = None, ) -> Iterator[ Tuple[ Callable[[], Optional[Message]], Callable[[Message], None], Optional[Callable[[], None]], Optional[Callable[[], None]], + Optional[Callable[[int], Tuple[str, str]]], ] ]: """Primitives for request/response-based interaction with a server. 
@@ -127,143 +147,142 @@ def http_request_response( "must be provided as a string path to the client.", ) - # Necessary state to validate messages to be sent - state: Dict[str, Optional[Metadata]] = {KEY_METADATA: None} - - # Enable create_node and delete_node to store node - node_store: Dict[str, Optional[Node]] = {KEY_NODE: None} + # Shared variables for inner functions + metadata: Optional[Metadata] = None + node: Optional[Node] = None + ping_thread: Optional[threading.Thread] = None + ping_stop_event = threading.Event() ########################################################################### - # receive/send functions + # ping/create_node/delete_node/receive/send/get_run functions ########################################################################### - def create_node() -> None: - """Set create_node.""" - create_node_req_proto = CreateNodeRequest() - create_node_req_bytes: bytes = create_node_req_proto.SerializeToString() - - res = retry_invoker.invoke( - requests.post, - url=f"{base_url}/{PATH_CREATE_NODE}", - headers={ - "Accept": "application/protobuf", - "Content-Type": "application/protobuf", - }, - data=create_node_req_bytes, - verify=verify, - timeout=None, - ) + def _request( + req: GrpcMessage, res_type: Type[T], api_path: str, retry: bool = True + ) -> Optional[T]: + # Serialize the request + req_bytes = req.SerializeToString() + + # Send the request + def post() -> requests.Response: + return requests.post( + f"{base_url}/{api_path}", + data=req_bytes, + headers={ + "Accept": "application/protobuf", + "Content-Type": "application/protobuf", + }, + verify=verify, + timeout=None, + ) + + if retry: + res: requests.Response = retry_invoker.invoke(post) + else: + res = post() # Check status code and headers if res.status_code != 200: - return + return None if "content-type" not in res.headers: log( WARN, "[Node] POST /%s: missing header `Content-Type`", - PATH_PULL_TASK_INS, + api_path, ) - return + return None if res.headers["content-type"] != 
"application/protobuf": log( WARN, "[Node] POST /%s: header `Content-Type` has wrong value", - PATH_PULL_TASK_INS, + api_path, ) - return + return None # Deserialize ProtoBuf from bytes - create_node_response_proto = CreateNodeResponse() - create_node_response_proto.ParseFromString(res.content) - # pylint: disable-next=no-member - node_store[KEY_NODE] = create_node_response_proto.node + grpc_res = res_type() + grpc_res.ParseFromString(res.content) + return grpc_res + + def ping() -> None: + # Get Node + if node is None: + log(ERROR, "Node instance missing") + return + + # Construct the ping request + req = PingRequest(node=node, ping_interval=PING_DEFAULT_INTERVAL) + + # Send the request + res = _request(req, PingResponse, PATH_PING, retry=False) + if res is None: + return + + # Check if success + if not res.success: + raise RuntimeError("Ping failed unexpectedly.") + + # Wait + rd = random.uniform(*PING_RANDOM_RANGE) + next_interval: float = PING_DEFAULT_INTERVAL - PING_CALL_TIMEOUT + next_interval *= PING_BASE_MULTIPLIER + rd + if not ping_stop_event.is_set(): + ping_stop_event.wait(next_interval) + + def create_node() -> None: + """Set create_node.""" + req = CreateNodeRequest(ping_interval=PING_DEFAULT_INTERVAL) + + # Send the request + res = _request(req, CreateNodeResponse, PATH_CREATE_NODE) + if res is None: + return + + # Remember the node and the ping-loop thread + nonlocal node, ping_thread + node = res.node + ping_thread = start_ping_loop(ping, ping_stop_event) def delete_node() -> None: """Set delete_node.""" - if node_store[KEY_NODE] is None: + nonlocal node + if node is None: log(ERROR, "Node instance missing") return - node: Node = cast(Node, node_store[KEY_NODE]) - delete_node_req_proto = DeleteNodeRequest(node=node) - delete_node_req_req_bytes: bytes = delete_node_req_proto.SerializeToString() - res = retry_invoker.invoke( - requests.post, - url=f"{base_url}/{PATH_DELETE_NODE}", - headers={ - "Accept": "application/protobuf", - "Content-Type": 
"application/protobuf", - }, - data=delete_node_req_req_bytes, - verify=verify, - timeout=None, - ) - # Check status code and headers - if res.status_code != 200: - return - if "content-type" not in res.headers: - log( - WARN, - "[Node] POST /%s: missing header `Content-Type`", - PATH_PULL_TASK_INS, - ) + # Stop the ping-loop thread + ping_stop_event.set() + if ping_thread is not None: + ping_thread.join() + + # Send DeleteNode request + req = DeleteNodeRequest(node=node) + + # Send the request + res = _request(req, DeleteNodeResponse, PATH_CREATE_NODE) + if res is None: return - if res.headers["content-type"] != "application/protobuf": - log( - WARN, - "[Node] POST /%s: header `Content-Type` has wrong value", - PATH_PULL_TASK_INS, - ) + + # Cleanup + node = None def receive() -> Optional[Message]: """Receive next task from server.""" # Get Node - if node_store[KEY_NODE] is None: + if node is None: log(ERROR, "Node instance missing") return None - node: Node = cast(Node, node_store[KEY_NODE]) - - # Request instructions (task) from server - pull_task_ins_req_proto = PullTaskInsRequest(node=node) - pull_task_ins_req_bytes: bytes = pull_task_ins_req_proto.SerializeToString() # Request instructions (task) from server - res = retry_invoker.invoke( - requests.post, - url=f"{base_url}/{PATH_PULL_TASK_INS}", - headers={ - "Accept": "application/protobuf", - "Content-Type": "application/protobuf", - }, - data=pull_task_ins_req_bytes, - verify=verify, - timeout=None, - ) + req = PullTaskInsRequest(node=node) - # Check status code and headers - if res.status_code != 200: + # Send the request + res = _request(req, PullTaskInsResponse, PATH_PULL_TASK_INS) + if res is None: return None - if "content-type" not in res.headers: - log( - WARN, - "[Node] POST /%s: missing header `Content-Type`", - PATH_PULL_TASK_INS, - ) - return None - if res.headers["content-type"] != "application/protobuf": - log( - WARN, - "[Node] POST /%s: header `Content-Type` has wrong value", - 
PATH_PULL_TASK_INS, - ) - return None - - # Deserialize ProtoBuf from bytes - pull_task_ins_response_proto = PullTaskInsResponse() - pull_task_ins_response_proto.ParseFromString(res.content) # Get the current TaskIns - task_ins: Optional[TaskIns] = get_task_ins(pull_task_ins_response_proto) + task_ins: Optional[TaskIns] = get_task_ins(res) # Discard the current TaskIns if not valid if task_ins is not None and not ( @@ -273,86 +292,64 @@ def receive() -> Optional[Message]: task_ins = None # Return the Message if available + nonlocal metadata message = None - state[KEY_METADATA] = None if task_ins is not None: message = message_from_taskins(task_ins) - state[KEY_METADATA] = copy(message.metadata) + metadata = copy(message.metadata) log(INFO, "[Node] POST /%s: success", PATH_PULL_TASK_INS) return message def send(message: Message) -> None: """Send task result back to server.""" # Get Node - if node_store[KEY_NODE] is None: + if node is None: log(ERROR, "Node instance missing") return # Get incoming message - in_metadata = state[KEY_METADATA] - if in_metadata is None: + nonlocal metadata + if metadata is None: log(ERROR, "No current message") return # Validate out message - if not validate_out_message(message, in_metadata): + if not validate_out_message(message, metadata): log(ERROR, "Invalid out message") return + metadata = None # Construct TaskRes task_res = message_to_taskres(message) # Serialize ProtoBuf to bytes - push_task_res_request_proto = PushTaskResRequest(task_res_list=[task_res]) - push_task_res_request_bytes: bytes = ( - push_task_res_request_proto.SerializeToString() - ) - - # Send ClientMessage to server - res = retry_invoker.invoke( - requests.post, - url=f"{base_url}/{PATH_PUSH_TASK_RES}", - headers={ - "Accept": "application/protobuf", - "Content-Type": "application/protobuf", - }, - data=push_task_res_request_bytes, - verify=verify, - timeout=None, - ) + req = PushTaskResRequest(task_res_list=[task_res]) - state[KEY_METADATA] = None - - # Check 
status code and headers - if res.status_code != 200: - return - if "content-type" not in res.headers: - log( - WARN, - "[Node] POST /%s: missing header `Content-Type`", - PATH_PUSH_TASK_RES, - ) - return - if res.headers["content-type"] != "application/protobuf": - log( - WARN, - "[Node] POST /%s: header `Content-Type` has wrong value", - PATH_PUSH_TASK_RES, - ) + # Send the request + res = _request(req, PushTaskResResponse, PATH_PUSH_TASK_RES) + if res is None: return - # Deserialize ProtoBuf from bytes - push_task_res_response_proto = PushTaskResResponse() - push_task_res_response_proto.ParseFromString(res.content) log( INFO, "[Node] POST /%s: success, created result %s", PATH_PUSH_TASK_RES, - push_task_res_response_proto.results, # pylint: disable=no-member + res.results, # pylint: disable=no-member ) + def get_run(run_id: int) -> Tuple[str, str]: + # Construct the request + req = GetRunRequest(run_id=run_id) + + # Send the request + res = _request(req, GetRunResponse, PATH_GET_RUN) + if res is None: + return "", "" + + return res.run.fab_id, res.run.fab_version + try: # Yield methods - yield (receive, send, create_node, delete_node) + yield (receive, send, create_node, delete_node, get_run) except Exception as exc: # pylint: disable=broad-except log(ERROR, exc) diff --git a/src/py/flwr_experimental/baseline/tf_cifar/__init__.py b/src/py/flwr/client/supernode/__init__.py similarity index 72% rename from src/py/flwr_experimental/baseline/tf_cifar/__init__.py rename to src/py/flwr/client/supernode/__init__.py index ad2e33481ba8..bc505f693875 100644 --- a/src/py/flwr_experimental/baseline/tf_cifar/__init__.py +++ b/src/py/flwr/client/supernode/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -12,11 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Flower baseline using TensorFlow for CIFAR-10/100 image classification.""" +"""Flower SuperNode.""" -DEFAULT_SERVER_ADDRESS = "[::]:8080" +from .app import run_client_app as run_client_app +from .app import run_supernode as run_supernode -SEED = 2020 - -NUM_CLASSES = 10 +__all__ = [ + "run_client_app", + "run_supernode", +] diff --git a/src/py/flwr/client/supernode/app.py b/src/py/flwr/client/supernode/app.py new file mode 100644 index 000000000000..e46ed43cc676 --- /dev/null +++ b/src/py/flwr/client/supernode/app.py @@ -0,0 +1,281 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Flower SuperNode.""" + +import argparse +import sys +from logging import DEBUG, INFO, WARN +from pathlib import Path +from typing import Callable, Optional, Tuple + +from cryptography.hazmat.primitives.asymmetric import ec +from cryptography.hazmat.primitives.serialization import ( + load_ssh_private_key, + load_ssh_public_key, +) + +from flwr.client.client_app import ClientApp, LoadClientAppError +from flwr.common import EventType, event +from flwr.common.exit_handlers import register_exit_handlers +from flwr.common.logger import log +from flwr.common.object_ref import load_app, validate +from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( + ssh_types_to_elliptic_curve, +) + +from ..app import _start_client_internal + + +def run_supernode() -> None: + """Run Flower SuperNode.""" + log(INFO, "Starting Flower SuperNode") + + event(EventType.RUN_SUPERNODE_ENTER) + + _ = _parse_args_run_supernode().parse_args() + + log( + DEBUG, + "Flower SuperNode starting...", + ) + + # Graceful shutdown + register_exit_handlers( + event_type=EventType.RUN_SUPERNODE_LEAVE, + ) + + +def run_client_app() -> None: + """Run Flower client app.""" + log(INFO, "Long-running Flower client starting") + + event(EventType.RUN_CLIENT_APP_ENTER) + + args = _parse_args_run_client_app().parse_args() + + root_certificates = _get_certificates(args) + log( + DEBUG, + "Flower will load ClientApp `%s`", + getattr(args, "client-app"), + ) + load_fn = _get_load_client_app_fn(args) + authentication_keys = _try_setup_client_authentication(args) + + _start_client_internal( + server_address=args.server, + load_client_app_fn=load_fn, + transport="rest" if args.rest else "grpc-rere", + root_certificates=root_certificates, + insecure=args.insecure, + authentication_keys=authentication_keys, + max_retries=args.max_retries, + max_wait_time=args.max_wait_time, + ) + 
register_exit_handlers(event_type=EventType.RUN_CLIENT_APP_LEAVE) + + +def _get_certificates(args: argparse.Namespace) -> Optional[bytes]: + """Load certificates if specified in args.""" + # Obtain certificates + if args.insecure: + if args.root_certificates is not None: + sys.exit( + "Conflicting options: The '--insecure' flag disables HTTPS, " + "but '--root-certificates' was also specified. Please remove " + "the '--root-certificates' option when running in insecure mode, " + "or omit '--insecure' to use HTTPS." + ) + log( + WARN, + "Option `--insecure` was set. " + "Starting insecure HTTP client connected to %s.", + args.server, + ) + root_certificates = None + else: + # Load the certificates if provided, or load the system certificates + cert_path = args.root_certificates + if cert_path is None: + root_certificates = None + else: + root_certificates = Path(cert_path).read_bytes() + log( + DEBUG, + "Starting secure HTTPS client connected to %s " + "with the following certificates: %s.", + args.server, + cert_path, + ) + return root_certificates + + +def _get_load_client_app_fn( + args: argparse.Namespace, +) -> Callable[[], ClientApp]: + """Get the load_client_app_fn function.""" + client_app_dir = args.dir + if client_app_dir is not None: + sys.path.insert(0, client_app_dir) + + app_ref: str = getattr(args, "client-app") + valid, error_msg = validate(app_ref) + if not valid and error_msg: + raise LoadClientAppError(error_msg) from None + + def _load() -> ClientApp: + client_app = load_app(app_ref, LoadClientAppError) + + if not isinstance(client_app, ClientApp): + raise LoadClientAppError( + f"Attribute {app_ref} is not of type {ClientApp}", + ) from None + + return client_app + + return _load + + +def _parse_args_run_supernode() -> argparse.ArgumentParser: + """Parse flower-supernode command line arguments.""" + parser = argparse.ArgumentParser( + description="Start a Flower SuperNode", + ) + + parser.add_argument( + "client-app", + nargs="?", + default="", + 
help="For example: `client:app` or `project.package.module:wrapper.app`. " + "This is optional and serves as the default ClientApp to be loaded when " + "the ServerApp does not specify `fab_id` and `fab_version`. " + "If not provided, defaults to an empty string.", + ) + _parse_args_common(parser) + parser.add_argument( + "--flwr-dir", + default=None, + help="""The path containing installed Flower Apps. + By default, this value isequal to: + + - `$FLWR_HOME/` if `$FLWR_HOME` is defined + - `$XDG_DATA_HOME/.flwr/` if `$XDG_DATA_HOME` is defined + - `$HOME/.flwr/` in all other cases + """, + ) + + return parser + + +def _parse_args_run_client_app() -> argparse.ArgumentParser: + """Parse flower-client-app command line arguments.""" + parser = argparse.ArgumentParser( + description="Start a Flower client app", + ) + + parser.add_argument( + "client-app", + help="For example: `client:app` or `project.package.module:wrapper.app`", + ) + _parse_args_common(parser=parser) + + return parser + + +def _parse_args_common(parser: argparse.ArgumentParser) -> None: + parser.add_argument( + "--insecure", + action="store_true", + help="Run the client without HTTPS. By default, the client runs with " + "HTTPS enabled. Use this flag only if you understand the risks.", + ) + parser.add_argument( + "--rest", + action="store_true", + help="Use REST as a transport layer for the client.", + ) + parser.add_argument( + "--root-certificates", + metavar="ROOT_CERT", + type=str, + help="Specifies the path to the PEM-encoded root certificate file for " + "establishing secure HTTPS connections.", + ) + parser.add_argument( + "--server", + default="0.0.0.0:9092", + help="Server address", + ) + parser.add_argument( + "--max-retries", + type=int, + default=None, + help="The maximum number of times the client will try to connect to the" + "server before giving up in case of a connection error. 
By default," + "it is set to None, meaning there is no limit to the number of tries.", + ) + parser.add_argument( + "--max-wait-time", + type=float, + default=None, + help="The maximum duration before the client stops trying to" + "connect to the server in case of connection error. By default, it" + "is set to None, meaning there is no limit to the total time.", + ) + parser.add_argument( + "--dir", + default="", + help="Add specified directory to the PYTHONPATH and load Flower " + "app from there." + " Default: current working directory.", + ) + parser.add_argument( + "--authentication-keys", + nargs=2, + metavar=("CLIENT_PRIVATE_KEY", "CLIENT_PUBLIC_KEY"), + type=str, + help="Provide two file paths: (1) the client's private " + "key file, and (2) the client's public key file.", + ) + + +def _try_setup_client_authentication( + args: argparse.Namespace, +) -> Optional[Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]]: + if not args.authentication_keys: + return None + + ssh_private_key = load_ssh_private_key( + Path(args.authentication_keys[0]).read_bytes(), + None, + ) + ssh_public_key = load_ssh_public_key(Path(args.authentication_keys[1]).read_bytes()) + + try: + client_private_key, client_public_key = ssh_types_to_elliptic_curve( + ssh_private_key, ssh_public_key + ) + except TypeError: + sys.exit( + "The file paths provided could not be read as a private and public " + "key pair. Client authentication requires an elliptic curve public and " + "private key pair. Please provide the file paths containing elliptic " + "curve private and public keys to '--authentication-keys'." 
+ ) + + return ( + client_private_key, + client_public_key, + ) diff --git a/src/py/flwr/common/__init__.py b/src/py/flwr/common/__init__.py index 9f9ff7ebc68a..2fb98c82dd6f 100644 --- a/src/py/flwr/common/__init__.py +++ b/src/py/flwr/common/__init__.py @@ -22,6 +22,7 @@ from .grpc import GRPC_MAX_MESSAGE_LENGTH from .logger import configure as configure from .logger import log as log +from .message import DEFAULT_TTL from .message import Error as Error from .message import Message as Message from .message import Metadata as Metadata @@ -87,6 +88,7 @@ "Message", "MessageType", "MessageTypeLegacy", + "DEFAULT_TTL", "Metadata", "Metrics", "MetricsAggregationFn", diff --git a/src/py/flwr/common/constant.py b/src/py/flwr/common/constant.py index 7d30a10f5881..b6d39b6e8932 100644 --- a/src/py/flwr/common/constant.py +++ b/src/py/flwr/common/constant.py @@ -36,6 +36,13 @@ TRANSPORT_TYPE_VCE, ] +# Constants for ping +PING_DEFAULT_INTERVAL = 30 +PING_CALL_TIMEOUT = 5 +PING_BASE_MULTIPLIER = 0.8 +PING_RANDOM_RANGE = (-0.1, 0.1) +PING_MAX_INTERVAL = 1e300 + class MessageType: """Message type.""" @@ -68,3 +75,16 @@ class SType: def __new__(cls) -> SType: """Prevent instantiation.""" raise TypeError(f"{cls.__name__} cannot be instantiated.") + + +class ErrorCode: + """Error codes for Message's Error.""" + + UNKNOWN = 0 + LOAD_CLIENT_APP_EXCEPTION = 1 + CLIENT_APP_RAISED_EXCEPTION = 2 + NODE_UNAVAILABLE = 3 + + def __new__(cls) -> ErrorCode: + """Prevent instantiation.""" + raise TypeError(f"{cls.__name__} cannot be instantiated.") diff --git a/src/py/flwr/common/grpc.py b/src/py/flwr/common/grpc.py index 7d0eba078ab0..ead0329ca79c 100644 --- a/src/py/flwr/common/grpc.py +++ b/src/py/flwr/common/grpc.py @@ -16,7 +16,7 @@ from logging import DEBUG -from typing import Optional +from typing import Optional, Sequence import grpc @@ -30,6 +30,7 @@ def create_channel( insecure: bool, root_certificates: Optional[bytes] = None, max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, + 
interceptors: Optional[Sequence[grpc.UnaryUnaryClientInterceptor]] = None, ) -> grpc.Channel: """Create a gRPC channel, either secure or insecure.""" # Check for conflicting parameters @@ -57,4 +58,7 @@ def create_channel( ) log(DEBUG, "Opened secure gRPC connection using certificates") + if interceptors is not None: + channel = grpc.intercept_channel(channel, interceptors) + return channel diff --git a/src/py/flwr/common/logger.py b/src/py/flwr/common/logger.py index 2bc41773ed61..f9c70afc25f8 100644 --- a/src/py/flwr/common/logger.py +++ b/src/py/flwr/common/logger.py @@ -164,13 +164,13 @@ def configure( log = logger.log # pylint: disable=invalid-name -def warn_experimental_feature(name: str) -> None: - """Warn the user when they use an experimental feature.""" +def warn_preview_feature(name: str) -> None: + """Warn the user when they use a preview feature.""" log( WARN, - """EXPERIMENTAL FEATURE: %s + """PREVIEW FEATURE: %s - This is an experimental feature. It could change significantly or be removed + This is a preview feature. It could change significantly or be removed entirely in future versions of Flower. """, name, @@ -188,3 +188,29 @@ def warn_deprecated_feature(name: str) -> None: """, name, ) + + +def set_logger_propagation( + child_logger: logging.Logger, value: bool = True +) -> logging.Logger: + """Set the logger propagation attribute. + + Parameters + ---------- + child_logger : logging.Logger + Child logger object + value : bool + Boolean setting for propagation. If True, both parent and child logger + display messages. Otherwise, only the child logger displays a message. + This False setting prevents duplicate logs in Colab notebooks. 
+ Reference: https://stackoverflow.com/a/19561320 + + Returns + ------- + logging.Logger + Child logger object with updated propagation setting + """ + child_logger.propagate = value + if not child_logger.propagate: + child_logger.log(logging.DEBUG, "Logger propagate set to False") + return child_logger diff --git a/src/py/flwr/common/message.py b/src/py/flwr/common/message.py index 88cf750f1a94..ccbd4109c237 100644 --- a/src/py/flwr/common/message.py +++ b/src/py/flwr/common/message.py @@ -16,10 +16,15 @@ from __future__ import annotations +import time +import warnings from dataclasses import dataclass +from typing import Optional, cast from .record import RecordSet +DEFAULT_TTL = 3600 + @dataclass class Metadata: # pylint: disable=too-many-instance-attributes @@ -40,8 +45,8 @@ class Metadata: # pylint: disable=too-many-instance-attributes group_id : str An identifier for grouping messages. In some settings, this is used as the FL round. - ttl : str - Time-to-live for this message. + ttl : float + Time-to-live for this message in seconds. message_type : str A string that encodes the action to be executed on the receiving end. @@ -51,16 +56,6 @@ class Metadata: # pylint: disable=too-many-instance-attributes is more relevant when conducting simulations. 
""" - _run_id: int - _message_id: str - _src_node_id: int - _dst_node_id: int - _reply_to_message: str - _group_id: str - _ttl: str - _message_type: str - _partition_id: int | None - def __init__( # pylint: disable=too-many-arguments self, run_id: int, @@ -69,89 +64,102 @@ def __init__( # pylint: disable=too-many-arguments dst_node_id: int, reply_to_message: str, group_id: str, - ttl: str, + ttl: float, message_type: str, partition_id: int | None = None, ) -> None: - self._run_id = run_id - self._message_id = message_id - self._src_node_id = src_node_id - self._dst_node_id = dst_node_id - self._reply_to_message = reply_to_message - self._group_id = group_id - self._ttl = ttl - self._message_type = message_type - self._partition_id = partition_id + var_dict = { + "_run_id": run_id, + "_message_id": message_id, + "_src_node_id": src_node_id, + "_dst_node_id": dst_node_id, + "_reply_to_message": reply_to_message, + "_group_id": group_id, + "_ttl": ttl, + "_message_type": message_type, + "_partition_id": partition_id, + } + self.__dict__.update(var_dict) @property def run_id(self) -> int: """An identifier for the current run.""" - return self._run_id + return cast(int, self.__dict__["_run_id"]) @property def message_id(self) -> str: """An identifier for the current message.""" - return self._message_id + return cast(str, self.__dict__["_message_id"]) @property def src_node_id(self) -> int: """An identifier for the node sending this message.""" - return self._src_node_id + return cast(int, self.__dict__["_src_node_id"]) @property def reply_to_message(self) -> str: """An identifier for the message this message replies to.""" - return self._reply_to_message + return cast(str, self.__dict__["_reply_to_message"]) @property def dst_node_id(self) -> int: """An identifier for the node receiving this message.""" - return self._dst_node_id + return cast(int, self.__dict__["_dst_node_id"]) @dst_node_id.setter def dst_node_id(self, value: int) -> None: """Set dst_node_id.""" - 
self._dst_node_id = value + self.__dict__["_dst_node_id"] = value @property def group_id(self) -> str: """An identifier for grouping messages.""" - return self._group_id + return cast(str, self.__dict__["_group_id"]) @group_id.setter def group_id(self, value: str) -> None: """Set group_id.""" - self._group_id = value + self.__dict__["_group_id"] = value + + @property + def created_at(self) -> float: + """Unix timestamp when the message was created.""" + return cast(float, self.__dict__["_created_at"]) + + @created_at.setter + def created_at(self, value: float) -> None: + """Set creation timestamp for this message.""" + self.__dict__["_created_at"] = value @property - def ttl(self) -> str: + def ttl(self) -> float: """Time-to-live for this message.""" - return self._ttl + return cast(float, self.__dict__["_ttl"]) @ttl.setter - def ttl(self, value: str) -> None: + def ttl(self, value: float) -> None: """Set ttl.""" - self._ttl = value + self.__dict__["_ttl"] = value @property def message_type(self) -> str: """A string that encodes the action to be executed on the receiving end.""" - return self._message_type + return cast(str, self.__dict__["_message_type"]) @message_type.setter def message_type(self, value: str) -> None: """Set message_type.""" - self._message_type = value + self.__dict__["_message_type"] = value @property def partition_id(self) -> int | None: """An identifier telling which data partition a ClientApp should use.""" - return self._partition_id + return cast(int, self.__dict__["_partition_id"]) @partition_id.setter def partition_id(self, value: int) -> None: - """Set patition_id.""" - self._partition_id = value + """Set partition_id.""" + self.__dict__["_partition_id"] = value @dataclass @@ -166,22 +174,22 @@ class Error: A reason for why the error arose (e.g. 
an exception stack-trace) """ - _code: int - _reason: str | None = None - def __init__(self, code: int, reason: str | None = None) -> None: - self._code = code - self._reason = reason + var_dict = { + "_code": code, + "_reason": reason, + } + self.__dict__.update(var_dict) @property def code(self) -> int: """Error code.""" - return self._code + return cast(int, self.__dict__["_code"]) @property def reason(self) -> str | None: """Reason reported about the error.""" - return self._reason + return cast(Optional[str], self.__dict__["_reason"]) @dataclass @@ -200,105 +208,108 @@ class Message: when processing another message. """ - _metadata: Metadata - _content: RecordSet | None = None - _error: Error | None = None - def __init__( self, metadata: Metadata, content: RecordSet | None = None, error: Error | None = None, ) -> None: - self._metadata = metadata - if not (content is None) ^ (error is None): raise ValueError("Either `content` or `error` must be set, but not both.") - self._content = content - self._error = error + metadata.created_at = time.time() # Set the message creation timestamp + var_dict = { + "_metadata": metadata, + "_content": content, + "_error": error, + } + self.__dict__.update(var_dict) @property def metadata(self) -> Metadata: """A dataclass including information about the message to be executed.""" - return self._metadata + return cast(Metadata, self.__dict__["_metadata"]) @property def content(self) -> RecordSet: """The content of this message.""" - if self._content is None: + if self.__dict__["_content"] is None: raise ValueError( "Message content is None. Use .has_content() " "to check if a message has content." 
) - return self._content + return cast(RecordSet, self.__dict__["_content"]) @content.setter def content(self, value: RecordSet) -> None: """Set content.""" - if self._error is None: - self._content = value + if self.__dict__["_error"] is None: + self.__dict__["_content"] = value else: raise ValueError("A message with an error set cannot have content.") @property def error(self) -> Error: """Error captured by this message.""" - if self._error is None: + if self.__dict__["_error"] is None: raise ValueError( "Message error is None. Use .has_error() " "to check first if a message carries an error." ) - return self._error + return cast(Error, self.__dict__["_error"]) @error.setter def error(self, value: Error) -> None: """Set error.""" if self.has_content(): raise ValueError("A message with content set cannot carry an error.") - self._error = value + self.__dict__["_error"] = value def has_content(self) -> bool: """Return True if message has content, else False.""" - return self._content is not None + return self.__dict__["_content"] is not None def has_error(self) -> bool: """Return True if message has an error, else False.""" - return self._error is not None - - def _create_reply_metadata(self, ttl: str) -> Metadata: - """Construct metadata for a reply message.""" - return Metadata( - run_id=self.metadata.run_id, - message_id="", - src_node_id=self.metadata.dst_node_id, - dst_node_id=self.metadata.src_node_id, - reply_to_message=self.metadata.message_id, - group_id=self.metadata.group_id, - ttl=ttl, - message_type=self.metadata.message_type, - partition_id=self.metadata.partition_id, - ) + return self.__dict__["_error"] is not None - def create_error_reply( - self, - error: Error, - ttl: str, - ) -> Message: + def create_error_reply(self, error: Error, ttl: float | None = None) -> Message: """Construct a reply message indicating an error happened. Parameters ---------- error : Error The error that was encountered. - ttl : str - Time-to-live for this message. 
+ ttl : Optional[float] (default: None) + Time-to-live for this message in seconds. If unset, it will be set based + on the remaining time for the received message before it expires. This + follows the equation: + + ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at) """ + if ttl: + warnings.warn( + "A custom TTL was set, but note that the SuperLink does not enforce " + "the TTL yet. The SuperLink will start enforcing the TTL in a future " + "version of Flower.", + stacklevel=2, + ) + # If no TTL passed, use default for message creation (will update after + # message creation) + ttl_ = DEFAULT_TTL if ttl is None else ttl # Create reply with error - message = Message(metadata=self._create_reply_metadata(ttl), error=error) + message = Message(metadata=_create_reply_metadata(self, ttl_), error=error) + + if ttl is None: + # Set TTL equal to the remaining time for the received message to expire + ttl = self.metadata.ttl - ( + message.metadata.created_at - self.metadata.created_at + ) + message.metadata.ttl = ttl + return message - def create_reply(self, content: RecordSet, ttl: str) -> Message: + def create_reply(self, content: RecordSet, ttl: float | None = None) -> Message: """Create a reply to this message with specified content and TTL. The method generates a new `Message` as a reply to this message. @@ -309,15 +320,54 @@ def create_reply(self, content: RecordSet, ttl: str) -> Message: ---------- content : RecordSet The content for the reply message. - ttl : str - Time-to-live for this message. + ttl : Optional[float] (default: None) + Time-to-live for this message in seconds. If unset, it will be set based + on the remaining time for the received message before it expires. This + follows the equation: + + ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at) Returns ------- Message A new `Message` instance representing the reply. 
""" - return Message( - metadata=self._create_reply_metadata(ttl), + if ttl: + warnings.warn( + "A custom TTL was set, but note that the SuperLink does not enforce " + "the TTL yet. The SuperLink will start enforcing the TTL in a future " + "version of Flower.", + stacklevel=2, + ) + # If no TTL passed, use default for message creation (will update after + # message creation) + ttl_ = DEFAULT_TTL if ttl is None else ttl + + message = Message( + metadata=_create_reply_metadata(self, ttl_), content=content, ) + + if ttl is None: + # Set TTL equal to the remaining time for the received message to expire + ttl = self.metadata.ttl - ( + message.metadata.created_at - self.metadata.created_at + ) + message.metadata.ttl = ttl + + return message + + +def _create_reply_metadata(msg: Message, ttl: float) -> Metadata: + """Construct metadata for a reply message.""" + return Metadata( + run_id=msg.metadata.run_id, + message_id="", + src_node_id=msg.metadata.dst_node_id, + dst_node_id=msg.metadata.src_node_id, + reply_to_message=msg.metadata.message_id, + group_id=msg.metadata.group_id, + ttl=ttl, + message_type=msg.metadata.message_type, + partition_id=msg.metadata.partition_id, + ) diff --git a/src/py/flwr/common/message_test.py b/src/py/flwr/common/message_test.py index ba628bb3235a..1a5da0517352 100644 --- a/src/py/flwr/common/message_test.py +++ b/src/py/flwr/common/message_test.py @@ -14,9 +14,9 @@ # ============================================================================== """Message tests.""" - +import time from contextlib import ExitStack -from typing import Any, Callable +from typing import Any, Callable, Optional import pytest @@ -62,24 +62,32 @@ def test_message_creation( if context: stack.enter_context(context) - _ = Message( + current_time = time.time() + message = Message( metadata=metadata, content=None if content_fn is None else content_fn(maker), error=None if error_fn is None else error_fn(0), ) + assert message.metadata.created_at > current_time + assert 
message.metadata.created_at < time.time() + -def create_message_with_content() -> Message: +def create_message_with_content(ttl: Optional[float] = None) -> Message: """Create a Message with content.""" maker = RecordMaker(state=2) metadata = maker.metadata() + if ttl: + metadata.ttl = ttl return Message(metadata=metadata, content=RecordSet()) -def create_message_with_error() -> Message: +def create_message_with_error(ttl: Optional[float] = None) -> Message: """Create a Message with error.""" maker = RecordMaker(state=2) metadata = maker.metadata() + if ttl: + metadata.ttl = ttl return Message(metadata=metadata, error=Error(code=1)) @@ -107,3 +115,45 @@ def test_altering_message( message.error = Error(code=123) if message.has_error(): message.content = RecordSet() + + +@pytest.mark.parametrize( + "message_creation_fn,ttl,reply_ttl", + [ + (create_message_with_content, 1e6, None), + (create_message_with_error, 1e6, None), + (create_message_with_content, 1e6, 3600), + (create_message_with_error, 1e6, 3600), + ], +) +def test_create_reply( + message_creation_fn: Callable[ + [float], + Message, + ], + ttl: float, + reply_ttl: Optional[float], +) -> None: + """Test reply creation from message.""" + message: Message = message_creation_fn(ttl) + + time.sleep(0.1) + + if message.has_error(): + dummy_error = Error(code=0, reason="it crashed") + reply_message = message.create_error_reply(dummy_error, ttl=reply_ttl) + else: + reply_message = message.create_reply(content=RecordSet(), ttl=reply_ttl) + + # Ensure reply has a higher timestamp + assert message.metadata.created_at < reply_message.metadata.created_at + if reply_ttl: + # Ensure the TTL is the one specify upon reply creation + assert reply_message.metadata.ttl == reply_ttl + else: + # Ensure reply ttl is lower (since it uses remaining time left) + assert message.metadata.ttl > reply_message.metadata.ttl + + assert message.metadata.src_node_id == reply_message.metadata.dst_node_id + assert message.metadata.dst_node_id 
== reply_message.metadata.src_node_id + assert reply_message.metadata.reply_to_message == message.metadata.message_id diff --git a/src/py/flwr/common/record/recordset.py b/src/py/flwr/common/record/recordset.py index d8ef44ab15c2..5f247d0e2edf 100644 --- a/src/py/flwr/common/record/recordset.py +++ b/src/py/flwr/common/record/recordset.py @@ -16,23 +16,21 @@ from dataclasses import dataclass -from typing import Callable, Dict, Optional, Type, TypeVar +from typing import Dict, Optional, cast from .configsrecord import ConfigsRecord from .metricsrecord import MetricsRecord from .parametersrecord import ParametersRecord from .typeddict import TypedDict -T = TypeVar("T") - @dataclass -class RecordSet: - """RecordSet stores groups of parameters, metrics and configs.""" +class RecordSetData: + """Inner data container for the RecordSet class.""" - _parameters_records: TypedDict[str, ParametersRecord] - _metrics_records: TypedDict[str, MetricsRecord] - _configs_records: TypedDict[str, ConfigsRecord] + parameters_records: TypedDict[str, ParametersRecord] + metrics_records: TypedDict[str, MetricsRecord] + configs_records: TypedDict[str, ConfigsRecord] def __init__( self, @@ -40,40 +38,82 @@ def __init__( metrics_records: Optional[Dict[str, MetricsRecord]] = None, configs_records: Optional[Dict[str, ConfigsRecord]] = None, ) -> None: - def _get_check_fn(__t: Type[T]) -> Callable[[T], None]: - def _check_fn(__v: T) -> None: - if not isinstance(__v, __t): - raise TypeError(f"Expected `{__t}`, but `{type(__v)}` was passed.") - - return _check_fn - - self._parameters_records = TypedDict[str, ParametersRecord]( - _get_check_fn(str), _get_check_fn(ParametersRecord) + self.parameters_records = TypedDict[str, ParametersRecord]( + self._check_fn_str, self._check_fn_params ) - self._metrics_records = TypedDict[str, MetricsRecord]( - _get_check_fn(str), _get_check_fn(MetricsRecord) + self.metrics_records = TypedDict[str, MetricsRecord]( + self._check_fn_str, self._check_fn_metrics ) - 
self._configs_records = TypedDict[str, ConfigsRecord]( - _get_check_fn(str), _get_check_fn(ConfigsRecord) + self.configs_records = TypedDict[str, ConfigsRecord]( + self._check_fn_str, self._check_fn_configs ) if parameters_records is not None: - self._parameters_records.update(parameters_records) + self.parameters_records.update(parameters_records) if metrics_records is not None: - self._metrics_records.update(metrics_records) + self.metrics_records.update(metrics_records) if configs_records is not None: - self._configs_records.update(configs_records) + self.configs_records.update(configs_records) + + def _check_fn_str(self, key: str) -> None: + if not isinstance(key, str): + raise TypeError( + f"Expected `{str.__name__}`, but " + f"received `{type(key).__name__}` for the key." + ) + + def _check_fn_params(self, record: ParametersRecord) -> None: + if not isinstance(record, ParametersRecord): + raise TypeError( + f"Expected `{ParametersRecord.__name__}`, but " + f"received `{type(record).__name__}` for the value." + ) + + def _check_fn_metrics(self, record: MetricsRecord) -> None: + if not isinstance(record, MetricsRecord): + raise TypeError( + f"Expected `{MetricsRecord.__name__}`, but " + f"received `{type(record).__name__}` for the value." + ) + + def _check_fn_configs(self, record: ConfigsRecord) -> None: + if not isinstance(record, ConfigsRecord): + raise TypeError( + f"Expected `{ConfigsRecord.__name__}`, but " + f"received `{type(record).__name__}` for the value." 
+ ) + + +@dataclass +class RecordSet: + """RecordSet stores groups of parameters, metrics and configs.""" + + def __init__( + self, + parameters_records: Optional[Dict[str, ParametersRecord]] = None, + metrics_records: Optional[Dict[str, MetricsRecord]] = None, + configs_records: Optional[Dict[str, ConfigsRecord]] = None, + ) -> None: + data = RecordSetData( + parameters_records=parameters_records, + metrics_records=metrics_records, + configs_records=configs_records, + ) + self.__dict__["_data"] = data @property def parameters_records(self) -> TypedDict[str, ParametersRecord]: """Dictionary holding ParametersRecord instances.""" - return self._parameters_records + data = cast(RecordSetData, self.__dict__["_data"]) + return data.parameters_records @property def metrics_records(self) -> TypedDict[str, MetricsRecord]: """Dictionary holding MetricsRecord instances.""" - return self._metrics_records + data = cast(RecordSetData, self.__dict__["_data"]) + return data.metrics_records @property def configs_records(self) -> TypedDict[str, ConfigsRecord]: """Dictionary holding ConfigsRecord instances.""" - return self._configs_records + data = cast(RecordSetData, self.__dict__["_data"]) + return data.configs_records diff --git a/src/py/flwr/common/record/recordset_test.py b/src/py/flwr/common/record/recordset_test.py index 0e0b149881be..94d087795841 100644 --- a/src/py/flwr/common/record/recordset_test.py +++ b/src/py/flwr/common/record/recordset_test.py @@ -14,6 +14,7 @@ # ============================================================================== """RecordSet tests.""" +import pickle from copy import deepcopy from typing import Callable, Dict, List, OrderedDict, Type, Union @@ -33,7 +34,7 @@ Parameters, ) -from . import Array, ConfigsRecord, MetricsRecord, ParametersRecord +from . 
import Array, ConfigsRecord, MetricsRecord, ParametersRecord, RecordSet def get_ndarrays() -> NDArrays: @@ -398,3 +399,18 @@ def test_count_bytes_configsrecord() -> None: record_bytest_count = c_record.count_bytes() assert bytes_in_dict == record_bytest_count + + +def test_record_is_picklable() -> None: + """Test if RecordSet and *Record are picklable.""" + # Prepare + p_record = ParametersRecord() + m_record = MetricsRecord({"aa": 123}) + c_record = ConfigsRecord({"cc": bytes(9)}) + rs = RecordSet() + rs.parameters_records["params"] = p_record + rs.metrics_records["metrics"] = m_record + rs.configs_records["configs"] = c_record + + # Execute + pickle.dumps((p_record, m_record, c_record, rs)) diff --git a/src/py/flwr/common/retry_invoker.py b/src/py/flwr/common/retry_invoker.py index 5441e766983a..d12124b89840 100644 --- a/src/py/flwr/common/retry_invoker.py +++ b/src/py/flwr/common/retry_invoker.py @@ -107,7 +107,7 @@ class RetryInvoker: Parameters ---------- - wait_factory: Callable[[], Generator[float, None, None]] + wait_gen_factory: Callable[[], Generator[float, None, None]] A generator yielding successive wait times in seconds. If the generator is finite, the giveup event will be triggered when the generator raises `StopIteration`. @@ -129,12 +129,12 @@ class RetryInvoker: data class object detailing the invocation. on_giveup: Optional[Callable[[RetryState], None]] (default: None) A callable to be executed in the event that `max_tries` or `max_time` is - exceeded, `should_giveup` returns True, or `wait_factory()` generator raises + exceeded, `should_giveup` returns True, or `wait_gen_factory()` generator raises `StopInteration`. The parameter is a data class object detailing the invocation. jitter: Optional[Callable[[float], float]] (default: full_jitter) - A function of the value yielded by `wait_factory()` returning the actual time - to wait. 
This function helps distribute wait times stochastically to avoid + A function of the value yielded by `wait_gen_factory()` returning the actual + time to wait. This function helps distribute wait times stochastically to avoid timing collisions across concurrent clients. Wait times are jittered by default using the `full_jitter` function. To disable jittering, pass `jitter=None`. @@ -142,6 +142,13 @@ class RetryInvoker: A function accepting an exception instance, returning whether or not to give up prematurely before other give-up conditions are evaluated. If set to None, the strategy is to never give up prematurely. + wait_function: Optional[Callable[[float], None]] (default: None) + A function that defines how to wait between retry attempts. It accepts + one argument, the wait time in seconds, allowing the use of various waiting + mechanisms (e.g., asynchronous waits or event-based synchronization) suitable + for different execution environments. If set to `None`, the `wait_function` + defaults to `time.sleep`, which is ideal for synchronous operations. Custom + functions should manage execution flow to prevent blocking or interference. 
Examples -------- @@ -159,7 +166,7 @@ class RetryInvoker: # pylint: disable-next=too-many-arguments def __init__( self, - wait_factory: Callable[[], Generator[float, None, None]], + wait_gen_factory: Callable[[], Generator[float, None, None]], recoverable_exceptions: Union[Type[Exception], Tuple[Type[Exception], ...]], max_tries: Optional[int], max_time: Optional[float], @@ -169,8 +176,9 @@ def __init__( on_giveup: Optional[Callable[[RetryState], None]] = None, jitter: Optional[Callable[[float], float]] = full_jitter, should_giveup: Optional[Callable[[Exception], bool]] = None, + wait_function: Optional[Callable[[float], None]] = None, ) -> None: - self.wait_factory = wait_factory + self.wait_gen_factory = wait_gen_factory self.recoverable_exceptions = recoverable_exceptions self.max_tries = max_tries self.max_time = max_time @@ -179,6 +187,9 @@ def __init__( self.on_giveup = on_giveup self.jitter = jitter self.should_giveup = should_giveup + if wait_function is None: + wait_function = time.sleep + self.wait_function = wait_function # pylint: disable-next=too-many-locals def invoke( @@ -212,13 +223,13 @@ def invoke( Raises ------ Exception - If the number of tries exceeds `max_tries`, if the total time - exceeds `max_time`, if `wait_factory()` generator raises `StopInteration`, + If the number of tries exceeds `max_tries`, if the total time exceeds + `max_time`, if `wait_gen_factory()` generator raises `StopInteration`, or if the `should_giveup` returns True for a raised exception. Notes ----- - The time between retries is determined by the provided `wait_factory()` + The time between retries is determined by the provided `wait_gen_factory()` generator and can optionally be jittered using the `jitter` function. The recoverable exceptions that trigger a retry, as well as conditions to stop retries, are also determined by the class's initialization parameters. 
@@ -231,13 +242,13 @@ def try_call_event_handler( handler(cast(RetryState, ref_state[0])) try_cnt = 0 - wait_generator = self.wait_factory() - start = time.time() + wait_generator = self.wait_gen_factory() + start = time.monotonic() ref_state: List[Optional[RetryState]] = [None] while True: try_cnt += 1 - elapsed_time = time.time() - start + elapsed_time = time.monotonic() - start state = RetryState( target=target, args=args, @@ -250,6 +261,7 @@ def try_call_event_handler( try: ret = target(*args, **kwargs) except self.recoverable_exceptions as err: + state.exception = err # Check if giveup event should be triggered max_tries_exceeded = try_cnt == self.max_tries max_time_exceeded = ( @@ -282,7 +294,7 @@ def giveup_check(_exception: Exception) -> bool: try_call_event_handler(self.on_backoff) # Sleep - time.sleep(wait_time) + self.wait_function(state.actual_wait) else: # Trigger success event try_call_event_handler(self.on_success) diff --git a/src/py/flwr/common/retry_invoker_test.py b/src/py/flwr/common/retry_invoker_test.py index e67c0641e2ba..2259ae47ded4 100644 --- a/src/py/flwr/common/retry_invoker_test.py +++ b/src/py/flwr/common/retry_invoker_test.py @@ -35,8 +35,8 @@ def failing_function() -> None: @pytest.fixture(name="mock_time") def fixture_mock_time() -> Generator[MagicMock, None, None]: - """Mock time.time for controlled testing.""" - with patch("time.time") as mock_time: + """Mock time.monotonic for controlled testing.""" + with patch("time.monotonic") as mock_time: yield mock_time diff --git a/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py b/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py index 844a93f3bde9..9856b8b706f9 100644 --- a/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py +++ b/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py @@ -18,8 +18,9 @@ import base64 from typing import Tuple, cast +from cryptography.exceptions import InvalidSignature from 
cryptography.fernet import Fernet -from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives import hashes, hmac, serialization from cryptography.hazmat.primitives.asymmetric import ec from cryptography.hazmat.primitives.kdf.hkdf import HKDF @@ -98,3 +99,36 @@ def decrypt(key: bytes, ciphertext: bytes) -> bytes: # The input key must be url safe fernet = Fernet(key) return fernet.decrypt(ciphertext) + + +def compute_hmac(key: bytes, message: bytes) -> bytes: + """Compute hmac of a message using key as hash.""" + computed_hmac = hmac.HMAC(key, hashes.SHA256()) + computed_hmac.update(message) + return computed_hmac.finalize() + + +def verify_hmac(key: bytes, message: bytes, hmac_value: bytes) -> bool: + """Verify hmac of a message using key as hash.""" + computed_hmac = hmac.HMAC(key, hashes.SHA256()) + computed_hmac.update(message) + try: + computed_hmac.verify(hmac_value) + return True + except InvalidSignature: + return False + + +def ssh_types_to_elliptic_curve( + private_key: serialization.SSHPrivateKeyTypes, + public_key: serialization.SSHPublicKeyTypes, +) -> Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]: + """Cast SSH key types to elliptic curve.""" + if isinstance(private_key, ec.EllipticCurvePrivateKey) and isinstance( + public_key, ec.EllipticCurvePublicKey + ): + return (private_key, public_key) + + raise TypeError( + "The provided key is not an EllipticCurvePrivateKey or EllipticCurvePublicKey" + ) diff --git a/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption_test.py b/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption_test.py new file mode 100644 index 000000000000..f62276b63ff3 --- /dev/null +++ b/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption_test.py @@ -0,0 +1,102 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Symmetric encryption tests.""" + + +from .symmetric_encryption import ( + compute_hmac, + generate_key_pairs, + generate_shared_key, + verify_hmac, +) + + +def test_generate_shared_key() -> None: + """Test util function generate_shared_key.""" + # Prepare + client_keys = generate_key_pairs() + server_keys = generate_key_pairs() + + # Execute + client_shared_secret = generate_shared_key(client_keys[0], server_keys[1]) + server_shared_secret = generate_shared_key(server_keys[0], client_keys[1]) + + # Assert + assert client_shared_secret == server_shared_secret + + +def test_wrong_secret_generate_shared_key() -> None: + """Test util function generate_shared_key with wrong secret.""" + # Prepare + client_keys = generate_key_pairs() + server_keys = generate_key_pairs() + other_keys = generate_key_pairs() + + # Execute + client_shared_secret = generate_shared_key(client_keys[0], other_keys[1]) + server_shared_secret = generate_shared_key(server_keys[0], client_keys[1]) + + # Assert + assert client_shared_secret != server_shared_secret + + +def test_hmac() -> None: + """Test util function compute and verify hmac.""" + # Prepare + client_keys = generate_key_pairs() + server_keys = generate_key_pairs() + client_shared_secret = generate_shared_key(client_keys[0], server_keys[1]) + server_shared_secret = generate_shared_key(server_keys[0], 
client_keys[1]) + message = b"Flower is the future of AI" + + # Execute + client_compute_hmac = compute_hmac(client_shared_secret, message) + + # Assert + assert verify_hmac(server_shared_secret, message, client_compute_hmac) + + +def test_wrong_secret_hmac() -> None: + """Test util function compute and verify hmac with wrong secret.""" + # Prepare + client_keys = generate_key_pairs() + server_keys = generate_key_pairs() + other_keys = generate_key_pairs() + client_shared_secret = generate_shared_key(client_keys[0], other_keys[1]) + server_shared_secret = generate_shared_key(server_keys[0], client_keys[1]) + message = b"Flower is the future of AI" + + # Execute + client_compute_hmac = compute_hmac(client_shared_secret, message) + + # Assert + assert verify_hmac(server_shared_secret, message, client_compute_hmac) is False + + +def test_wrong_message_hmac() -> None: + """Test util function compute and verify hmac with wrong message.""" + # Prepare + client_keys = generate_key_pairs() + server_keys = generate_key_pairs() + client_shared_secret = generate_shared_key(client_keys[0], server_keys[1]) + server_shared_secret = generate_shared_key(server_keys[0], client_keys[1]) + message = b"Flower is the future of AI" + other_message = b"Flower is not the future of AI" + + # Execute + client_compute_hmac = compute_hmac(client_shared_secret, other_message) + + # Assert + assert verify_hmac(server_shared_secret, message, client_compute_hmac) is False diff --git a/src/py/flwr/common/serde.py b/src/py/flwr/common/serde.py index 6c7a077d2f9f..84932b806aff 100644 --- a/src/py/flwr/common/serde.py +++ b/src/py/flwr/common/serde.py @@ -575,6 +575,7 @@ def message_to_taskins(message: Message) -> TaskIns: task=Task( producer=Node(node_id=0, anonymous=True), # Assume driver node consumer=Node(node_id=md.dst_node_id, anonymous=False), + created_at=md.created_at, ttl=md.ttl, ancestry=[md.reply_to_message] if md.reply_to_message != "" else [], task_type=md.message_type, @@ -601,7 +602,7 
@@ def message_from_taskins(taskins: TaskIns) -> Message: ) # Construct Message - return Message( + message = Message( metadata=metadata, content=( recordset_from_proto(taskins.task.recordset) @@ -614,6 +615,8 @@ def message_from_taskins(taskins: TaskIns) -> Message: else None ), ) + message.metadata.created_at = taskins.task.created_at + return message def message_to_taskres(message: Message) -> TaskRes: @@ -626,6 +629,7 @@ def message_to_taskres(message: Message) -> TaskRes: task=Task( producer=Node(node_id=md.src_node_id, anonymous=False), consumer=Node(node_id=0, anonymous=True), # Assume driver node + created_at=md.created_at, ttl=md.ttl, ancestry=[md.reply_to_message] if md.reply_to_message != "" else [], task_type=md.message_type, @@ -652,7 +656,7 @@ def message_from_taskres(taskres: TaskRes) -> Message: ) # Construct the Message - return Message( + message = Message( metadata=metadata, content=( recordset_from_proto(taskres.task.recordset) @@ -665,3 +669,5 @@ def message_from_taskres(taskres: TaskRes) -> Message: else None ), ) + message.metadata.created_at = taskres.task.created_at + return message diff --git a/src/py/flwr/common/serde_test.py b/src/py/flwr/common/serde_test.py index 8596e5d2f330..f9969426fc36 100644 --- a/src/py/flwr/common/serde_test.py +++ b/src/py/flwr/common/serde_test.py @@ -219,7 +219,7 @@ def metadata(self) -> Metadata: src_node_id=self.rng.randint(0, 1 << 63), dst_node_id=self.rng.randint(0, 1 << 63), reply_to_message=self.get_str(64), - ttl=self.get_str(10), + ttl=self.rng.randint(1, 1 << 30), message_type=self.get_str(10), ) @@ -324,7 +324,7 @@ def test_message_to_and_from_taskins( maker = RecordMaker(state=1) metadata = maker.metadata() # pylint: disable-next=protected-access - metadata._src_node_id = 0 # Assume driver node + metadata.__dict__["_src_node_id"] = 0 # Assume driver node original = Message( metadata=metadata, diff --git a/src/py/flwr/common/telemetry.py b/src/py/flwr/common/telemetry.py index 
8eb594085d31..41fe1508e652 100644 --- a/src/py/flwr/common/telemetry.py +++ b/src/py/flwr/common/telemetry.py @@ -160,6 +160,10 @@ def _generate_next_value_(name: str, start: int, count: int, last_values: List[A RUN_SERVER_APP_ENTER = auto() RUN_SERVER_APP_LEAVE = auto() + # SuperNode + RUN_SUPERNODE_ENTER = auto() + RUN_SUPERNODE_LEAVE = auto() + # Use the ThreadPoolExecutor with max_workers=1 to have a queue # and also ensure that telemetry calls are not blocking. diff --git a/src/py/flwr/common/telemetry_test.py b/src/py/flwr/common/telemetry_test.py index 006f4422bc1d..a5eea48443b5 100644 --- a/src/py/flwr/common/telemetry_test.py +++ b/src/py/flwr/common/telemetry_test.py @@ -47,8 +47,8 @@ def test_not_blocking(self) -> None: 0.001s. """ # Prepare - # Use 0.1ms as any blocking networked call would take longer. - duration_max = 0.001 + # Use 5ms as any blocking networked call would take longer. + duration_max = 0.005 start = time.time() # Execute diff --git a/src/py/flwr/proto/driver_pb2.py b/src/py/flwr/proto/driver_pb2.py index fe9c33da0fa9..b0caae58ff6f 100644 --- a/src/py/flwr/proto/driver_pb2.py +++ b/src/py/flwr/proto/driver_pb2.py @@ -16,7 +16,7 @@ from flwr.proto import task_pb2 as flwr_dot_proto_dot_task__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17\x66lwr/proto/driver.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\"\x12\n\x10\x43reateRunRequest\"#\n\x11\x43reateRunResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"!\n\x0fGetNodesRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"3\n\x10GetNodesResponse\x12\x1f\n\x05nodes\x18\x01 \x03(\x0b\x32\x10.flwr.proto.Node\"@\n\x12PushTaskInsRequest\x12*\n\rtask_ins_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"\'\n\x13PushTaskInsResponse\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"F\n\x12PullTaskResRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 
\x03(\t\"A\n\x13PullTaskResResponse\x12*\n\rtask_res_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskRes2\xc1\x02\n\x06\x44river\x12J\n\tCreateRun\x12\x1c.flwr.proto.CreateRunRequest\x1a\x1d.flwr.proto.CreateRunResponse\"\x00\x12G\n\x08GetNodes\x12\x1b.flwr.proto.GetNodesRequest\x1a\x1c.flwr.proto.GetNodesResponse\"\x00\x12P\n\x0bPushTaskIns\x12\x1e.flwr.proto.PushTaskInsRequest\x1a\x1f.flwr.proto.PushTaskInsResponse\"\x00\x12P\n\x0bPullTaskRes\x12\x1e.flwr.proto.PullTaskResRequest\x1a\x1f.flwr.proto.PullTaskResResponse\"\x00\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17\x66lwr/proto/driver.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\"7\n\x10\x43reateRunRequest\x12\x0e\n\x06\x66\x61\x62_id\x18\x01 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x02 \x01(\t\"#\n\x11\x43reateRunResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"!\n\x0fGetNodesRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"3\n\x10GetNodesResponse\x12\x1f\n\x05nodes\x18\x01 \x03(\x0b\x32\x10.flwr.proto.Node\"@\n\x12PushTaskInsRequest\x12*\n\rtask_ins_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"\'\n\x13PushTaskInsResponse\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"F\n\x12PullTaskResRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"A\n\x13PullTaskResResponse\x12*\n\rtask_res_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskRes2\xc1\x02\n\x06\x44river\x12J\n\tCreateRun\x12\x1c.flwr.proto.CreateRunRequest\x1a\x1d.flwr.proto.CreateRunResponse\"\x00\x12G\n\x08GetNodes\x12\x1b.flwr.proto.GetNodesRequest\x1a\x1c.flwr.proto.GetNodesResponse\"\x00\x12P\n\x0bPushTaskIns\x12\x1e.flwr.proto.PushTaskInsRequest\x1a\x1f.flwr.proto.PushTaskInsResponse\"\x00\x12P\n\x0bPullTaskRes\x12\x1e.flwr.proto.PullTaskResRequest\x1a\x1f.flwr.proto.PullTaskResResponse\"\x00\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -24,21 +24,21 @@ if 
_descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None _globals['_CREATERUNREQUEST']._serialized_start=85 - _globals['_CREATERUNREQUEST']._serialized_end=103 - _globals['_CREATERUNRESPONSE']._serialized_start=105 - _globals['_CREATERUNRESPONSE']._serialized_end=140 - _globals['_GETNODESREQUEST']._serialized_start=142 - _globals['_GETNODESREQUEST']._serialized_end=175 - _globals['_GETNODESRESPONSE']._serialized_start=177 - _globals['_GETNODESRESPONSE']._serialized_end=228 - _globals['_PUSHTASKINSREQUEST']._serialized_start=230 - _globals['_PUSHTASKINSREQUEST']._serialized_end=294 - _globals['_PUSHTASKINSRESPONSE']._serialized_start=296 - _globals['_PUSHTASKINSRESPONSE']._serialized_end=335 - _globals['_PULLTASKRESREQUEST']._serialized_start=337 - _globals['_PULLTASKRESREQUEST']._serialized_end=407 - _globals['_PULLTASKRESRESPONSE']._serialized_start=409 - _globals['_PULLTASKRESRESPONSE']._serialized_end=474 - _globals['_DRIVER']._serialized_start=477 - _globals['_DRIVER']._serialized_end=798 + _globals['_CREATERUNREQUEST']._serialized_end=140 + _globals['_CREATERUNRESPONSE']._serialized_start=142 + _globals['_CREATERUNRESPONSE']._serialized_end=177 + _globals['_GETNODESREQUEST']._serialized_start=179 + _globals['_GETNODESREQUEST']._serialized_end=212 + _globals['_GETNODESRESPONSE']._serialized_start=214 + _globals['_GETNODESRESPONSE']._serialized_end=265 + _globals['_PUSHTASKINSREQUEST']._serialized_start=267 + _globals['_PUSHTASKINSREQUEST']._serialized_end=331 + _globals['_PUSHTASKINSRESPONSE']._serialized_start=333 + _globals['_PUSHTASKINSRESPONSE']._serialized_end=372 + _globals['_PULLTASKRESREQUEST']._serialized_start=374 + _globals['_PULLTASKRESREQUEST']._serialized_end=444 + _globals['_PULLTASKRESRESPONSE']._serialized_start=446 + _globals['_PULLTASKRESRESPONSE']._serialized_end=511 + _globals['_DRIVER']._serialized_start=514 + _globals['_DRIVER']._serialized_end=835 # @@protoc_insertion_point(module_scope) diff --git 
a/src/py/flwr/proto/driver_pb2.pyi b/src/py/flwr/proto/driver_pb2.pyi index 8dc254a55e8c..2d8d11fb59a3 100644 --- a/src/py/flwr/proto/driver_pb2.pyi +++ b/src/py/flwr/proto/driver_pb2.pyi @@ -16,8 +16,16 @@ DESCRIPTOR: google.protobuf.descriptor.FileDescriptor class CreateRunRequest(google.protobuf.message.Message): """CreateRun""" DESCRIPTOR: google.protobuf.descriptor.Descriptor + FAB_ID_FIELD_NUMBER: builtins.int + FAB_VERSION_FIELD_NUMBER: builtins.int + fab_id: typing.Text + fab_version: typing.Text def __init__(self, + *, + fab_id: typing.Text = ..., + fab_version: typing.Text = ..., ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["fab_id",b"fab_id","fab_version",b"fab_version"]) -> None: ... global___CreateRunRequest = CreateRunRequest class CreateRunResponse(google.protobuf.message.Message): diff --git a/src/py/flwr/proto/fleet_pb2.py b/src/py/flwr/proto/fleet_pb2.py index e8443c296f0c..42f3292d910d 100644 --- a/src/py/flwr/proto/fleet_pb2.py +++ b/src/py/flwr/proto/fleet_pb2.py @@ -16,7 +16,7 @@ from flwr.proto import task_pb2 as flwr_dot_proto_dot_task__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16\x66lwr/proto/fleet.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\"\x13\n\x11\x43reateNodeRequest\"4\n\x12\x43reateNodeResponse\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"3\n\x11\x44\x65leteNodeRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"\x14\n\x12\x44\x65leteNodeResponse\"F\n\x12PullTaskInsRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"k\n\x13PullTaskInsResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12*\n\rtask_ins_list\x18\x02 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"@\n\x12PushTaskResRequest\x12*\n\rtask_res_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskRes\"\xae\x01\n\x13PushTaskResResponse\x12(\n\treconnect\x18\x01 
\x01(\x0b\x32\x15.flwr.proto.Reconnect\x12=\n\x07results\x18\x02 \x03(\x0b\x32,.flwr.proto.PushTaskResResponse.ResultsEntry\x1a.\n\x0cResultsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\r:\x02\x38\x01\"\x1e\n\tReconnect\x12\x11\n\treconnect\x18\x01 \x01(\x04\x32\xc9\x02\n\x05\x46leet\x12M\n\nCreateNode\x12\x1d.flwr.proto.CreateNodeRequest\x1a\x1e.flwr.proto.CreateNodeResponse\"\x00\x12M\n\nDeleteNode\x12\x1d.flwr.proto.DeleteNodeRequest\x1a\x1e.flwr.proto.DeleteNodeResponse\"\x00\x12P\n\x0bPullTaskIns\x12\x1e.flwr.proto.PullTaskInsRequest\x1a\x1f.flwr.proto.PullTaskInsResponse\"\x00\x12P\n\x0bPushTaskRes\x12\x1e.flwr.proto.PushTaskResRequest\x1a\x1f.flwr.proto.PushTaskResResponse\"\x00\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16\x66lwr/proto/fleet.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\"*\n\x11\x43reateNodeRequest\x12\x15\n\rping_interval\x18\x01 \x01(\x01\"4\n\x12\x43reateNodeResponse\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"3\n\x11\x44\x65leteNodeRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"\x14\n\x12\x44\x65leteNodeResponse\"D\n\x0bPingRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x15\n\rping_interval\x18\x02 \x01(\x01\"\x1f\n\x0cPingResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\"F\n\x12PullTaskInsRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"k\n\x13PullTaskInsResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12*\n\rtask_ins_list\x18\x02 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"@\n\x12PushTaskResRequest\x12*\n\rtask_res_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskRes\"\xae\x01\n\x13PushTaskResResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12=\n\x07results\x18\x02 \x03(\x0b\x32,.flwr.proto.PushTaskResResponse.ResultsEntry\x1a.\n\x0cResultsEntry\x12\x0b\n\x03key\x18\x01 
\x01(\t\x12\r\n\x05value\x18\x02 \x01(\r:\x02\x38\x01\":\n\x03Run\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\x12\x0e\n\x06\x66\x61\x62_id\x18\x02 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x03 \x01(\t\"\x1f\n\rGetRunRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\".\n\x0eGetRunResponse\x12\x1c\n\x03run\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Run\"\x1e\n\tReconnect\x12\x11\n\treconnect\x18\x01 \x01(\x04\x32\xc9\x03\n\x05\x46leet\x12M\n\nCreateNode\x12\x1d.flwr.proto.CreateNodeRequest\x1a\x1e.flwr.proto.CreateNodeResponse\"\x00\x12M\n\nDeleteNode\x12\x1d.flwr.proto.DeleteNodeRequest\x1a\x1e.flwr.proto.DeleteNodeResponse\"\x00\x12;\n\x04Ping\x12\x17.flwr.proto.PingRequest\x1a\x18.flwr.proto.PingResponse\"\x00\x12P\n\x0bPullTaskIns\x12\x1e.flwr.proto.PullTaskInsRequest\x1a\x1f.flwr.proto.PullTaskInsResponse\"\x00\x12P\n\x0bPushTaskRes\x12\x1e.flwr.proto.PushTaskResRequest\x1a\x1f.flwr.proto.PushTaskResResponse\"\x00\x12\x41\n\x06GetRun\x12\x19.flwr.proto.GetRunRequest\x1a\x1a.flwr.proto.GetRunResponse\"\x00\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -26,25 +26,35 @@ _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._options = None _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_options = b'8\001' _globals['_CREATENODEREQUEST']._serialized_start=84 - _globals['_CREATENODEREQUEST']._serialized_end=103 - _globals['_CREATENODERESPONSE']._serialized_start=105 - _globals['_CREATENODERESPONSE']._serialized_end=157 - _globals['_DELETENODEREQUEST']._serialized_start=159 - _globals['_DELETENODEREQUEST']._serialized_end=210 - _globals['_DELETENODERESPONSE']._serialized_start=212 - _globals['_DELETENODERESPONSE']._serialized_end=232 - _globals['_PULLTASKINSREQUEST']._serialized_start=234 - _globals['_PULLTASKINSREQUEST']._serialized_end=304 - _globals['_PULLTASKINSRESPONSE']._serialized_start=306 - _globals['_PULLTASKINSRESPONSE']._serialized_end=413 - _globals['_PUSHTASKRESREQUEST']._serialized_start=415 - 
_globals['_PUSHTASKRESREQUEST']._serialized_end=479 - _globals['_PUSHTASKRESRESPONSE']._serialized_start=482 - _globals['_PUSHTASKRESRESPONSE']._serialized_end=656 - _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_start=610 - _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_end=656 - _globals['_RECONNECT']._serialized_start=658 - _globals['_RECONNECT']._serialized_end=688 - _globals['_FLEET']._serialized_start=691 - _globals['_FLEET']._serialized_end=1020 + _globals['_CREATENODEREQUEST']._serialized_end=126 + _globals['_CREATENODERESPONSE']._serialized_start=128 + _globals['_CREATENODERESPONSE']._serialized_end=180 + _globals['_DELETENODEREQUEST']._serialized_start=182 + _globals['_DELETENODEREQUEST']._serialized_end=233 + _globals['_DELETENODERESPONSE']._serialized_start=235 + _globals['_DELETENODERESPONSE']._serialized_end=255 + _globals['_PINGREQUEST']._serialized_start=257 + _globals['_PINGREQUEST']._serialized_end=325 + _globals['_PINGRESPONSE']._serialized_start=327 + _globals['_PINGRESPONSE']._serialized_end=358 + _globals['_PULLTASKINSREQUEST']._serialized_start=360 + _globals['_PULLTASKINSREQUEST']._serialized_end=430 + _globals['_PULLTASKINSRESPONSE']._serialized_start=432 + _globals['_PULLTASKINSRESPONSE']._serialized_end=539 + _globals['_PUSHTASKRESREQUEST']._serialized_start=541 + _globals['_PUSHTASKRESREQUEST']._serialized_end=605 + _globals['_PUSHTASKRESRESPONSE']._serialized_start=608 + _globals['_PUSHTASKRESRESPONSE']._serialized_end=782 + _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_start=736 + _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_end=782 + _globals['_RUN']._serialized_start=784 + _globals['_RUN']._serialized_end=842 + _globals['_GETRUNREQUEST']._serialized_start=844 + _globals['_GETRUNREQUEST']._serialized_end=875 + _globals['_GETRUNRESPONSE']._serialized_start=877 + _globals['_GETRUNRESPONSE']._serialized_end=923 + _globals['_RECONNECT']._serialized_start=925 + 
_globals['_RECONNECT']._serialized_end=955 + _globals['_FLEET']._serialized_start=958 + _globals['_FLEET']._serialized_end=1415 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/fleet_pb2.pyi b/src/py/flwr/proto/fleet_pb2.pyi index 86bc358858d2..a6f38b703e76 100644 --- a/src/py/flwr/proto/fleet_pb2.pyi +++ b/src/py/flwr/proto/fleet_pb2.pyi @@ -16,8 +16,13 @@ DESCRIPTOR: google.protobuf.descriptor.FileDescriptor class CreateNodeRequest(google.protobuf.message.Message): """CreateNode messages""" DESCRIPTOR: google.protobuf.descriptor.Descriptor + PING_INTERVAL_FIELD_NUMBER: builtins.int + ping_interval: builtins.float def __init__(self, + *, + ping_interval: builtins.float = ..., ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["ping_interval",b"ping_interval"]) -> None: ... global___CreateNodeRequest = CreateNodeRequest class CreateNodeResponse(google.protobuf.message.Message): @@ -53,6 +58,34 @@ class DeleteNodeResponse(google.protobuf.message.Message): ) -> None: ... global___DeleteNodeResponse = DeleteNodeResponse +class PingRequest(google.protobuf.message.Message): + """Ping messages""" + DESCRIPTOR: google.protobuf.descriptor.Descriptor + NODE_FIELD_NUMBER: builtins.int + PING_INTERVAL_FIELD_NUMBER: builtins.int + @property + def node(self) -> flwr.proto.node_pb2.Node: ... + ping_interval: builtins.float + def __init__(self, + *, + node: typing.Optional[flwr.proto.node_pb2.Node] = ..., + ping_interval: builtins.float = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["node",b"node"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["node",b"node","ping_interval",b"ping_interval"]) -> None: ... 
+global___PingRequest = PingRequest + +class PingResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + SUCCESS_FIELD_NUMBER: builtins.int + success: builtins.bool + def __init__(self, + *, + success: builtins.bool = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["success",b"success"]) -> None: ... +global___PingResponse = PingResponse + class PullTaskInsRequest(google.protobuf.message.Message): """PullTaskIns messages""" DESCRIPTOR: google.protobuf.descriptor.Descriptor @@ -131,6 +164,48 @@ class PushTaskResResponse(google.protobuf.message.Message): def ClearField(self, field_name: typing_extensions.Literal["reconnect",b"reconnect","results",b"results"]) -> None: ... global___PushTaskResResponse = PushTaskResResponse +class Run(google.protobuf.message.Message): + """GetRun messages""" + DESCRIPTOR: google.protobuf.descriptor.Descriptor + RUN_ID_FIELD_NUMBER: builtins.int + FAB_ID_FIELD_NUMBER: builtins.int + FAB_VERSION_FIELD_NUMBER: builtins.int + run_id: builtins.int + fab_id: typing.Text + fab_version: typing.Text + def __init__(self, + *, + run_id: builtins.int = ..., + fab_id: typing.Text = ..., + fab_version: typing.Text = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["fab_id",b"fab_id","fab_version",b"fab_version","run_id",b"run_id"]) -> None: ... +global___Run = Run + +class GetRunRequest(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + RUN_ID_FIELD_NUMBER: builtins.int + run_id: builtins.int + def __init__(self, + *, + run_id: builtins.int = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["run_id",b"run_id"]) -> None: ... +global___GetRunRequest = GetRunRequest + +class GetRunResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + RUN_FIELD_NUMBER: builtins.int + @property + def run(self) -> global___Run: ... 
+ def __init__(self, + *, + run: typing.Optional[global___Run] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["run",b"run"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["run",b"run"]) -> None: ... +global___GetRunResponse = GetRunResponse + class Reconnect(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor RECONNECT_FIELD_NUMBER: builtins.int diff --git a/src/py/flwr/proto/fleet_pb2_grpc.py b/src/py/flwr/proto/fleet_pb2_grpc.py index 2b53ec43e851..16757eaed381 100644 --- a/src/py/flwr/proto/fleet_pb2_grpc.py +++ b/src/py/flwr/proto/fleet_pb2_grpc.py @@ -24,6 +24,11 @@ def __init__(self, channel): request_serializer=flwr_dot_proto_dot_fleet__pb2.DeleteNodeRequest.SerializeToString, response_deserializer=flwr_dot_proto_dot_fleet__pb2.DeleteNodeResponse.FromString, ) + self.Ping = channel.unary_unary( + '/flwr.proto.Fleet/Ping', + request_serializer=flwr_dot_proto_dot_fleet__pb2.PingRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_fleet__pb2.PingResponse.FromString, + ) self.PullTaskIns = channel.unary_unary( '/flwr.proto.Fleet/PullTaskIns', request_serializer=flwr_dot_proto_dot_fleet__pb2.PullTaskInsRequest.SerializeToString, @@ -34,6 +39,11 @@ def __init__(self, channel): request_serializer=flwr_dot_proto_dot_fleet__pb2.PushTaskResRequest.SerializeToString, response_deserializer=flwr_dot_proto_dot_fleet__pb2.PushTaskResResponse.FromString, ) + self.GetRun = channel.unary_unary( + '/flwr.proto.Fleet/GetRun', + request_serializer=flwr_dot_proto_dot_fleet__pb2.GetRunRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_fleet__pb2.GetRunResponse.FromString, + ) class FleetServicer(object): @@ -51,6 +61,12 @@ def DeleteNode(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def Ping(self, request, context): + """Missing associated documentation comment 
in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def PullTaskIns(self, request, context): """Retrieve one or more tasks, if possible @@ -69,6 +85,12 @@ def PushTaskRes(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def GetRun(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def add_FleetServicer_to_server(servicer, server): rpc_method_handlers = { @@ -82,6 +104,11 @@ def add_FleetServicer_to_server(servicer, server): request_deserializer=flwr_dot_proto_dot_fleet__pb2.DeleteNodeRequest.FromString, response_serializer=flwr_dot_proto_dot_fleet__pb2.DeleteNodeResponse.SerializeToString, ), + 'Ping': grpc.unary_unary_rpc_method_handler( + servicer.Ping, + request_deserializer=flwr_dot_proto_dot_fleet__pb2.PingRequest.FromString, + response_serializer=flwr_dot_proto_dot_fleet__pb2.PingResponse.SerializeToString, + ), 'PullTaskIns': grpc.unary_unary_rpc_method_handler( servicer.PullTaskIns, request_deserializer=flwr_dot_proto_dot_fleet__pb2.PullTaskInsRequest.FromString, @@ -92,6 +119,11 @@ def add_FleetServicer_to_server(servicer, server): request_deserializer=flwr_dot_proto_dot_fleet__pb2.PushTaskResRequest.FromString, response_serializer=flwr_dot_proto_dot_fleet__pb2.PushTaskResResponse.SerializeToString, ), + 'GetRun': grpc.unary_unary_rpc_method_handler( + servicer.GetRun, + request_deserializer=flwr_dot_proto_dot_fleet__pb2.GetRunRequest.FromString, + response_serializer=flwr_dot_proto_dot_fleet__pb2.GetRunResponse.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( 'flwr.proto.Fleet', rpc_method_handlers) @@ -136,6 +168,23 @@ def 
DeleteNode(request, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + @staticmethod + def Ping(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.Fleet/Ping', + flwr_dot_proto_dot_fleet__pb2.PingRequest.SerializeToString, + flwr_dot_proto_dot_fleet__pb2.PingResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + @staticmethod def PullTaskIns(request, target, @@ -169,3 +218,20 @@ def PushTaskRes(request, flwr_dot_proto_dot_fleet__pb2.PushTaskResResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetRun(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.Fleet/GetRun', + flwr_dot_proto_dot_fleet__pb2.GetRunRequest.SerializeToString, + flwr_dot_proto_dot_fleet__pb2.GetRunResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/src/py/flwr/proto/fleet_pb2_grpc.pyi b/src/py/flwr/proto/fleet_pb2_grpc.pyi index cfa83f737439..f275cd149d69 100644 --- a/src/py/flwr/proto/fleet_pb2_grpc.pyi +++ b/src/py/flwr/proto/fleet_pb2_grpc.pyi @@ -16,6 +16,10 @@ class FleetStub: flwr.proto.fleet_pb2.DeleteNodeRequest, flwr.proto.fleet_pb2.DeleteNodeResponse] + Ping: grpc.UnaryUnaryMultiCallable[ + flwr.proto.fleet_pb2.PingRequest, + flwr.proto.fleet_pb2.PingResponse] + PullTaskIns: grpc.UnaryUnaryMultiCallable[ flwr.proto.fleet_pb2.PullTaskInsRequest, 
flwr.proto.fleet_pb2.PullTaskInsResponse] @@ -32,6 +36,10 @@ class FleetStub: HTTP API path: /api/v1/fleet/push-task-res """ + GetRun: grpc.UnaryUnaryMultiCallable[ + flwr.proto.fleet_pb2.GetRunRequest, + flwr.proto.fleet_pb2.GetRunResponse] + class FleetServicer(metaclass=abc.ABCMeta): @abc.abstractmethod @@ -46,6 +54,12 @@ class FleetServicer(metaclass=abc.ABCMeta): context: grpc.ServicerContext, ) -> flwr.proto.fleet_pb2.DeleteNodeResponse: ... + @abc.abstractmethod + def Ping(self, + request: flwr.proto.fleet_pb2.PingRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.fleet_pb2.PingResponse: ... + @abc.abstractmethod def PullTaskIns(self, request: flwr.proto.fleet_pb2.PullTaskInsRequest, @@ -68,5 +82,11 @@ class FleetServicer(metaclass=abc.ABCMeta): """ pass + @abc.abstractmethod + def GetRun(self, + request: flwr.proto.fleet_pb2.GetRunRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.fleet_pb2.GetRunResponse: ... + def add_FleetServicer_to_server(servicer: FleetServicer, server: grpc.Server) -> None: ... 
diff --git a/src/py/flwr/proto/task_pb2.py b/src/py/flwr/proto/task_pb2.py index 4d5f863e88dd..5f6e9e7be583 100644 --- a/src/py/flwr/proto/task_pb2.py +++ b/src/py/flwr/proto/task_pb2.py @@ -18,7 +18,7 @@ from flwr.proto import error_pb2 as flwr_dot_proto_dot_error__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/task.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x1a\x66lwr/proto/transport.proto\x1a\x16\x66lwr/proto/error.proto\"\xf6\x01\n\x04Task\x12\"\n\x08producer\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\"\n\x08\x63onsumer\x18\x02 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x12\n\ncreated_at\x18\x03 \x01(\t\x12\x14\n\x0c\x64\x65livered_at\x18\x04 \x01(\t\x12\x0b\n\x03ttl\x18\x05 \x01(\t\x12\x10\n\x08\x61ncestry\x18\x06 \x03(\t\x12\x11\n\ttask_type\x18\x07 \x01(\t\x12(\n\trecordset\x18\x08 \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12 \n\x05\x65rror\x18\t \x01(\x0b\x32\x11.flwr.proto.Error\"\\\n\x07TaskIns\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Task\"\\\n\x07TaskRes\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Taskb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/task.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x1a\x66lwr/proto/transport.proto\x1a\x16\x66lwr/proto/error.proto\"\x89\x02\n\x04Task\x12\"\n\x08producer\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\"\n\x08\x63onsumer\x18\x02 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x12\n\ncreated_at\x18\x03 \x01(\x01\x12\x14\n\x0c\x64\x65livered_at\x18\x04 \x01(\t\x12\x11\n\tpushed_at\x18\x05 \x01(\x01\x12\x0b\n\x03ttl\x18\x06 \x01(\x01\x12\x10\n\x08\x61ncestry\x18\x07 
\x03(\t\x12\x11\n\ttask_type\x18\x08 \x01(\t\x12(\n\trecordset\x18\t \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12 \n\x05\x65rror\x18\n \x01(\x0b\x32\x11.flwr.proto.Error\"\\\n\x07TaskIns\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Task\"\\\n\x07TaskRes\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Taskb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -26,9 +26,9 @@ if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None _globals['_TASK']._serialized_start=141 - _globals['_TASK']._serialized_end=387 - _globals['_TASKINS']._serialized_start=389 - _globals['_TASKINS']._serialized_end=481 - _globals['_TASKRES']._serialized_start=483 - _globals['_TASKRES']._serialized_end=575 + _globals['_TASK']._serialized_end=406 + _globals['_TASKINS']._serialized_start=408 + _globals['_TASKINS']._serialized_end=500 + _globals['_TASKRES']._serialized_start=502 + _globals['_TASKRES']._serialized_end=594 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/task_pb2.pyi b/src/py/flwr/proto/task_pb2.pyi index b9c10139cfb3..455791ac9e6e 100644 --- a/src/py/flwr/proto/task_pb2.pyi +++ b/src/py/flwr/proto/task_pb2.pyi @@ -20,6 +20,7 @@ class Task(google.protobuf.message.Message): CONSUMER_FIELD_NUMBER: builtins.int CREATED_AT_FIELD_NUMBER: builtins.int DELIVERED_AT_FIELD_NUMBER: builtins.int + PUSHED_AT_FIELD_NUMBER: builtins.int TTL_FIELD_NUMBER: builtins.int ANCESTRY_FIELD_NUMBER: builtins.int TASK_TYPE_FIELD_NUMBER: builtins.int @@ -29,9 +30,10 @@ class Task(google.protobuf.message.Message): def producer(self) -> flwr.proto.node_pb2.Node: ... @property def consumer(self) -> flwr.proto.node_pb2.Node: ... 
- created_at: typing.Text + created_at: builtins.float delivered_at: typing.Text - ttl: typing.Text + pushed_at: builtins.float + ttl: builtins.float @property def ancestry(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[typing.Text]: ... task_type: typing.Text @@ -43,16 +45,17 @@ class Task(google.protobuf.message.Message): *, producer: typing.Optional[flwr.proto.node_pb2.Node] = ..., consumer: typing.Optional[flwr.proto.node_pb2.Node] = ..., - created_at: typing.Text = ..., + created_at: builtins.float = ..., delivered_at: typing.Text = ..., - ttl: typing.Text = ..., + pushed_at: builtins.float = ..., + ttl: builtins.float = ..., ancestry: typing.Optional[typing.Iterable[typing.Text]] = ..., task_type: typing.Text = ..., recordset: typing.Optional[flwr.proto.recordset_pb2.RecordSet] = ..., error: typing.Optional[flwr.proto.error_pb2.Error] = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["consumer",b"consumer","error",b"error","producer",b"producer","recordset",b"recordset"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["ancestry",b"ancestry","consumer",b"consumer","created_at",b"created_at","delivered_at",b"delivered_at","error",b"error","producer",b"producer","recordset",b"recordset","task_type",b"task_type","ttl",b"ttl"]) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["ancestry",b"ancestry","consumer",b"consumer","created_at",b"created_at","delivered_at",b"delivered_at","error",b"error","producer",b"producer","pushed_at",b"pushed_at","recordset",b"recordset","task_type",b"task_type","ttl",b"ttl"]) -> None: ... 
global___Task = Task class TaskIns(google.protobuf.message.Message): diff --git a/src/py/flwr/server/app.py b/src/py/flwr/server/app.py index e04cfb37e118..7e06062311da 100644 --- a/src/py/flwr/server/app.py +++ b/src/py/flwr/server/app.py @@ -16,15 +16,21 @@ import argparse import asyncio +import csv import importlib.util import sys import threading from logging import ERROR, INFO, WARN from os.path import isfile from pathlib import Path -from typing import List, Optional, Tuple +from typing import List, Optional, Sequence, Set, Tuple import grpc +from cryptography.hazmat.primitives.asymmetric import ec +from cryptography.hazmat.primitives.serialization import ( + load_ssh_private_key, + load_ssh_public_key, +) from flwr.common import GRPC_MAX_MESSAGE_LENGTH, EventType, event from flwr.common.address import parse_address @@ -36,6 +42,10 @@ ) from flwr.common.exit_handlers import register_exit_handlers from flwr.common.logger import log +from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( + public_key_to_bytes, + ssh_types_to_elliptic_curve, +) from flwr.proto.fleet_pb2_grpc import ( # pylint: disable=E0611 add_FleetServicer_to_server, ) @@ -51,6 +61,7 @@ start_grpc_server, ) from .superlink.fleet.grpc_rere.fleet_servicer import FleetServicer +from .superlink.fleet.grpc_rere.server_interceptor import AuthenticateServerInterceptor from .superlink.fleet.vce import start_vce from .superlink.state import StateFactory @@ -291,9 +302,11 @@ def run_fleet_api() -> None: # pylint: disable=too-many-branches, too-many-locals, too-many-statements def run_superlink() -> None: - """Run Flower server (Driver API and Fleet API).""" - log(INFO, "Starting Flower server") + """Run Flower SuperLink (Driver API and Fleet API).""" + log(INFO, "Starting Flower SuperLink") + event(EventType.RUN_SUPERLINK_ENTER) + args = _parse_args_run_superlink().parse_args() # Parse IP address @@ -352,10 +365,28 @@ def run_superlink() -> None: sys.exit(f"Fleet IP address 
({address_arg}) cannot be parsed.") host, port, is_v6 = parsed_address address = f"[{host}]:{port}" if is_v6 else f"{host}:{port}" + + maybe_keys = _try_setup_client_authentication(args, certificates) + interceptors: Optional[Sequence[grpc.ServerInterceptor]] = None + if maybe_keys is not None: + ( + client_public_keys, + server_private_key, + server_public_key, + ) = maybe_keys + interceptors = [ + AuthenticateServerInterceptor( + client_public_keys, + server_private_key, + server_public_key, + ) + ] + fleet_server = _run_fleet_api_grpc_rere( address=address, state_factory=state_factory, certificates=certificates, + interceptors=interceptors, ) grpc_servers.append(fleet_server) elif args.fleet_api_type == TRANSPORT_TYPE_VCE: @@ -388,6 +419,70 @@ def run_superlink() -> None: driver_server.wait_for_termination(timeout=1) +def _try_setup_client_authentication( + args: argparse.Namespace, + certificates: Optional[Tuple[bytes, bytes, bytes]], +) -> Optional[Tuple[Set[bytes], ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]]: + if not args.require_client_authentication: + return None + + if certificates is None: + sys.exit( + "Client authentication only works over secure connections. " + "Please provide certificate paths using '--certificates' when " + "enabling '--require-client-authentication'." + ) + + client_keys_file_path = Path(args.require_client_authentication[0]) + if not client_keys_file_path.exists(): + sys.exit( + "The provided path to the client public keys CSV file does not exist: " + f"{client_keys_file_path}. " + "Please provide the CSV file path containing known client public keys " + "to '--require-client-authentication'." 
+ ) + + client_public_keys: Set[bytes] = set() + ssh_private_key = load_ssh_private_key( + Path(args.require_client_authentication[1]).read_bytes(), + None, + ) + ssh_public_key = load_ssh_public_key( + Path(args.require_client_authentication[2]).read_bytes() + ) + + try: + server_private_key, server_public_key = ssh_types_to_elliptic_curve( + ssh_private_key, ssh_public_key + ) + except TypeError: + sys.exit( + "The file paths provided could not be read as a private and public " + "key pair. Client authentication requires an elliptic curve public and " + "private key pair. Please provide the file paths containing elliptic " + "curve private and public keys to '--require-client-authentication'." + ) + + with open(client_keys_file_path, newline="", encoding="utf-8") as csvfile: + reader = csv.reader(csvfile) + for row in reader: + for element in row: + public_key = load_ssh_public_key(element.encode()) + if isinstance(public_key, ec.EllipticCurvePublicKey): + client_public_keys.add(public_key_to_bytes(public_key)) + else: + sys.exit( + "Error: Unable to parse the public keys in the .csv " + "file. Please ensure that the .csv file contains valid " + "SSH public keys and try again." 
+ ) + return ( + client_public_keys, + server_private_key, + server_public_key, + ) + + def _try_obtain_certificates( args: argparse.Namespace, ) -> Optional[Tuple[bytes, bytes, bytes]]: @@ -415,6 +510,7 @@ def _run_fleet_api_grpc_rere( address: str, state_factory: StateFactory, certificates: Optional[Tuple[bytes, bytes, bytes]], + interceptors: Optional[Sequence[grpc.ServerInterceptor]] = None, ) -> grpc.Server: """Run Fleet API (gRPC, request-response).""" # Create Fleet API gRPC server @@ -427,6 +523,7 @@ def _run_fleet_api_grpc_rere( server_address=address, max_message_length=GRPC_MAX_MESSAGE_LENGTH, certificates=certificates, + interceptors=interceptors, ) log(INFO, "Flower ECE: Starting Fleet API (gRPC-rere) on %s", address) @@ -568,9 +665,7 @@ def _parse_args_run_fleet_api() -> argparse.ArgumentParser: def _parse_args_run_superlink() -> argparse.ArgumentParser: """Parse command line arguments for both Driver API and Fleet API.""" parser = argparse.ArgumentParser( - description="This will start a Flower server " - "(meaning, a Driver API and a Fleet API), " - "that clients will be able to connect to.", + description="Start a Flower SuperLink", ) _add_args_common(parser=parser) @@ -606,6 +701,15 @@ def _add_args_common(parser: argparse.ArgumentParser) -> None: "Flower will just create a state in memory.", default=DATABASE, ) + parser.add_argument( + "--require-client-authentication", + nargs=3, + metavar=("CLIENT_KEYS", "SERVER_PRIVATE_KEY", "SERVER_PUBLIC_KEY"), + type=str, + help="Provide three file paths: (1) a .csv file containing a list of " + "known client public keys for authentication, (2) the server's private " + "key file, and (3) the server's public key file.", + ) def _add_args_driver_api(parser: argparse.ArgumentParser) -> None: diff --git a/src/py/flwr/server/compat/app.py b/src/py/flwr/server/compat/app.py index ff1d99b5366e..81da3f57e86a 100644 --- a/src/py/flwr/server/compat/app.py +++ b/src/py/flwr/server/compat/app.py @@ -29,7 +29,7 @@ from 
flwr.server.server_config import ServerConfig from flwr.server.strategy import Strategy -from ..driver import Driver +from ..driver import Driver, GrpcDriver from .app_utils import start_update_client_manager_thread DEFAULT_SERVER_ADDRESS_DRIVER = "[::]:9091" @@ -114,7 +114,7 @@ def start_driver( # pylint: disable=too-many-arguments, too-many-locals # Create the Driver if isinstance(root_certificates, str): root_certificates = Path(root_certificates).read_bytes() - driver = Driver( + driver = GrpcDriver( driver_service_address=address, root_certificates=root_certificates ) diff --git a/src/py/flwr/server/compat/app_utils.py b/src/py/flwr/server/compat/app_utils.py index 696ec1132c4a..1cdf1efbffb9 100644 --- a/src/py/flwr/server/compat/app_utils.py +++ b/src/py/flwr/server/compat/app_utils.py @@ -16,7 +16,6 @@ import threading -import time from typing import Dict, Tuple from ..client_manager import ClientManager @@ -60,6 +59,7 @@ def start_update_client_manager_thread( client_manager, f_stop, ), + daemon=True, ) thread.start() @@ -89,7 +89,7 @@ def _update_client_manager( for node_id in new_nodes: client_proxy = DriverClientProxy( node_id=node_id, - driver=driver.grpc_driver, # type: ignore + driver=driver, anonymous=False, run_id=driver.run_id, # type: ignore ) @@ -99,4 +99,5 @@ def _update_client_manager( raise RuntimeError("Could not register node.") # Sleep for 3 seconds - time.sleep(3) + if not f_stop.is_set(): + f_stop.wait(3) diff --git a/src/py/flwr/server/compat/app_utils_test.py b/src/py/flwr/server/compat/app_utils_test.py index 7e47e6eaaf32..023d65b0dc72 100644 --- a/src/py/flwr/server/compat/app_utils_test.py +++ b/src/py/flwr/server/compat/app_utils_test.py @@ -17,6 +17,8 @@ import time import unittest +from threading import Event +from typing import Optional from unittest.mock import Mock, patch from ..client_manager import SimpleClientManager @@ -29,9 +31,6 @@ class TestUtils(unittest.TestCase): def test_start_update_client_manager_thread(self) -> 
None: """Test start_update_client_manager_thread function.""" # Prepare - sleep = time.sleep - sleep_patch = patch("time.sleep", lambda x: sleep(x / 100)) - sleep_patch.start() expected_node_ids = list(range(100)) updated_expected_node_ids = list(range(80, 120)) driver = Mock() @@ -39,20 +38,30 @@ def test_start_update_client_manager_thread(self) -> None: driver.run_id = 123 driver.get_node_ids.return_value = expected_node_ids client_manager = SimpleClientManager() + original_wait = Event.wait + + def custom_wait(self: Event, timeout: Optional[float] = None) -> None: + if timeout is not None: + timeout /= 100 + original_wait(self, timeout) # Execute - thread, f_stop = start_update_client_manager_thread(driver, client_manager) - # Wait until all nodes are registered via `client_manager.sample()` - client_manager.sample(len(expected_node_ids)) - # Retrieve all nodes in `client_manager` - node_ids = {proxy.node_id for proxy in client_manager.all().values()} - # Update the GetNodesResponse and wait until the `client_manager` is updated - driver.get_node_ids.return_value = updated_expected_node_ids - sleep(0.1) - # Retrieve all nodes in `client_manager` - updated_node_ids = {proxy.node_id for proxy in client_manager.all().values()} - # Stop the thread - f_stop.set() + # Patching Event.wait with our custom function + with patch.object(Event, "wait", new=custom_wait): + thread, f_stop = start_update_client_manager_thread(driver, client_manager) + # Wait until all nodes are registered via `client_manager.sample()` + client_manager.sample(len(expected_node_ids)) + # Retrieve all nodes in `client_manager` + node_ids = {proxy.node_id for proxy in client_manager.all().values()} + # Update the GetNodesResponse and wait until the `client_manager` is updated + driver.get_node_ids.return_value = updated_expected_node_ids + time.sleep(0.1) + # Retrieve all nodes in `client_manager` + updated_node_ids = { + proxy.node_id for proxy in client_manager.all().values() + } + # Stop the 
thread + f_stop.set() # Assert assert node_ids == set(expected_node_ids) diff --git a/src/py/flwr/server/compat/driver_client_proxy.py b/src/py/flwr/server/compat/driver_client_proxy.py index 84c67149fad7..150803786f98 100644 --- a/src/py/flwr/server/compat/driver_client_proxy.py +++ b/src/py/flwr/server/compat/driver_client_proxy.py @@ -16,16 +16,14 @@ import time -from typing import List, Optional +from typing import Optional from flwr import common -from flwr.common import MessageType, MessageTypeLegacy, RecordSet +from flwr.common import Message, MessageType, MessageTypeLegacy, RecordSet from flwr.common import recordset_compat as compat -from flwr.common import serde -from flwr.proto import driver_pb2, node_pb2, task_pb2 # pylint: disable=E0611 from flwr.server.client_proxy import ClientProxy -from ..driver.grpc_driver import GrpcDriver +from ..driver.driver import Driver SLEEP_TIME = 1 @@ -33,7 +31,7 @@ class DriverClientProxy(ClientProxy): """Flower client proxy which delegates work using the Driver API.""" - def __init__(self, node_id: int, driver: GrpcDriver, anonymous: bool, run_id: int): + def __init__(self, node_id: int, driver: Driver, anonymous: bool, run_id: int): super().__init__(str(node_id)) self.node_id = node_id self.driver = driver @@ -114,55 +112,38 @@ def _send_receive_recordset( timeout: Optional[float], group_id: Optional[int], ) -> RecordSet: - task_ins = task_pb2.TaskIns( # pylint: disable=E1101 - task_id="", - group_id=str(group_id) if group_id is not None else "", - run_id=self.run_id, - task=task_pb2.Task( # pylint: disable=E1101 - producer=node_pb2.Node( # pylint: disable=E1101 - node_id=0, - anonymous=True, - ), - consumer=node_pb2.Node( # pylint: disable=E1101 - node_id=self.node_id, - anonymous=self.anonymous, - ), - task_type=task_type, - recordset=serde.recordset_to_proto(recordset), - ), - ) - push_task_ins_req = driver_pb2.PushTaskInsRequest( # pylint: disable=E1101 - task_ins_list=[task_ins] - ) - # Send TaskIns to Driver API 
- push_task_ins_res = self.driver.push_task_ins(req=push_task_ins_req) + # Create message + message = self.driver.create_message( + content=recordset, + message_type=task_type, + dst_node_id=self.node_id, + group_id=str(group_id) if group_id else "", + ttl=timeout, + ) - if len(push_task_ins_res.task_ids) != 1: - raise ValueError("Unexpected number of task_ids") + # Push message + message_ids = list(self.driver.push_messages(messages=[message])) + if len(message_ids) != 1: + raise ValueError("Unexpected number of message_ids") - task_id = push_task_ins_res.task_ids[0] - if task_id == "": - raise ValueError(f"Failed to schedule task for node {self.node_id}") + message_id = message_ids[0] + if message_id == "": + raise ValueError(f"Failed to send message to node {self.node_id}") if timeout: start_time = time.time() while True: - pull_task_res_req = driver_pb2.PullTaskResRequest( # pylint: disable=E1101 - node=node_pb2.Node(node_id=0, anonymous=True), # pylint: disable=E1101 - task_ids=[task_id], - ) - - # Ask Driver API for TaskRes - pull_task_res_res = self.driver.pull_task_res(req=pull_task_res_req) - - task_res_list: List[task_pb2.TaskRes] = list( # pylint: disable=E1101 - pull_task_res_res.task_res_list - ) - if len(task_res_list) == 1: - task_res = task_res_list[0] - return serde.recordset_from_proto(task_res.task.recordset) + messages = list(self.driver.pull_messages(message_ids)) + if len(messages) == 1: + msg: Message = messages[0] + if msg.has_error(): + raise ValueError( + f"Message contains an Error (reason: {msg.error.reason}). " + "It originated during client-side execution of a message." 
+ ) + return msg.content if timeout is not None and time.time() > start_time + timeout: raise RuntimeError("Timeout reached") diff --git a/src/py/flwr/server/compat/driver_client_proxy_test.py b/src/py/flwr/server/compat/driver_client_proxy_test.py index 3494049c1064..d9e3d3bc0824 100644 --- a/src/py/flwr/server/compat/driver_client_proxy_test.py +++ b/src/py/flwr/server/compat/driver_client_proxy_test.py @@ -16,59 +16,43 @@ import unittest -from typing import Union, cast -from unittest.mock import MagicMock +import unittest.mock +from typing import Any, Callable, Iterable, Optional, Union, cast +from unittest.mock import Mock import numpy as np import flwr +from flwr.common import Error, Message, Metadata, RecordSet from flwr.common import recordset_compat as compat -from flwr.common import serde -from flwr.common.constant import MessageType, MessageTypeLegacy from flwr.common.typing import ( Code, Config, EvaluateIns, EvaluateRes, + FitIns, FitRes, GetParametersIns, GetParametersRes, + GetPropertiesIns, GetPropertiesRes, Parameters, Properties, Status, ) -from flwr.proto import driver_pb2, node_pb2, task_pb2 # pylint: disable=E0611 - -from .driver_client_proxy import DriverClientProxy +from flwr.server.compat.driver_client_proxy import DriverClientProxy MESSAGE_PARAMETERS = Parameters(tensors=[b"abc"], tensor_type="np") CLIENT_PROPERTIES = cast(Properties, {"tensor_type": "numpy.ndarray"}) CLIENT_STATUS = Status(code=Code.OK, message="OK") +ERROR_REPLY = Error(code=0, reason="mock error") -def _make_task( - res: Union[GetParametersRes, GetPropertiesRes, FitRes, EvaluateRes] -) -> task_pb2.Task: # pylint: disable=E1101 - if isinstance(res, GetParametersRes): - message_type = MessageTypeLegacy.GET_PARAMETERS - recordset = compat.getparametersres_to_recordset(res, True) - elif isinstance(res, GetPropertiesRes): - message_type = MessageTypeLegacy.GET_PROPERTIES - recordset = compat.getpropertiesres_to_recordset(res) - elif isinstance(res, FitRes): - message_type = 
MessageType.TRAIN - recordset = compat.fitres_to_recordset(res, True) - elif isinstance(res, EvaluateRes): - message_type = MessageType.EVALUATE - recordset = compat.evaluateres_to_recordset(res) - else: - raise ValueError(f"Unsupported type: {type(res)}") - return task_pb2.Task( # pylint: disable=E1101 - task_type=message_type, - recordset=serde.recordset_to_proto(recordset), - ) +RUN_ID = 61016 +NODE_ID = 1 +INSTRUCTION_MESSAGE_ID = "mock instruction message id" +REPLY_MESSAGE_ID = "mock reply message id" class DriverClientProxyTestCase(unittest.TestCase): @@ -76,170 +60,232 @@ class DriverClientProxyTestCase(unittest.TestCase): def setUp(self) -> None: """Set up mocks for tests.""" - self.driver = MagicMock() - self.driver.get_nodes.return_value = ( - driver_pb2.GetNodesResponse( # pylint: disable=E1101 - nodes=[ - node_pb2.Node(node_id=1, anonymous=False) # pylint: disable=E1101 - ] - ) + driver = Mock() + driver.get_node_ids.return_value = [1] + driver.create_message.side_effect = self._create_message_dummy + client = DriverClientProxy( + node_id=NODE_ID, driver=driver, anonymous=False, run_id=61016 ) + self.driver = driver + self.client = client + self.created_msg: Optional[Message] = None + self.called_times: int = 0 + def test_get_properties(self) -> None: """Test positive case.""" # Prepare - self.driver.push_task_ins.return_value = ( - driver_pb2.PushTaskInsResponse( # pylint: disable=E1101 - task_ids=["19341fd7-62e1-4eb4-beb4-9876d3acda32"] - ) - ) - self.driver.pull_task_res.return_value = ( - driver_pb2.PullTaskResResponse( # pylint: disable=E1101 - task_res_list=[ - task_pb2.TaskRes( # pylint: disable=E1101 - task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", - group_id=str(0), - run_id=0, - task=_make_task( - GetPropertiesRes( - status=CLIENT_STATUS, properties=CLIENT_PROPERTIES - ) - ), - ) - ] - ) - ) - client = DriverClientProxy( - node_id=1, driver=self.driver, anonymous=True, run_id=0 - ) + res = GetPropertiesRes(status=CLIENT_STATUS, 
properties=CLIENT_PROPERTIES) + self.driver.push_messages.side_effect = self._get_push_messages(res) request_properties: Config = {"tensor_type": "str"} - ins: flwr.common.GetPropertiesIns = flwr.common.GetPropertiesIns( - config=request_properties - ) + ins = GetPropertiesIns(config=request_properties) # Execute - value: flwr.common.GetPropertiesRes = client.get_properties( - ins, timeout=None, group_id=0 - ) + value = self.client.get_properties(ins, timeout=None, group_id=0) # Assert - assert value.properties["tensor_type"] == "numpy.ndarray" + self._common_assertions(ins) + self.assertEqual(value.properties["tensor_type"], "numpy.ndarray") def test_get_parameters(self) -> None: """Test positive case.""" # Prepare - self.driver.push_task_ins.return_value = ( - driver_pb2.PushTaskInsResponse( # pylint: disable=E1101 - task_ids=["19341fd7-62e1-4eb4-beb4-9876d3acda32"] - ) - ) - self.driver.pull_task_res.return_value = ( - driver_pb2.PullTaskResResponse( # pylint: disable=E1101 - task_res_list=[ - task_pb2.TaskRes( # pylint: disable=E1101 - task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", - group_id=str(0), - run_id=0, - task=_make_task( - GetParametersRes( - status=CLIENT_STATUS, - parameters=MESSAGE_PARAMETERS, - ) - ), - ) - ] - ) + res = GetParametersRes( + status=CLIENT_STATUS, + parameters=MESSAGE_PARAMETERS, ) - client = DriverClientProxy( - node_id=1, driver=self.driver, anonymous=True, run_id=0 - ) - get_parameters_ins = GetParametersIns(config={}) + self.driver.push_messages.side_effect = self._get_push_messages(res) + ins = GetParametersIns(config={}) # Execute - value: flwr.common.GetParametersRes = client.get_parameters( - ins=get_parameters_ins, timeout=None, group_id=0 - ) + value = self.client.get_parameters(ins, timeout=None, group_id=0) # Assert - assert value.parameters.tensors[0] == b"abc" + self._common_assertions(ins) + self.assertEqual(value, res) def test_fit(self) -> None: """Test positive case.""" # Prepare - 
self.driver.push_task_ins.return_value = ( - driver_pb2.PushTaskInsResponse( # pylint: disable=E1101 - task_ids=["19341fd7-62e1-4eb4-beb4-9876d3acda32"] - ) - ) - self.driver.pull_task_res.return_value = ( - driver_pb2.PullTaskResResponse( # pylint: disable=E1101 - task_res_list=[ - task_pb2.TaskRes( # pylint: disable=E1101 - task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", - group_id=str(1), - run_id=0, - task=_make_task( - FitRes( - status=CLIENT_STATUS, - parameters=MESSAGE_PARAMETERS, - num_examples=10, - metrics={}, - ) - ), - ) - ] - ) - ) - client = DriverClientProxy( - node_id=1, driver=self.driver, anonymous=True, run_id=0 + res = FitRes( + status=CLIENT_STATUS, + parameters=MESSAGE_PARAMETERS, + num_examples=10, + metrics={}, ) + self.driver.push_messages.side_effect = self._get_push_messages(res) parameters = flwr.common.ndarrays_to_parameters([np.ones((2, 2))]) - ins: flwr.common.FitIns = flwr.common.FitIns(parameters, {}) + ins = FitIns(parameters, {}) # Execute - fit_res = client.fit(ins=ins, timeout=None, group_id=1) + value = self.client.fit(ins=ins, timeout=None, group_id=0) # Assert - assert fit_res.parameters.tensor_type == "np" - assert fit_res.parameters.tensors[0] == b"abc" - assert fit_res.num_examples == 10 + self._common_assertions(ins) + self.assertEqual(value, res) def test_evaluate(self) -> None: """Test positive case.""" # Prepare - self.driver.push_task_ins.return_value = ( - driver_pb2.PushTaskInsResponse( # pylint: disable=E1101 - task_ids=["19341fd7-62e1-4eb4-beb4-9876d3acda32"] - ) - ) - self.driver.pull_task_res.return_value = ( - driver_pb2.PullTaskResResponse( # pylint: disable=E1101 - task_res_list=[ - task_pb2.TaskRes( # pylint: disable=E1101 - task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", - group_id=str(1), - run_id=0, - task=_make_task( - EvaluateRes( - status=CLIENT_STATUS, - loss=0.0, - num_examples=0, - metrics={}, - ) - ), - ) - ] - ) + res = EvaluateRes( + status=CLIENT_STATUS, + loss=0.0, + num_examples=0, + 
metrics={}, ) - client = DriverClientProxy( - node_id=1, driver=self.driver, anonymous=True, run_id=0 - ) - parameters = Parameters(tensors=[], tensor_type="np") - evaluate_ins = EvaluateIns(parameters, {}) + self.driver.push_messages.side_effect = self._get_push_messages(res) + parameters = Parameters(tensors=[b"random params%^&*F"], tensor_type="np") + ins = EvaluateIns(parameters, {}) # Execute - evaluate_res = client.evaluate(evaluate_ins, timeout=None, group_id=1) + value = self.client.evaluate(ins, timeout=None, group_id=0) # Assert - assert 0.0 == evaluate_res.loss - assert 0 == evaluate_res.num_examples + self._common_assertions(ins) + self.assertEqual(value, res) + + def test_get_properties_and_fail(self) -> None: + """Test negative case.""" + # Prepare + self.driver.push_messages.side_effect = self._get_push_messages( + None, error_reply=True + ) + request_properties: Config = {"tensor_type": "str"} + ins = GetPropertiesIns(config=request_properties) + + # Execute and assert + self.assertRaises( + Exception, self.client.get_properties, ins, timeout=None, group_id=0 + ) + self._common_assertions(ins) + + def test_get_parameters_and_fail(self) -> None: + """Test negative case.""" + # Prepare + self.driver.push_messages.side_effect = self._get_push_messages( + None, error_reply=True + ) + ins = GetParametersIns(config={}) + + # Execute and assert + self.assertRaises( + Exception, self.client.get_parameters, ins, timeout=None, group_id=0 + ) + self._common_assertions(ins) + + def test_fit_and_fail(self) -> None: + """Test negative case.""" + # Prepare + self.driver.push_messages.side_effect = self._get_push_messages( + None, error_reply=True + ) + parameters = flwr.common.ndarrays_to_parameters([np.ones((2, 2))]) + ins = FitIns(parameters, {}) + + # Execute and assert + self.assertRaises(Exception, self.client.fit, ins, timeout=None, group_id=0) + self._common_assertions(ins) + + def test_evaluate_and_fail(self) -> None: + """Test negative case.""" + # 
Prepare + self.driver.push_messages.side_effect = self._get_push_messages( + None, error_reply=True + ) + parameters = Parameters(tensors=[b"random params%^&*F"], tensor_type="np") + ins = EvaluateIns(parameters, {}) + + # Execute and assert + self.assertRaises( + Exception, self.client.evaluate, ins, timeout=None, group_id=0 + ) + self._common_assertions(ins) + + def _create_message_dummy( # pylint: disable=R0913 + self, + content: RecordSet, + message_type: str, + dst_node_id: int, + group_id: str, + ttl: Optional[float] = None, + ) -> Message: + """Create a new message. + + This is a method for the Mock object. + """ + self.called_times += 1 + ttl_ = 123456 if ttl is None else ttl + metadata = Metadata( + run_id=RUN_ID, + message_id="", # Will be set by the server + src_node_id=0, + dst_node_id=dst_node_id, + reply_to_message="", + group_id=group_id, + ttl=ttl_, + message_type=message_type, + ) + self.created_msg = Message(metadata=metadata, content=content) + return self.created_msg + + def _get_push_messages( + self, + res: Union[GetParametersRes, GetPropertiesRes, FitRes, EvaluateRes, None], + error_reply: bool = False, + ) -> Callable[[Iterable[Message]], Iterable[str]]: + """Get the push_messages function that sets the return value of pull_messages + when called.""" + + def push_messages(messages: Iterable[Message]) -> Iterable[str]: + msg = list(messages)[0] + if error_reply: + recordset = None + ret = msg.create_error_reply(ERROR_REPLY) + elif isinstance(res, GetParametersRes): + recordset = compat.getparametersres_to_recordset(res, True) + elif isinstance(res, GetPropertiesRes): + recordset = compat.getpropertiesres_to_recordset(res) + elif isinstance(res, FitRes): + recordset = compat.fitres_to_recordset(res, True) + elif isinstance(res, EvaluateRes): + recordset = compat.evaluateres_to_recordset(res) + else: + raise ValueError(f"Unsupported type: {type(res)}") + if recordset is not None: + ret = msg.create_reply(recordset) + 
ret.metadata.__dict__["_message_id"] = REPLY_MESSAGE_ID + + # Set the return value of `pull_messages` + self.driver.pull_messages.return_value = [ret] + return [INSTRUCTION_MESSAGE_ID] + + return push_messages + + def _common_assertions(self, original_ins: Any) -> None: + """Check common assertions.""" + # Check if the created message contains the orignal *Ins + assert self.created_msg is not None + actual_ins = { # type: ignore + GetPropertiesIns: compat.recordset_to_getpropertiesins, + GetParametersIns: compat.recordset_to_getparametersins, + FitIns: (lambda x: compat.recordset_to_fitins(x, True)), + EvaluateIns: (lambda x: compat.recordset_to_evaluateins(x, True)), + }[type(original_ins)](self.created_msg.content) + self.assertEqual(self.called_times, 1) + self.assertEqual(actual_ins, original_ins) + + # Check if push_messages is called once with expected args/kwargs. + self.driver.push_messages.assert_called_once() + try: + self.driver.push_messages.assert_any_call([self.created_msg]) + except AssertionError: + self.driver.push_messages.assert_any_call(messages=[self.created_msg]) + + # Check if pull_messages is called once with expected args/kwargs. 
+ self.driver.pull_messages.assert_called_once() + try: + self.driver.pull_messages.assert_called_with([INSTRUCTION_MESSAGE_ID]) + except AssertionError: + self.driver.pull_messages.assert_called_with( + message_ids=[INSTRUCTION_MESSAGE_ID] + ) diff --git a/src/py/flwr/server/driver/__init__.py b/src/py/flwr/server/driver/__init__.py index b61f6eebf6a8..b24a4fd92cd4 100644 --- a/src/py/flwr/server/driver/__init__.py +++ b/src/py/flwr/server/driver/__init__.py @@ -16,9 +16,10 @@ from .driver import Driver -from .grpc_driver import GrpcDriver +from .grpc_driver import GrpcDriver, GrpcDriverHelper __all__ = [ "Driver", "GrpcDriver", + "GrpcDriverHelper", ] diff --git a/src/py/flwr/server/driver/driver.py b/src/py/flwr/server/driver/driver.py index 0098e0ce97c2..b95cec95ab47 100644 --- a/src/py/flwr/server/driver/driver.py +++ b/src/py/flwr/server/driver/driver.py @@ -1,4 +1,4 @@ -# Copyright 2022 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,85 +12,26 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""Flower driver service client.""" +"""Driver (abstract base class).""" -import time -from typing import Iterable, List, Optional, Tuple +from abc import ABC, abstractmethod +from typing import Iterable, List, Optional -from flwr.common import Message, Metadata, RecordSet -from flwr.common.serde import message_from_taskres, message_to_taskins -from flwr.proto.driver_pb2 import ( # pylint: disable=E0611 - CreateRunRequest, - GetNodesRequest, - PullTaskResRequest, - PushTaskInsRequest, -) -from flwr.proto.node_pb2 import Node # pylint: disable=E0611 -from flwr.proto.task_pb2 import TaskIns # pylint: disable=E0611 +from flwr.common import Message, RecordSet -from .grpc_driver import DEFAULT_SERVER_ADDRESS_DRIVER, GrpcDriver +class Driver(ABC): + """Abstract base Driver class for the Driver API.""" -class Driver: - """`Driver` class provides an interface to the Driver API. - - Parameters - ---------- - driver_service_address : Optional[str] - The IPv4 or IPv6 address of the Driver API server. - Defaults to `"[::]:9091"`. - certificates : bytes (default: None) - Tuple containing root certificate, server certificate, and private key - to start a secure SSL-enabled server. The tuple is expected to have - three bytes elements in the following order: - - * CA certificate. - * server certificate. - * server private key. 
- """ - - def __init__( - self, - driver_service_address: str = DEFAULT_SERVER_ADDRESS_DRIVER, - root_certificates: Optional[bytes] = None, - ) -> None: - self.addr = driver_service_address - self.root_certificates = root_certificates - self.grpc_driver: Optional[GrpcDriver] = None - self.run_id: Optional[int] = None - self.node = Node(node_id=0, anonymous=True) - - def _get_grpc_driver_and_run_id(self) -> Tuple[GrpcDriver, int]: - # Check if the GrpcDriver is initialized - if self.grpc_driver is None or self.run_id is None: - # Connect and create run - self.grpc_driver = GrpcDriver( - driver_service_address=self.addr, - root_certificates=self.root_certificates, - ) - self.grpc_driver.connect() - res = self.grpc_driver.create_run(CreateRunRequest()) - self.run_id = res.run_id - return self.grpc_driver, self.run_id - - def _check_message(self, message: Message) -> None: - # Check if the message is valid - if not ( - message.metadata.run_id == self.run_id - and message.metadata.src_node_id == self.node.node_id - and message.metadata.message_id == "" - and message.metadata.reply_to_message == "" - ): - raise ValueError(f"Invalid message: {message}") - + @abstractmethod def create_message( # pylint: disable=too-many-arguments self, content: RecordSet, message_type: str, dst_node_id: int, group_id: str, - ttl: str, + ttl: Optional[float] = None, ) -> Message: """Create a new message with specified parameters. @@ -110,36 +51,23 @@ def create_message( # pylint: disable=too-many-arguments group_id : str The ID of the group to which this message is associated. In some settings, this is used as the FL round. - ttl : str + ttl : Optional[float] (default: None) Time-to-live for the round trip of this message, i.e., the time from sending - this message to receiving a reply. It specifies the duration for which the - message and its potential reply are considered valid. + this message to receiving a reply. 
It specifies in seconds the duration for + which the message and its potential reply are considered valid. If unset, + the default TTL (i.e., `common.DEFAULT_TTL`) will be used. Returns ------- message : Message A new `Message` instance with the specified content and metadata. """ - _, run_id = self._get_grpc_driver_and_run_id() - metadata = Metadata( - run_id=run_id, - message_id="", # Will be set by the server - src_node_id=self.node.node_id, - dst_node_id=dst_node_id, - reply_to_message="", - group_id=group_id, - ttl=ttl, - message_type=message_type, - ) - return Message(metadata=metadata, content=content) + @abstractmethod def get_node_ids(self) -> List[int]: """Get node IDs.""" - grpc_driver, run_id = self._get_grpc_driver_and_run_id() - # Call GrpcDriver method - res = grpc_driver.get_nodes(GetNodesRequest(run_id=run_id)) - return [node.node_id for node in res.nodes] + @abstractmethod def push_messages(self, messages: Iterable[Message]) -> Iterable[str]: """Push messages to specified node IDs. @@ -157,20 +85,8 @@ def push_messages(self, messages: Iterable[Message]) -> Iterable[str]: An iterable of IDs for the messages that were sent, which can be used to pull replies. """ - grpc_driver, _ = self._get_grpc_driver_and_run_id() - # Construct TaskIns - task_ins_list: List[TaskIns] = [] - for msg in messages: - # Check message - self._check_message(msg) - # Convert Message to TaskIns - taskins = message_to_taskins(msg) - # Add to list - task_ins_list.append(taskins) - # Call GrpcDriver method - res = grpc_driver.push_task_ins(PushTaskInsRequest(task_ins_list=task_ins_list)) - return list(res.task_ids) + @abstractmethod def pull_messages(self, message_ids: Iterable[str]) -> Iterable[Message]: """Pull messages based on message IDs. @@ -187,15 +103,8 @@ def pull_messages(self, message_ids: Iterable[str]) -> Iterable[Message]: messages : Iterable[Message] An iterable of messages received. 
""" - grpc_driver, _ = self._get_grpc_driver_and_run_id() - # Pull TaskRes - res = grpc_driver.pull_task_res( - PullTaskResRequest(node=self.node, task_ids=message_ids) - ) - # Convert TaskRes to Message - msgs = [message_from_taskres(taskres) for taskres in res.task_res_list] - return msgs + @abstractmethod def send_and_receive( self, messages: Iterable[Message], @@ -229,28 +138,3 @@ def send_and_receive( replies for all sent messages. A message remains valid until its TTL, which is not affected by `timeout`. """ - # Push messages - msg_ids = set(self.push_messages(messages)) - - # Pull messages - end_time = time.time() + (timeout if timeout is not None else 0.0) - ret: List[Message] = [] - while timeout is None or time.time() < end_time: - res_msgs = self.pull_messages(msg_ids) - ret.extend(res_msgs) - msg_ids.difference_update( - {msg.metadata.reply_to_message for msg in res_msgs} - ) - if len(msg_ids) == 0: - break - # Sleep - time.sleep(3) - return ret - - def close(self) -> None: - """Disconnect from the SuperLink if connected.""" - # Check if GrpcDriver is initialized - if self.grpc_driver is None: - return - # Disconnect - self.grpc_driver.disconnect() diff --git a/src/py/flwr/server/driver/grpc_driver.py b/src/py/flwr/server/driver/grpc_driver.py index b6e2b2602cd5..d339f1b232f9 100644 --- a/src/py/flwr/server/driver/grpc_driver.py +++ b/src/py/flwr/server/driver/grpc_driver.py @@ -1,4 +1,4 @@ -# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# Copyright 2022 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,17 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""Flower driver service client.""" - +"""Flower gRPC Driver.""" +import time +import warnings from logging import DEBUG, ERROR, WARNING -from typing import Optional +from typing import Iterable, List, Optional, Tuple import grpc -from flwr.common import EventType, event +from flwr.common import DEFAULT_TTL, EventType, Message, Metadata, RecordSet, event from flwr.common.grpc import create_channel from flwr.common.logger import log +from flwr.common.serde import message_from_taskres, message_to_taskins from flwr.proto.driver_pb2 import ( # pylint: disable=E0611 CreateRunRequest, CreateRunResponse, @@ -34,19 +36,23 @@ PushTaskInsResponse, ) from flwr.proto.driver_pb2_grpc import DriverStub # pylint: disable=E0611 +from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.task_pb2 import TaskIns # pylint: disable=E0611 + +from .driver import Driver DEFAULT_SERVER_ADDRESS_DRIVER = "[::]:9091" ERROR_MESSAGE_DRIVER_NOT_CONNECTED = """ [Driver] Error: Not connected. -Call `connect()` on the `GrpcDriver` instance before calling any of the other -`GrpcDriver` methods. +Call `connect()` on the `GrpcDriverHelper` instance before calling any of the other +`GrpcDriverHelper` methods. 
""" -class GrpcDriver: - """`GrpcDriver` provides access to the gRPC Driver API/service.""" +class GrpcDriverHelper: + """`GrpcDriverHelper` provides access to the gRPC Driver API/service.""" def __init__( self, @@ -89,7 +95,7 @@ def create_run(self, req: CreateRunRequest) -> CreateRunResponse: # Check if channel is open if self.stub is None: log(ERROR, ERROR_MESSAGE_DRIVER_NOT_CONNECTED) - raise ConnectionError("`GrpcDriver` instance not connected") + raise ConnectionError("`GrpcDriverHelper` instance not connected") # Call Driver API res: CreateRunResponse = self.stub.CreateRun(request=req) @@ -100,7 +106,7 @@ def get_nodes(self, req: GetNodesRequest) -> GetNodesResponse: # Check if channel is open if self.stub is None: log(ERROR, ERROR_MESSAGE_DRIVER_NOT_CONNECTED) - raise ConnectionError("`GrpcDriver` instance not connected") + raise ConnectionError("`GrpcDriverHelper` instance not connected") # Call gRPC Driver API res: GetNodesResponse = self.stub.GetNodes(request=req) @@ -111,7 +117,7 @@ def push_task_ins(self, req: PushTaskInsRequest) -> PushTaskInsResponse: # Check if channel is open if self.stub is None: log(ERROR, ERROR_MESSAGE_DRIVER_NOT_CONNECTED) - raise ConnectionError("`GrpcDriver` instance not connected") + raise ConnectionError("`GrpcDriverHelper` instance not connected") # Call gRPC Driver API res: PushTaskInsResponse = self.stub.PushTaskIns(request=req) @@ -122,8 +128,188 @@ def pull_task_res(self, req: PullTaskResRequest) -> PullTaskResResponse: # Check if channel is open if self.stub is None: log(ERROR, ERROR_MESSAGE_DRIVER_NOT_CONNECTED) - raise ConnectionError("`GrpcDriver` instance not connected") + raise ConnectionError("`GrpcDriverHelper` instance not connected") # Call Driver API res: PullTaskResResponse = self.stub.PullTaskRes(request=req) return res + + +class GrpcDriver(Driver): + """`Driver` class provides an interface to the Driver API. 
+ + Parameters + ---------- + driver_service_address : Optional[str] + The IPv4 or IPv6 address of the Driver API server. + Defaults to `"[::]:9091"`. + certificates : bytes (default: None) + Tuple containing root certificate, server certificate, and private key + to start a secure SSL-enabled server. The tuple is expected to have + three bytes elements in the following order: + + * CA certificate. + * server certificate. + * server private key. + fab_id : str (default: None) + The identifier of the FAB used in the run. + fab_version : str (default: None) + The version of the FAB used in the run. + """ + + def __init__( + self, + driver_service_address: str = DEFAULT_SERVER_ADDRESS_DRIVER, + root_certificates: Optional[bytes] = None, + fab_id: Optional[str] = None, + fab_version: Optional[str] = None, + ) -> None: + self.addr = driver_service_address + self.root_certificates = root_certificates + self.driver_helper: Optional[GrpcDriverHelper] = None + self.run_id: Optional[int] = None + self.fab_id = fab_id if fab_id is not None else "" + self.fab_version = fab_version if fab_version is not None else "" + self.node = Node(node_id=0, anonymous=True) + + def _get_grpc_driver_helper_and_run_id(self) -> Tuple[GrpcDriverHelper, int]: + # Check if the GrpcDriverHelper is initialized + if self.driver_helper is None or self.run_id is None: + # Connect and create run + self.driver_helper = GrpcDriverHelper( + driver_service_address=self.addr, + root_certificates=self.root_certificates, + ) + self.driver_helper.connect() + req = CreateRunRequest(fab_id=self.fab_id, fab_version=self.fab_version) + res = self.driver_helper.create_run(req) + self.run_id = res.run_id + return self.driver_helper, self.run_id + + def _check_message(self, message: Message) -> None: + # Check if the message is valid + if not ( + message.metadata.run_id == self.run_id + and message.metadata.src_node_id == self.node.node_id + and message.metadata.message_id == "" + and 
message.metadata.reply_to_message == "" + and message.metadata.ttl > 0 + ): + raise ValueError(f"Invalid message: {message}") + + def create_message( # pylint: disable=too-many-arguments + self, + content: RecordSet, + message_type: str, + dst_node_id: int, + group_id: str, + ttl: Optional[float] = None, + ) -> Message: + """Create a new message with specified parameters. + + This method constructs a new `Message` with given content and metadata. + The `run_id` and `src_node_id` will be set automatically. + """ + _, run_id = self._get_grpc_driver_helper_and_run_id() + if ttl: + warnings.warn( + "A custom TTL was set, but note that the SuperLink does not enforce " + "the TTL yet. The SuperLink will start enforcing the TTL in a future " + "version of Flower.", + stacklevel=2, + ) + + ttl_ = DEFAULT_TTL if ttl is None else ttl + metadata = Metadata( + run_id=run_id, + message_id="", # Will be set by the server + src_node_id=self.node.node_id, + dst_node_id=dst_node_id, + reply_to_message="", + group_id=group_id, + ttl=ttl_, + message_type=message_type, + ) + return Message(metadata=metadata, content=content) + + def get_node_ids(self) -> List[int]: + """Get node IDs.""" + grpc_driver_helper, run_id = self._get_grpc_driver_helper_and_run_id() + # Call GrpcDriverHelper method + res = grpc_driver_helper.get_nodes(GetNodesRequest(run_id=run_id)) + return [node.node_id for node in res.nodes] + + def push_messages(self, messages: Iterable[Message]) -> Iterable[str]: + """Push messages to specified node IDs. + + This method takes an iterable of messages and sends each message + to the node specified in `dst_node_id`. 
+ """ + grpc_driver_helper, _ = self._get_grpc_driver_helper_and_run_id() + # Construct TaskIns + task_ins_list: List[TaskIns] = [] + for msg in messages: + # Check message + self._check_message(msg) + # Convert Message to TaskIns + taskins = message_to_taskins(msg) + # Add to list + task_ins_list.append(taskins) + # Call GrpcDriverHelper method + res = grpc_driver_helper.push_task_ins( + PushTaskInsRequest(task_ins_list=task_ins_list) + ) + return list(res.task_ids) + + def pull_messages(self, message_ids: Iterable[str]) -> Iterable[Message]: + """Pull messages based on message IDs. + + This method is used to collect messages from the SuperLink that correspond to a + set of given message IDs. + """ + grpc_driver, _ = self._get_grpc_driver_helper_and_run_id() + # Pull TaskRes + res = grpc_driver.pull_task_res( + PullTaskResRequest(node=self.node, task_ids=message_ids) + ) + # Convert TaskRes to Message + msgs = [message_from_taskres(taskres) for taskres in res.task_res_list] + return msgs + + def send_and_receive( + self, + messages: Iterable[Message], + *, + timeout: Optional[float] = None, + ) -> Iterable[Message]: + """Push messages to specified node IDs and pull the reply messages. + + This method sends a list of messages to their destination node IDs and then + waits for the replies. It continues to pull replies until either all replies are + received or the specified timeout duration is exceeded. 
+ """ + # Push messages + msg_ids = set(self.push_messages(messages)) + + # Pull messages + end_time = time.time() + (timeout if timeout is not None else 0.0) + ret: List[Message] = [] + while timeout is None or time.time() < end_time: + res_msgs = self.pull_messages(msg_ids) + ret.extend(res_msgs) + msg_ids.difference_update( + {msg.metadata.reply_to_message for msg in res_msgs} + ) + if len(msg_ids) == 0: + break + # Sleep + time.sleep(3) + return ret + + def close(self) -> None: + """Disconnect from the SuperLink if connected.""" + # Check if GrpcDriverHelper is initialized + if self.driver_helper is None: + return + # Disconnect + self.driver_helper.disconnect() diff --git a/src/py/flwr/server/driver/driver_test.py b/src/py/flwr/server/driver/grpc_driver_test.py similarity index 70% rename from src/py/flwr/server/driver/driver_test.py rename to src/py/flwr/server/driver/grpc_driver_test.py index 5136f4f90210..fbead0e3043d 100644 --- a/src/py/flwr/server/driver/driver_test.py +++ b/src/py/flwr/server/driver/grpc_driver_test.py @@ -19,7 +19,7 @@ import unittest from unittest.mock import Mock, patch -from flwr.common import RecordSet +from flwr.common import DEFAULT_TTL, RecordSet from flwr.common.message import Error from flwr.common.serde import error_to_proto, recordset_to_proto from flwr.proto.driver_pb2 import ( # pylint: disable=E0611 @@ -29,49 +29,50 @@ ) from flwr.proto.task_pb2 import Task, TaskRes # pylint: disable=E0611 -from .driver import Driver +from .grpc_driver import GrpcDriver -class TestDriver(unittest.TestCase): - """Tests for `Driver` class.""" +class TestGrpcDriver(unittest.TestCase): + """Tests for `GrpcDriver` class.""" def setUp(self) -> None: - """Initialize mock GrpcDriver and Driver instance before each test.""" + """Initialize mock GrpcDriverHelper and Driver instance before each test.""" mock_response = Mock() mock_response.run_id = 61016 - self.mock_grpc_driver = Mock() - self.mock_grpc_driver.create_run.return_value = mock_response 
+ self.mock_grpc_driver_helper = Mock() + self.mock_grpc_driver_helper.create_run.return_value = mock_response self.patcher = patch( - "flwr.server.driver.driver.GrpcDriver", return_value=self.mock_grpc_driver + "flwr.server.driver.grpc_driver.GrpcDriverHelper", + return_value=self.mock_grpc_driver_helper, ) self.patcher.start() - self.driver = Driver() + self.driver = GrpcDriver() def tearDown(self) -> None: """Cleanup after each test.""" self.patcher.stop() def test_check_and_init_grpc_driver_already_initialized(self) -> None: - """Test that GrpcDriver doesn't initialize if run is created.""" + """Test that GrpcDriverHelper doesn't initialize if run is created.""" # Prepare - self.driver.grpc_driver = self.mock_grpc_driver + self.driver.driver_helper = self.mock_grpc_driver_helper self.driver.run_id = 61016 # Execute # pylint: disable-next=protected-access - self.driver._get_grpc_driver_and_run_id() + self.driver._get_grpc_driver_helper_and_run_id() # Assert - self.mock_grpc_driver.connect.assert_not_called() + self.mock_grpc_driver_helper.connect.assert_not_called() def test_check_and_init_grpc_driver_needs_initialization(self) -> None: - """Test GrpcDriver initialization when run is not created.""" + """Test GrpcDriverHelper initialization when run is not created.""" # Execute # pylint: disable-next=protected-access - self.driver._get_grpc_driver_and_run_id() + self.driver._get_grpc_driver_helper_and_run_id() # Assert - self.mock_grpc_driver.connect.assert_called_once() + self.mock_grpc_driver_helper.connect.assert_called_once() self.assertEqual(self.driver.run_id, 61016) def test_get_nodes(self) -> None: @@ -79,14 +80,14 @@ def test_get_nodes(self) -> None: # Prepare mock_response = Mock() mock_response.nodes = [Mock(node_id=404), Mock(node_id=200)] - self.mock_grpc_driver.get_nodes.return_value = mock_response + self.mock_grpc_driver_helper.get_nodes.return_value = mock_response # Execute node_ids = self.driver.get_node_ids() - args, kwargs = 
self.mock_grpc_driver.get_nodes.call_args + args, kwargs = self.mock_grpc_driver_helper.get_nodes.call_args # Assert - self.mock_grpc_driver.connect.assert_called_once() + self.mock_grpc_driver_helper.connect.assert_called_once() self.assertEqual(len(args), 1) self.assertEqual(len(kwargs), 0) self.assertIsInstance(args[0], GetNodesRequest) @@ -97,17 +98,18 @@ def test_push_messages_valid(self) -> None: """Test pushing valid messages.""" # Prepare mock_response = Mock(task_ids=["id1", "id2"]) - self.mock_grpc_driver.push_task_ins.return_value = mock_response + self.mock_grpc_driver_helper.push_task_ins.return_value = mock_response msgs = [ - self.driver.create_message(RecordSet(), "", 0, "", "") for _ in range(2) + self.driver.create_message(RecordSet(), "", 0, "", DEFAULT_TTL) + for _ in range(2) ] # Execute msg_ids = self.driver.push_messages(msgs) - args, kwargs = self.mock_grpc_driver.push_task_ins.call_args + args, kwargs = self.mock_grpc_driver_helper.push_task_ins.call_args # Assert - self.mock_grpc_driver.connect.assert_called_once() + self.mock_grpc_driver_helper.connect.assert_called_once() self.assertEqual(len(args), 1) self.assertEqual(len(kwargs), 0) self.assertIsInstance(args[0], PushTaskInsRequest) @@ -119,12 +121,13 @@ def test_push_messages_invalid(self) -> None: """Test pushing invalid messages.""" # Prepare mock_response = Mock(task_ids=["id1", "id2"]) - self.mock_grpc_driver.push_task_ins.return_value = mock_response + self.mock_grpc_driver_helper.push_task_ins.return_value = mock_response msgs = [ - self.driver.create_message(RecordSet(), "", 0, "", "") for _ in range(2) + self.driver.create_message(RecordSet(), "", 0, "", DEFAULT_TTL) + for _ in range(2) ] # Use invalid run_id - msgs[1].metadata._run_id += 1 # pylint: disable=protected-access + msgs[1].metadata.__dict__["_run_id"] += 1 # pylint: disable=protected-access # Execute and assert with self.assertRaises(ValueError): @@ -142,16 +145,16 @@ def 
test_pull_messages_with_given_message_ids(self) -> None: ), TaskRes(task=Task(ancestry=["id3"], error=error_to_proto(Error(code=0)))), ] - self.mock_grpc_driver.pull_task_res.return_value = mock_response + self.mock_grpc_driver_helper.pull_task_res.return_value = mock_response msg_ids = ["id1", "id2", "id3"] # Execute msgs = self.driver.pull_messages(msg_ids) reply_tos = {msg.metadata.reply_to_message for msg in msgs} - args, kwargs = self.mock_grpc_driver.pull_task_res.call_args + args, kwargs = self.mock_grpc_driver_helper.pull_task_res.call_args # Assert - self.mock_grpc_driver.connect.assert_called_once() + self.mock_grpc_driver_helper.connect.assert_called_once() self.assertEqual(len(args), 1) self.assertEqual(len(kwargs), 0) self.assertIsInstance(args[0], PullTaskResRequest) @@ -162,15 +165,15 @@ def test_send_and_receive_messages_complete(self) -> None: """Test send and receive all messages successfully.""" # Prepare mock_response = Mock(task_ids=["id1"]) - self.mock_grpc_driver.push_task_ins.return_value = mock_response + self.mock_grpc_driver_helper.push_task_ins.return_value = mock_response # The response message must include either `content` (i.e. a recordset) or # an `Error`. 
We choose the latter in this case error_proto = error_to_proto(Error(code=0)) mock_response = Mock( task_res_list=[TaskRes(task=Task(ancestry=["id1"], error=error_proto))] ) - self.mock_grpc_driver.pull_task_res.return_value = mock_response - msgs = [self.driver.create_message(RecordSet(), "", 0, "", "")] + self.mock_grpc_driver_helper.pull_task_res.return_value = mock_response + msgs = [self.driver.create_message(RecordSet(), "", 0, "", DEFAULT_TTL)] # Execute ret_msgs = list(self.driver.send_and_receive(msgs)) @@ -184,10 +187,10 @@ def test_send_and_receive_messages_timeout(self) -> None: # Prepare sleep_fn = time.sleep mock_response = Mock(task_ids=["id1"]) - self.mock_grpc_driver.push_task_ins.return_value = mock_response + self.mock_grpc_driver_helper.push_task_ins.return_value = mock_response mock_response = Mock(task_res_list=[]) - self.mock_grpc_driver.pull_task_res.return_value = mock_response - msgs = [self.driver.create_message(RecordSet(), "", 0, "", "")] + self.mock_grpc_driver_helper.pull_task_res.return_value = mock_response + msgs = [self.driver.create_message(RecordSet(), "", 0, "", DEFAULT_TTL)] # Execute with patch("time.sleep", side_effect=lambda t: sleep_fn(t * 0.01)): @@ -202,13 +205,13 @@ def test_del_with_initialized_driver(self) -> None: """Test cleanup behavior when Driver is initialized.""" # Prepare # pylint: disable-next=protected-access - self.driver._get_grpc_driver_and_run_id() + self.driver._get_grpc_driver_helper_and_run_id() # Execute self.driver.close() # Assert - self.mock_grpc_driver.disconnect.assert_called_once() + self.mock_grpc_driver_helper.disconnect.assert_called_once() def test_del_with_uninitialized_driver(self) -> None: """Test cleanup behavior when Driver is not initialized.""" @@ -216,4 +219,4 @@ def test_del_with_uninitialized_driver(self) -> None: self.driver.close() # Assert - self.mock_grpc_driver.disconnect.assert_not_called() + self.mock_grpc_driver_helper.disconnect.assert_not_called() diff --git 
a/src/py/flwr/server/run_serverapp.py b/src/py/flwr/server/run_serverapp.py index 2f0f1185847e..9cc7974d34da 100644 --- a/src/py/flwr/server/run_serverapp.py +++ b/src/py/flwr/server/run_serverapp.py @@ -25,7 +25,7 @@ from flwr.common.logger import log, update_console_handler from flwr.common.object_ref import load_app -from .driver.driver import Driver +from .driver import Driver, GrpcDriver from .server_app import LoadServerAppError, ServerApp @@ -128,13 +128,15 @@ def run_server_app() -> None: server_app_dir = args.dir server_app_attr = getattr(args, "server-app") - # Initialize Driver - driver = Driver( + # Initialize GrpcDriver + driver = GrpcDriver( driver_service_address=args.server, root_certificates=root_certificates, + fab_id=args.fab_id, + fab_version=args.fab_version, ) - # Run the Server App with the Driver + # Run the ServerApp with the Driver run(driver=driver, server_app_dir=server_app_dir, server_app_attr=server_app_attr) # Clean up @@ -183,5 +185,17 @@ def _parse_args_run_server_app() -> argparse.ArgumentParser: "app from there." 
" Default: current working directory.", ) + parser.add_argument( + "--fab-id", + default=None, + type=str, + help="The identifier of the FAB used in the run.", + ) + parser.add_argument( + "--fab-version", + default=None, + type=str, + help="The version of the FAB used in the run.", + ) return parser diff --git a/src/py/flwr/server/server_app.py b/src/py/flwr/server/server_app.py index 1b2eab87fdaa..ea2eb3fd1a69 100644 --- a/src/py/flwr/server/server_app.py +++ b/src/py/flwr/server/server_app.py @@ -18,6 +18,7 @@ from typing import Callable, Optional from flwr.common import Context, RecordSet +from flwr.common.logger import warn_preview_feature from flwr.server.strategy import Strategy from .client_manager import ClientManager @@ -120,6 +121,8 @@ def main_decorator(main_fn: ServerAppCallable) -> ServerAppCallable: """, ) + warn_preview_feature("ServerApp-register-main-function") + # Register provided function with the ServerApp object self._main = main_fn diff --git a/src/py/flwr/server/server_test.py b/src/py/flwr/server/server_test.py index 274e5289fee1..51071c13f895 100644 --- a/src/py/flwr/server/server_test.py +++ b/src/py/flwr/server/server_test.py @@ -15,9 +15,22 @@ """Flower server tests.""" +import argparse +import csv +import tempfile +from pathlib import Path from typing import List, Optional import numpy as np +from cryptography.hazmat.primitives.asymmetric import ec +from cryptography.hazmat.primitives.serialization import ( + Encoding, + NoEncryption, + PrivateFormat, + PublicFormat, + load_ssh_private_key, + load_ssh_public_key, +) from flwr.common import ( Code, @@ -35,8 +48,14 @@ Status, ndarray_to_bytes, ) +from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( + generate_key_pairs, + private_key_to_bytes, + public_key_to_bytes, +) from flwr.server.client_manager import SimpleClientManager +from .app import _try_setup_client_authentication from .client_proxy import ClientProxy from .server import Server, evaluate_clients, 
fit_clients @@ -182,3 +201,71 @@ def test_set_max_workers() -> None: # Assert assert server.max_workers == 42 + + +def test_setup_client_auth() -> None: # pylint: disable=R0914 + """Test setup client authentication.""" + # Prepare + _, first_public_key = generate_key_pairs() + private_key, public_key = generate_key_pairs() + + server_public_key = public_key.public_bytes( + encoding=Encoding.OpenSSH, format=PublicFormat.OpenSSH + ) + server_private_key = private_key.private_bytes( + Encoding.PEM, PrivateFormat.OpenSSH, NoEncryption() + ) + _, second_public_key = generate_key_pairs() + + # Execute + with tempfile.TemporaryDirectory() as temp_dir: + # Initialize temporary files + client_keys_file_path = Path(temp_dir) / "client_keys.csv" + server_private_key_path = Path(temp_dir) / "server_private_key" + server_public_key_path = Path(temp_dir) / "server_public_key" + + # Fill the files with relevant keys + with open(client_keys_file_path, "w", newline="", encoding="utf-8") as csvfile: + writer = csv.writer(csvfile) + writer.writerow( + [ + first_public_key.public_bytes( + encoding=Encoding.OpenSSH, format=PublicFormat.OpenSSH + ).decode(), + second_public_key.public_bytes( + encoding=Encoding.OpenSSH, format=PublicFormat.OpenSSH + ).decode(), + ] + ) + server_public_key_path.write_bytes(server_public_key) + server_private_key_path.write_bytes(server_private_key) + + # Mock argparse with `require-client-authentication`` flag + mock_args = argparse.Namespace( + require_client_authentication=[ + str(client_keys_file_path), + str(server_private_key_path), + str(server_public_key_path), + ] + ) + + # Run _try_setup_client_authentication + result = _try_setup_client_authentication(mock_args, (b"", b"", b"")) + + expected_private_key = load_ssh_private_key(server_private_key, None) + expected_public_key = load_ssh_public_key(server_public_key) + + # Assert + assert isinstance(expected_private_key, ec.EllipticCurvePrivateKey) + assert isinstance(expected_public_key, 
ec.EllipticCurvePublicKey) + assert result is not None + assert result[0] == { + public_key_to_bytes(first_public_key), + public_key_to_bytes(second_public_key), + } + assert private_key_to_bytes(result[1]) == private_key_to_bytes( + expected_private_key + ) + assert public_key_to_bytes(result[2]) == public_key_to_bytes( + expected_public_key + ) diff --git a/src/py/flwr/server/strategy/dp_adaptive_clipping.py b/src/py/flwr/server/strategy/dp_adaptive_clipping.py index 1acfd4613a0a..9579ba9d1599 100644 --- a/src/py/flwr/server/strategy/dp_adaptive_clipping.py +++ b/src/py/flwr/server/strategy/dp_adaptive_clipping.py @@ -200,7 +200,7 @@ def aggregate_fit( log( INFO, - "aggregate_fit: parameters are clipped by value: %s.", + "aggregate_fit: parameters are clipped by value: %.4f.", self.clipping_norm, ) @@ -234,7 +234,8 @@ def aggregate_fit( ) log( INFO, - "aggregate_fit: central DP noise with standard deviation: %s added to parameters.", + "aggregate_fit: central DP noise with " + "standard deviation: %.4f added to parameters.", compute_stdv( self.noise_multiplier, self.clipping_norm, self.num_sampled_clients ), @@ -424,7 +425,8 @@ def aggregate_fit( ) log( INFO, - "aggregate_fit: central DP noise with standard deviation: %s added to parameters.", + "aggregate_fit: central DP noise with " + "standard deviation: %.4f added to parameters.", compute_stdv( self.noise_multiplier, self.clipping_norm, self.num_sampled_clients ), diff --git a/src/py/flwr/server/strategy/dp_fixed_clipping.py b/src/py/flwr/server/strategy/dp_fixed_clipping.py index 61e8123e28d7..f85fd75bf8ce 100644 --- a/src/py/flwr/server/strategy/dp_fixed_clipping.py +++ b/src/py/flwr/server/strategy/dp_fixed_clipping.py @@ -158,7 +158,7 @@ def aggregate_fit( ) log( INFO, - "aggregate_fit: parameters are clipped by value: %s.", + "aggregate_fit: parameters are clipped by value: %.4f.", self.clipping_norm, ) # Convert back to parameters @@ -180,7 +180,8 @@ def aggregate_fit( log( INFO, - "aggregate_fit: 
central DP noise with standard deviation: %s added to parameters.", + "aggregate_fit: central DP noise with " + "standard deviation: %.4f added to parameters.", compute_stdv( self.noise_multiplier, self.clipping_norm, self.num_sampled_clients ), @@ -337,11 +338,13 @@ def aggregate_fit( ) log( INFO, - "aggregate_fit: central DP noise with standard deviation: %s added to parameters.", + "aggregate_fit: central DP noise with " + "standard deviation: %.4f added to parameters.", compute_stdv( self.noise_multiplier, self.clipping_norm, self.num_sampled_clients ), ) + return aggregated_params, metrics def aggregate_evaluate( diff --git a/src/py/flwr/server/superlink/driver/driver_servicer.py b/src/py/flwr/server/superlink/driver/driver_servicer.py index 59e51ef52d8e..68374e5bdc73 100644 --- a/src/py/flwr/server/superlink/driver/driver_servicer.py +++ b/src/py/flwr/server/superlink/driver/driver_servicer.py @@ -15,6 +15,7 @@ """Driver API servicer.""" +import time from logging import DEBUG, INFO from typing import List, Optional, Set from uuid import UUID @@ -63,7 +64,7 @@ def CreateRun( """Create run ID.""" log(INFO, "DriverServicer.CreateRun") state: State = self.state_factory.state() - run_id = state.create_run() + run_id = state.create_run(request.fab_id, request.fab_version) return CreateRunResponse(run_id=run_id) def PushTaskIns( @@ -72,6 +73,11 @@ def PushTaskIns( """Push a set of TaskIns.""" log(DEBUG, "DriverServicer.PushTaskIns") + # Set pushed_at (timestamp in seconds) + pushed_at = time.time() + for task_ins in request.task_ins_list: + task_ins.task.pushed_at = pushed_at + # Validate request _raise_if(len(request.task_ins_list) == 0, "`task_ins_list` must not be empty") for task_ins in request.task_ins_list: diff --git a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py index 82f049844bd6..6aeaa7ef413f 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py +++ 
b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py @@ -18,7 +18,7 @@ import concurrent.futures import sys from logging import ERROR -from typing import Any, Callable, Optional, Tuple, Union +from typing import Any, Callable, Optional, Sequence, Tuple, Union import grpc @@ -162,6 +162,7 @@ def generic_create_grpc_server( # pylint: disable=too-many-arguments max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, keepalive_time_ms: int = 210000, certificates: Optional[Tuple[bytes, bytes, bytes]] = None, + interceptors: Optional[Sequence[grpc.ServerInterceptor]] = None, ) -> grpc.Server: """Create a gRPC server with a single servicer. @@ -249,6 +250,7 @@ def generic_create_grpc_server( # pylint: disable=too-many-arguments # returning RESOURCE_EXHAUSTED status, or None to indicate no limit. maximum_concurrent_rpcs=max_concurrent_workers, options=options, + interceptors=interceptors, ) add_servicer_to_server_fn(servicer, server) diff --git a/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py b/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py index 278474477379..03a2ec064213 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py +++ b/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py @@ -15,7 +15,7 @@ """Fleet API gRPC request-response servicer.""" -from logging import INFO +from logging import DEBUG, INFO import grpc @@ -26,6 +26,10 @@ CreateNodeResponse, DeleteNodeRequest, DeleteNodeResponse, + GetRunRequest, + GetRunResponse, + PingRequest, + PingResponse, PullTaskInsRequest, PullTaskInsResponse, PushTaskResRequest, @@ -61,6 +65,14 @@ def DeleteNode( state=self.state_factory.state(), ) + def Ping(self, request: PingRequest, context: grpc.ServicerContext) -> PingResponse: + """.""" + log(DEBUG, "FleetServicer.Ping") + return message_handler.ping( + request=request, + state=self.state_factory.state(), + ) + def PullTaskIns( self, request: PullTaskInsRequest, context: grpc.ServicerContext ) -> 
PullTaskInsResponse: @@ -80,3 +92,13 @@ def PushTaskRes( request=request, state=self.state_factory.state(), ) + + def GetRun( + self, request: GetRunRequest, context: grpc.ServicerContext + ) -> GetRunResponse: + """Get run information.""" + log(INFO, "FleetServicer.GetRun") + return message_handler.get_run( + request=request, + state=self.state_factory.state(), + ) diff --git a/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py new file mode 100644 index 000000000000..29835132b34a --- /dev/null +++ b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py @@ -0,0 +1,174 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Flower server interceptor.""" + + +import base64 +from logging import INFO +from typing import Any, Callable, Sequence, Set, Tuple, Union + +import grpc +from cryptography.hazmat.primitives.asymmetric import ec + +from flwr.common.logger import log +from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( + bytes_to_public_key, + generate_shared_key, + public_key_to_bytes, + verify_hmac, +) +from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 + CreateNodeRequest, + CreateNodeResponse, + DeleteNodeRequest, + DeleteNodeResponse, + GetRunRequest, + GetRunResponse, + PingRequest, + PingResponse, + PullTaskInsRequest, + PullTaskInsResponse, + PushTaskResRequest, + PushTaskResResponse, +) + +_PUBLIC_KEY_HEADER = "public-key" +_AUTH_TOKEN_HEADER = "auth-token" + +Request = Union[ + CreateNodeRequest, + DeleteNodeRequest, + PullTaskInsRequest, + PushTaskResRequest, + GetRunRequest, + PingRequest, +] + +Response = Union[ + CreateNodeResponse, + DeleteNodeResponse, + PullTaskInsResponse, + PushTaskResResponse, + GetRunResponse, + PingResponse, +] + + +def _get_value_from_tuples( + key_string: str, tuples: Sequence[Tuple[str, Union[str, bytes]]] +) -> bytes: + value = next((value for key, value in tuples if key == key_string), "") + if isinstance(value, str): + return value.encode() + + return value + + +class AuthenticateServerInterceptor(grpc.ServerInterceptor): # type: ignore + """Server interceptor for client authentication.""" + + def __init__( + self, + client_public_keys: Set[bytes], + private_key: ec.EllipticCurvePrivateKey, + public_key: ec.EllipticCurvePublicKey, + ): + self.server_private_key = private_key + self.client_public_keys = client_public_keys + self.encoded_server_public_key = base64.urlsafe_b64encode( + public_key_to_bytes(public_key) + ) + log( + INFO, + "Client authentication enabled with %d known public keys", + len(client_public_keys), + ) + + 
def intercept_service( + self, + continuation: Callable[[Any], Any], + handler_call_details: grpc.HandlerCallDetails, + ) -> grpc.RpcMethodHandler: + """Flower server interceptor authentication logic. + + Intercept all unary calls from clients and authenticate clients by validating + auth metadata sent by the client. Continue RPC call if client is authenticated, + else, terminate RPC call by setting context to abort. + """ + # One of the method handlers in + # `flwr.server.superlink.fleet.grpc_rere.fleet_server.FleetServicer` + method_handler: grpc.RpcMethodHandler = continuation(handler_call_details) + return self._generic_auth_unary_method_handler(method_handler) + + def _generic_auth_unary_method_handler( + self, method_handler: grpc.RpcMethodHandler + ) -> grpc.RpcMethodHandler: + def _generic_method_handler( + request: Request, + context: grpc.ServicerContext, + ) -> Response: + client_public_key_bytes = base64.urlsafe_b64decode( + _get_value_from_tuples( + _PUBLIC_KEY_HEADER, context.invocation_metadata() + ) + ) + is_public_key_known = client_public_key_bytes in self.client_public_keys + if not is_public_key_known: + context.abort(grpc.StatusCode.UNAUTHENTICATED, "Access denied") + + if isinstance(request, CreateNodeRequest): + context.send_initial_metadata( + ( + ( + _PUBLIC_KEY_HEADER, + self.encoded_server_public_key, + ), + ) + ) + elif isinstance( + request, + ( + DeleteNodeRequest, + PullTaskInsRequest, + PushTaskResRequest, + GetRunRequest, + PingRequest, + ), + ): + hmac_value = base64.urlsafe_b64decode( + _get_value_from_tuples( + _AUTH_TOKEN_HEADER, context.invocation_metadata() + ) + ) + client_public_key = bytes_to_public_key(client_public_key_bytes) + shared_secret = generate_shared_key( + self.server_private_key, + client_public_key, + ) + verify = verify_hmac( + shared_secret, request.SerializeToString(True), hmac_value + ) + if not verify: + context.abort(grpc.StatusCode.UNAUTHENTICATED, "Access denied") + else: + 
context.abort(grpc.StatusCode.UNAUTHENTICATED, "Access denied") + + return method_handler.unary_unary(request, context) # type: ignore + + return grpc.unary_unary_rpc_method_handler( + _generic_method_handler, + request_deserializer=method_handler.request_deserializer, + response_serializer=method_handler.response_serializer, + ) diff --git a/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor_test.py b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor_test.py new file mode 100644 index 000000000000..4b0cc2daf86f --- /dev/null +++ b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor_test.py @@ -0,0 +1,396 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Flower server interceptor tests.""" + + +import base64 +import unittest + +import grpc + +from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( + compute_hmac, + generate_key_pairs, + generate_shared_key, + public_key_to_bytes, +) +from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 + CreateNodeRequest, + CreateNodeResponse, + DeleteNodeRequest, + DeleteNodeResponse, + GetRunRequest, + GetRunResponse, + PingRequest, + PingResponse, + PullTaskInsRequest, + PullTaskInsResponse, + PushTaskResRequest, + PushTaskResResponse, +) +from flwr.proto.task_pb2 import TaskRes # pylint: disable=E0611 +from flwr.server.app import ADDRESS_FLEET_API_GRPC_RERE, _run_fleet_api_grpc_rere +from flwr.server.superlink.state.state_factory import StateFactory + +from .server_interceptor import ( + _AUTH_TOKEN_HEADER, + _PUBLIC_KEY_HEADER, + AuthenticateServerInterceptor, +) + + +class TestServerInterceptor(unittest.TestCase): # pylint: disable=R0902 + """Server interceptor tests.""" + + def setUp(self) -> None: + """Initialize mock stub and server interceptor.""" + self._client_private_key, self._client_public_key = generate_key_pairs() + self._server_private_key, self._server_public_key = generate_key_pairs() + + state_factory = StateFactory(":flwr-in-memory-state:") + + self._server_interceptor = AuthenticateServerInterceptor( + {public_key_to_bytes(self._client_public_key)}, + self._server_private_key, + self._server_public_key, + ) + self._server: grpc.Server = _run_fleet_api_grpc_rere( + ADDRESS_FLEET_API_GRPC_RERE, state_factory, None, [self._server_interceptor] + ) + + self._channel = grpc.insecure_channel("localhost:9092") + self._create_node = self._channel.unary_unary( + "/flwr.proto.Fleet/CreateNode", + request_serializer=CreateNodeRequest.SerializeToString, + response_deserializer=CreateNodeResponse.FromString, + ) + self._delete_node = self._channel.unary_unary( + 
"/flwr.proto.Fleet/DeleteNode", + request_serializer=DeleteNodeRequest.SerializeToString, + response_deserializer=DeleteNodeResponse.FromString, + ) + self._pull_task_ins = self._channel.unary_unary( + "/flwr.proto.Fleet/PullTaskIns", + request_serializer=PullTaskInsRequest.SerializeToString, + response_deserializer=PullTaskInsResponse.FromString, + ) + self._push_task_res = self._channel.unary_unary( + "/flwr.proto.Fleet/PushTaskRes", + request_serializer=PushTaskResRequest.SerializeToString, + response_deserializer=PushTaskResResponse.FromString, + ) + self._get_run = self._channel.unary_unary( + "/flwr.proto.Fleet/GetRun", + request_serializer=GetRunRequest.SerializeToString, + response_deserializer=GetRunResponse.FromString, + ) + self._ping = self._channel.unary_unary( + "/flwr.proto.Fleet/Ping", + request_serializer=PingRequest.SerializeToString, + response_deserializer=PingResponse.FromString, + ) + + def tearDown(self) -> None: + """Clean up grpc server.""" + self._server.stop(None) + + def test_successful_create_node_with_metadata(self) -> None: + """Test server interceptor for creating node.""" + # Prepare + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + + # Execute + response, call = self._create_node.with_call( + request=CreateNodeRequest(), + metadata=((_PUBLIC_KEY_HEADER, public_key_bytes),), + ) + + expected_metadata = ( + _PUBLIC_KEY_HEADER, + base64.urlsafe_b64encode( + public_key_to_bytes(self._server_public_key) + ).decode(), + ) + + # Assert + assert call.initial_metadata()[0] == expected_metadata + assert isinstance(response, CreateNodeResponse) + + def test_unsuccessful_create_node_with_metadata(self) -> None: + """Test server interceptor for creating node unsuccessfully.""" + # Prepare + _, client_public_key = generate_key_pairs() + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(client_public_key) + ) + + # Execute & Assert + with self.assertRaises(grpc.RpcError): + 
self._create_node.with_call( + request=CreateNodeRequest(), + metadata=((_PUBLIC_KEY_HEADER, public_key_bytes),), + ) + + def test_successful_delete_node_with_metadata(self) -> None: + """Test server interceptor for deleting node.""" + # Prepare + request = DeleteNodeRequest() + shared_secret = generate_shared_key( + self._client_private_key, self._server_public_key + ) + hmac_value = base64.urlsafe_b64encode( + compute_hmac(shared_secret, request.SerializeToString(True)) + ) + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + + # Execute + response, call = self._delete_node.with_call( + request=request, + metadata=( + (_PUBLIC_KEY_HEADER, public_key_bytes), + (_AUTH_TOKEN_HEADER, hmac_value), + ), + ) + + # Assert + assert isinstance(response, DeleteNodeResponse) + assert grpc.StatusCode.OK == call.code() + + def test_unsuccessful_delete_node_with_metadata(self) -> None: + """Test server interceptor for deleting node unsuccessfully.""" + # Prepare + request = DeleteNodeRequest() + client_private_key, _ = generate_key_pairs() + shared_secret = generate_shared_key(client_private_key, self._server_public_key) + hmac_value = base64.urlsafe_b64encode( + compute_hmac(shared_secret, request.SerializeToString(True)) + ) + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + + # Execute & Assert + with self.assertRaises(grpc.RpcError): + self._delete_node.with_call( + request=request, + metadata=( + (_PUBLIC_KEY_HEADER, public_key_bytes), + (_AUTH_TOKEN_HEADER, hmac_value), + ), + ) + + def test_successful_pull_task_ins_with_metadata(self) -> None: + """Test server interceptor for pull task ins.""" + # Prepare + request = PullTaskInsRequest() + shared_secret = generate_shared_key( + self._client_private_key, self._server_public_key + ) + hmac_value = base64.urlsafe_b64encode( + compute_hmac(shared_secret, request.SerializeToString(True)) + ) + public_key_bytes = 
base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + + # Execute + response, call = self._pull_task_ins.with_call( + request=request, + metadata=( + (_PUBLIC_KEY_HEADER, public_key_bytes), + (_AUTH_TOKEN_HEADER, hmac_value), + ), + ) + + # Assert + assert isinstance(response, PullTaskInsResponse) + assert grpc.StatusCode.OK == call.code() + + def test_unsuccessful_pull_task_ins_with_metadata(self) -> None: + """Test server interceptor for pull task ins unsuccessfully.""" + # Prepare + request = PullTaskInsRequest() + client_private_key, _ = generate_key_pairs() + shared_secret = generate_shared_key(client_private_key, self._server_public_key) + hmac_value = base64.urlsafe_b64encode( + compute_hmac(shared_secret, request.SerializeToString(True)) + ) + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + + # Execute & Assert + with self.assertRaises(grpc.RpcError): + self._pull_task_ins.with_call( + request=request, + metadata=( + (_PUBLIC_KEY_HEADER, public_key_bytes), + (_AUTH_TOKEN_HEADER, hmac_value), + ), + ) + + def test_successful_push_task_res_with_metadata(self) -> None: + """Test server interceptor for push task res.""" + # Prepare + request = PushTaskResRequest(task_res_list=[TaskRes()]) + shared_secret = generate_shared_key( + self._client_private_key, self._server_public_key + ) + hmac_value = base64.urlsafe_b64encode( + compute_hmac(shared_secret, request.SerializeToString(True)) + ) + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + + # Execute + response, call = self._push_task_res.with_call( + request=request, + metadata=( + (_PUBLIC_KEY_HEADER, public_key_bytes), + (_AUTH_TOKEN_HEADER, hmac_value), + ), + ) + + # Assert + assert isinstance(response, PushTaskResResponse) + assert grpc.StatusCode.OK == call.code() + + def test_unsuccessful_push_task_res_with_metadata(self) -> None: + """Test server interceptor for push task res 
unsuccessfully.""" + # Prepare + request = PushTaskResRequest(task_res_list=[TaskRes()]) + client_private_key, _ = generate_key_pairs() + shared_secret = generate_shared_key(client_private_key, self._server_public_key) + hmac_value = base64.urlsafe_b64encode( + compute_hmac(shared_secret, request.SerializeToString(True)) + ) + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + + # Execute & Assert + with self.assertRaises(grpc.RpcError): + self._push_task_res.with_call( + request=request, + metadata=( + (_PUBLIC_KEY_HEADER, public_key_bytes), + (_AUTH_TOKEN_HEADER, hmac_value), + ), + ) + + def test_successful_get_run_with_metadata(self) -> None: + """Test server interceptor for pull task ins.""" + # Prepare + request = GetRunRequest(run_id=0) + shared_secret = generate_shared_key( + self._client_private_key, self._server_public_key + ) + hmac_value = base64.urlsafe_b64encode( + compute_hmac(shared_secret, request.SerializeToString(True)) + ) + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + + # Execute + response, call = self._get_run.with_call( + request=request, + metadata=( + (_PUBLIC_KEY_HEADER, public_key_bytes), + (_AUTH_TOKEN_HEADER, hmac_value), + ), + ) + + # Assert + assert isinstance(response, GetRunResponse) + assert grpc.StatusCode.OK == call.code() + + def test_unsuccessful_get_run_with_metadata(self) -> None: + """Test server interceptor for pull task ins unsuccessfully.""" + # Prepare + request = GetRunRequest(run_id=0) + client_private_key, _ = generate_key_pairs() + shared_secret = generate_shared_key(client_private_key, self._server_public_key) + hmac_value = base64.urlsafe_b64encode( + compute_hmac(shared_secret, request.SerializeToString(True)) + ) + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + + # Execute & Assert + with self.assertRaises(grpc.RpcError): + self._get_run.with_call( + 
request=request, + metadata=( + (_PUBLIC_KEY_HEADER, public_key_bytes), + (_AUTH_TOKEN_HEADER, hmac_value), + ), + ) + + def test_successful_ping_with_metadata(self) -> None: + """Test server interceptor for pull task ins.""" + # Prepare + request = PingRequest() + shared_secret = generate_shared_key( + self._client_private_key, self._server_public_key + ) + hmac_value = base64.urlsafe_b64encode( + compute_hmac(shared_secret, request.SerializeToString(True)) + ) + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + + # Execute + response, call = self._ping.with_call( + request=request, + metadata=( + (_PUBLIC_KEY_HEADER, public_key_bytes), + (_AUTH_TOKEN_HEADER, hmac_value), + ), + ) + + # Assert + assert isinstance(response, PingResponse) + assert grpc.StatusCode.OK == call.code() + + def test_unsuccessful_ping_with_metadata(self) -> None: + """Test server interceptor for pull task ins unsuccessfully.""" + # Prepare + request = PingRequest() + client_private_key, _ = generate_key_pairs() + shared_secret = generate_shared_key(client_private_key, self._server_public_key) + hmac_value = base64.urlsafe_b64encode( + compute_hmac(shared_secret, request.SerializeToString(True)) + ) + public_key_bytes = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) + ) + + # Execute & Assert + with self.assertRaises(grpc.RpcError): + self._ping.with_call( + request=request, + metadata=( + (_PUBLIC_KEY_HEADER, public_key_bytes), + (_AUTH_TOKEN_HEADER, hmac_value), + ), + ) diff --git a/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py index c99a7854d53a..83b005a4cb8e 100644 --- a/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py +++ b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py @@ -15,6 +15,7 @@ """Fleet API message handlers.""" +import time from typing import List, Optional from 
uuid import UUID @@ -23,11 +24,16 @@ CreateNodeResponse, DeleteNodeRequest, DeleteNodeResponse, + GetRunRequest, + GetRunResponse, + PingRequest, + PingResponse, PullTaskInsRequest, PullTaskInsResponse, PushTaskResRequest, PushTaskResResponse, Reconnect, + Run, ) from flwr.proto.node_pb2 import Node # pylint: disable=E0611 from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 @@ -40,7 +46,7 @@ def create_node( ) -> CreateNodeResponse: """.""" # Create node - node_id = state.create_node() + node_id = state.create_node(ping_interval=request.ping_interval) return CreateNodeResponse(node=Node(node_id=node_id, anonymous=False)) @@ -55,6 +61,15 @@ def delete_node(request: DeleteNodeRequest, state: State) -> DeleteNodeResponse: return DeleteNodeResponse() +def ping( + request: PingRequest, # pylint: disable=unused-argument + state: State, # pylint: disable=unused-argument +) -> PingResponse: + """.""" + res = state.acknowledge_ping(request.node.node_id, request.ping_interval) + return PingResponse(success=res) + + def pull_task_ins(request: PullTaskInsRequest, state: State) -> PullTaskInsResponse: """Pull TaskIns handler.""" # Get node_id if client node is not anonymous @@ -77,6 +92,9 @@ def push_task_res(request: PushTaskResRequest, state: State) -> PushTaskResRespo task_res: TaskRes = request.task_res_list[0] # pylint: enable=no-member + # Set pushed_at (timestamp in seconds) + task_res.task.pushed_at = time.time() + # Store TaskRes in State task_id: Optional[UUID] = state.store_task_res(task_res=task_res) @@ -86,3 +104,12 @@ def push_task_res(request: PushTaskResRequest, state: State) -> PushTaskResRespo results={str(task_id): 0}, ) return response + + +def get_run( + request: GetRunRequest, state: State # pylint: disable=W0613 +) -> GetRunResponse: + """Get run information.""" + run_id, fab_id, fab_version = state.get_run(request.run_id) + run = Run(run_id=run_id, fab_id=fab_id, fab_version=fab_version) + return GetRunResponse(run=run) diff --git 
a/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py b/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py index b022b34c68c8..8ac7c6cfc613 100644 --- a/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py +++ b/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py @@ -21,6 +21,8 @@ from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 CreateNodeRequest, DeleteNodeRequest, + GetRunRequest, + PingRequest, PullTaskInsRequest, PushTaskResRequest, ) @@ -152,11 +154,67 @@ async def push_task_res(request: Request) -> Response: # Check if token is need ) +async def ping(request: Request) -> Response: + """Ping.""" + _check_headers(request.headers) + + # Get the request body as raw bytes + ping_request_bytes: bytes = await request.body() + + # Deserialize ProtoBuf + ping_request_proto = PingRequest() + ping_request_proto.ParseFromString(ping_request_bytes) + + # Get state from app + state: State = app.state.STATE_FACTORY.state() + + # Handle message + ping_response_proto = message_handler.ping(request=ping_request_proto, state=state) + + # Return serialized ProtoBuf + ping_response_bytes = ping_response_proto.SerializeToString() + return Response( + status_code=200, + content=ping_response_bytes, + headers={"Content-Type": "application/protobuf"}, + ) + + +async def get_run(request: Request) -> Response: + """GetRun.""" + _check_headers(request.headers) + + # Get the request body as raw bytes + get_run_request_bytes: bytes = await request.body() + + # Deserialize ProtoBuf + get_run_request_proto = GetRunRequest() + get_run_request_proto.ParseFromString(get_run_request_bytes) + + # Get state from app + state: State = app.state.STATE_FACTORY.state() + + # Handle message + get_run_response_proto = message_handler.get_run( + request=get_run_request_proto, state=state + ) + + # Return serialized ProtoBuf + get_run_response_bytes = get_run_response_proto.SerializeToString() + return Response( + status_code=200, + content=get_run_response_bytes, + 
headers={"Content-Type": "application/protobuf"}, + ) + + routes = [ Route("/api/v0/fleet/create-node", create_node, methods=["POST"]), Route("/api/v0/fleet/delete-node", delete_node, methods=["POST"]), Route("/api/v0/fleet/pull-task-ins", pull_task_ins, methods=["POST"]), Route("/api/v0/fleet/push-task-res", push_task_res, methods=["POST"]), + Route("/api/v0/fleet/ping", ping, methods=["POST"]), + Route("/api/v0/fleet/get-run", get_run, methods=["POST"]), ] app: Starlette = Starlette( diff --git a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py index 8ef0d54622ae..9bede09edf09 100644 --- a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py +++ b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py @@ -20,7 +20,7 @@ import ray -from flwr.client.client_app import ClientApp, LoadClientAppError +from flwr.client.client_app import ClientApp from flwr.common.context import Context from flwr.common.logger import log from flwr.common.message import Message @@ -151,7 +151,6 @@ async def process_message( ) await future - # Fetch result ( out_mssg, @@ -160,13 +159,15 @@ async def process_message( return out_mssg, updated_context - except LoadClientAppError as load_ex: + except Exception as ex: log( ERROR, "An exception was raised when processing a message by %s", self.__class__.__name__, ) - raise load_ex + # add actor back into pool + await self.pool.add_actor_back_to_pool(future) + raise ex async def terminate(self) -> None: """Terminate all actors in actor pool.""" diff --git a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py index 2610307bb749..dcac0b81d666 100644 --- a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py +++ b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py @@ -25,6 +25,7 @@ from flwr.client import Client, NumPyClient from flwr.client.client_app 
import ClientApp, LoadClientAppError from flwr.common import ( + DEFAULT_TTL, Config, ConfigsRecord, Context, @@ -111,7 +112,7 @@ def _create_message_and_context() -> Tuple[Message, Context, float]: src_node_id=0, dst_node_id=0, reply_to_message="", - ttl="", + ttl=DEFAULT_TTL, message_type=MessageTypeLegacy.GET_PROPERTIES, ), ) diff --git a/src/py/flwr/server/superlink/fleet/vce/vce_api.py b/src/py/flwr/server/superlink/fleet/vce/vce_api.py index a693c968d0e8..9c27fca79c12 100644 --- a/src/py/flwr/server/superlink/fleet/vce/vce_api.py +++ b/src/py/flwr/server/superlink/fleet/vce/vce_api.py @@ -14,16 +14,19 @@ # ============================================================================== """Fleet Simulation Engine API.""" - import asyncio import json +import sys +import time import traceback from logging import DEBUG, ERROR, INFO, WARN from typing import Callable, Dict, List, Optional -from flwr.client.client_app import ClientApp, LoadClientAppError +from flwr.client.client_app import ClientApp, ClientAppException, LoadClientAppError from flwr.client.node_state import NodeState +from flwr.common.constant import PING_MAX_INTERVAL, ErrorCode from flwr.common.logger import log +from flwr.common.message import Error from flwr.common.object_ref import load_app from flwr.common.serde import message_from_taskins, message_to_taskres from flwr.proto.task_pb2 import TaskIns # pylint: disable=E0611 @@ -41,7 +44,7 @@ def _register_nodes( nodes_mapping: NodeToPartitionMapping = {} state = state_factory.state() for i in range(num_nodes): - node_id = state.create_node() + node_id = state.create_node(ping_interval=PING_MAX_INTERVAL) nodes_mapping[node_id] = i log(INFO, "Registered %i nodes", len(nodes_mapping)) return nodes_mapping @@ -59,6 +62,7 @@ async def worker( """Get TaskIns from queue and pass it to an actor in the pool to execute it.""" state = state_factory.state() while True: + out_mssg = None try: task_ins: TaskIns = await queue.get() node_id = 
task_ins.task.consumer.node_id @@ -82,24 +86,34 @@ async def worker( task_ins.run_id, context=updated_context ) - # Convert to TaskRes - task_res = message_to_taskres(out_mssg) - # Store TaskRes in state - state.store_task_res(task_res) - except asyncio.CancelledError as e: - log(DEBUG, "Async worker: %s", e) + log(DEBUG, "Terminating async worker: %s", e) break - except LoadClientAppError as app_ex: - log(ERROR, "Async worker: %s", app_ex) - log(ERROR, traceback.format_exc()) - raise - + # Exceptions aren't raised but reported as an error message except Exception as ex: # pylint: disable=broad-exception-caught log(ERROR, ex) log(ERROR, traceback.format_exc()) - break + + if isinstance(ex, ClientAppException): + e_code = ErrorCode.CLIENT_APP_RAISED_EXCEPTION + elif isinstance(ex, LoadClientAppError): + e_code = ErrorCode.LOAD_CLIENT_APP_EXCEPTION + else: + e_code = ErrorCode.UNKNOWN + + reason = str(type(ex)) + ":<'" + str(ex) + "'>" + out_mssg = message.create_error_reply( + error=Error(code=e_code, reason=reason) + ) + + finally: + if out_mssg: + # Convert to TaskRes + task_res = message_to_taskres(out_mssg) + # Store TaskRes in state + task_res.task.pushed_at = time.time() + state.store_task_res(task_res) async def add_taskins_to_queue( @@ -218,7 +232,8 @@ async def run( await backend.terminate() -# pylint: disable=too-many-arguments,unused-argument,too-many-locals +# pylint: disable=too-many-arguments,unused-argument,too-many-locals,too-many-branches +# pylint: disable=too-many-statements def start_vce( backend_name: str, backend_config_json_stream: str, @@ -300,12 +315,14 @@ def backend_fn() -> Backend: """Instantiate a Backend.""" return backend_type(backend_config, work_dir=app_dir) - log(INFO, "client_app_attr = %s", client_app_attr) - # Load ClientApp if needed def _load() -> ClientApp: if client_app_attr: + + if app_dir is not None: + sys.path.insert(0, app_dir) + app: ClientApp = load_app(client_app_attr, LoadClientAppError) if not isinstance(app, 
ClientApp): @@ -319,13 +336,30 @@ def _load() -> ClientApp: app_fn = _load - asyncio.run( - run( - app_fn, - backend_fn, - nodes_mapping, - state_factory, - node_states, - f_stop, + try: + # Test if ClientApp can be loaded + _ = app_fn() + + # Run main simulation loop + asyncio.run( + run( + app_fn, + backend_fn, + nodes_mapping, + state_factory, + node_states, + f_stop, + ) ) - ) + except LoadClientAppError as loadapp_ex: + f_stop_delay = 10 + log( + ERROR, + "LoadClientAppError exception encountered. Terminating simulation in %is", + f_stop_delay, + ) + time.sleep(f_stop_delay) + f_stop.set() # set termination event + raise loadapp_ex + except Exception as ex: + raise ex diff --git a/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py b/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py index 8c37399ae295..1da726f88f1e 100644 --- a/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py +++ b/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py @@ -17,6 +17,7 @@ import asyncio import threading +import time from itertools import cycle from json import JSONDecodeError from math import pi @@ -26,7 +27,14 @@ from unittest import IsolatedAsyncioTestCase from uuid import UUID -from flwr.common import GetPropertiesIns, Message, MessageTypeLegacy, Metadata +from flwr.client.client_app import LoadClientAppError +from flwr.common import ( + DEFAULT_TTL, + GetPropertiesIns, + Message, + MessageTypeLegacy, + Metadata, +) from flwr.common.recordset_compat import getpropertiesins_to_recordset from flwr.common.serde import message_from_taskres, message_to_taskins from flwr.server.superlink.fleet.vce.vce_api import ( @@ -46,7 +54,6 @@ def terminate_simulation(f_stop: asyncio.Event, sleep_duration: int) -> None: def init_state_factory_nodes_mapping( num_nodes: int, num_messages: int, - erroneous_message: Optional[bool] = False, ) -> Tuple[StateFactory, NodeToPartitionMapping, Dict[UUID, float]]: """Instatiate StateFactory, register nodes and pre-insert messages in the 
state.""" # Register a state and a run_id in it @@ -61,7 +68,6 @@ def init_state_factory_nodes_mapping( nodes_mapping=nodes_mapping, run_id=run_id, num_messages=num_messages, - erroneous_message=erroneous_message, ) return state_factory, nodes_mapping, expected_results @@ -72,11 +78,10 @@ def register_messages_into_state( nodes_mapping: NodeToPartitionMapping, run_id: int, num_messages: int, - erroneous_message: Optional[bool] = False, ) -> Dict[UUID, float]: """Register `num_messages` into the state factory.""" state: InMemoryState = state_factory.state() # type: ignore - state.run_ids.add(run_id) + state.run_ids[run_id] = ("Mock/mock", "v1.0.0") # Artificially add TaskIns to state so they can be processed # by the Simulation Engine logic nodes_cycle = cycle(nodes_mapping.keys()) # we have more messages than supernodes @@ -97,16 +102,15 @@ def register_messages_into_state( src_node_id=0, dst_node_id=dst_node_id, # indicate destination node reply_to_message="", - ttl="", - message_type=( - "a bad message" - if erroneous_message - else MessageTypeLegacy.GET_PROPERTIES - ), + ttl=DEFAULT_TTL, + message_type=MessageTypeLegacy.GET_PROPERTIES, ), ) # Convert Message to TaskIns taskins = message_to_taskins(message) + # Normally recorded by the driver servicer + # but since we don't have one in this test, we do this manually + taskins.task.pushed_at = time.time() # Instert in state task_id = state.store_task_ins(taskins) if task_id: @@ -190,32 +194,13 @@ def test_erroneous_client_app_attr(self) -> None: state_factory, nodes_mapping, _ = init_state_factory_nodes_mapping( num_nodes=num_nodes, num_messages=num_messages ) - with self.assertRaises(RuntimeError): + with self.assertRaises(LoadClientAppError): start_and_shutdown( client_app_attr="totally_fictitious_app:client", state_factory=state_factory, nodes_mapping=nodes_mapping, ) - def test_erroneous_messages(self) -> None: - """Test handling of error in async worker (consumer). 
- - We register messages which will trigger an error when handling, triggering an - error. - """ - num_messages = 100 - num_nodes = 59 - - state_factory, nodes_mapping, _ = init_state_factory_nodes_mapping( - num_nodes=num_nodes, num_messages=num_messages, erroneous_message=True - ) - - with self.assertRaises(RuntimeError): - start_and_shutdown( - state_factory=state_factory, - nodes_mapping=nodes_mapping, - ) - def test_erroneous_backend_config(self) -> None: """Backend Config should be a JSON stream.""" with self.assertRaises(JSONDecodeError): diff --git a/src/py/flwr/server/superlink/state/in_memory_state.py b/src/py/flwr/server/superlink/state/in_memory_state.py index ac1ab158e254..ebccac3509f0 100644 --- a/src/py/flwr/server/superlink/state/in_memory_state.py +++ b/src/py/flwr/server/superlink/state/in_memory_state.py @@ -17,9 +17,9 @@ import os import threading -from datetime import datetime, timedelta +import time from logging import ERROR -from typing import Dict, List, Optional, Set +from typing import Dict, List, Optional, Set, Tuple from uuid import UUID, uuid4 from flwr.common import log, now @@ -27,15 +27,22 @@ from flwr.server.superlink.state.state import State from flwr.server.utils import validate_task_ins_or_res +from .utils import make_node_unavailable_taskres -class InMemoryState(State): + +class InMemoryState(State): # pylint: disable=R0902 """In-memory State implementation.""" def __init__(self) -> None: - self.node_ids: Set[int] = set() - self.run_ids: Set[int] = set() + # Map node_id to (online_until, ping_interval) + self.node_ids: Dict[int, Tuple[float, float]] = {} + # Map run_id to (fab_id, fab_version) + self.run_ids: Dict[int, Tuple[str, str]] = {} self.task_ins_store: Dict[UUID, TaskIns] = {} self.task_res_store: Dict[UUID, TaskRes] = {} + self.client_public_keys: Set[bytes] = set() + self.server_public_key: Optional[bytes] = None + self.server_private_key: Optional[bytes] = None self.lock = threading.Lock() def store_task_ins(self, 
task_ins: TaskIns) -> Optional[UUID]: @@ -50,15 +57,11 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: log(ERROR, "`run_id` is invalid") return None - # Create task_id, created_at and ttl + # Create task_id task_id = uuid4() - created_at: datetime = now() - ttl: datetime = created_at + timedelta(hours=24) # Store TaskIns task_ins.task_id = str(task_id) - task_ins.task.created_at = created_at.isoformat() - task_ins.task.ttl = ttl.isoformat() with self.lock: self.task_ins_store[task_id] = task_ins @@ -113,15 +116,11 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: log(ERROR, "`run_id` is invalid") return None - # Create task_id, created_at and ttl + # Create task_id task_id = uuid4() - created_at: datetime = now() - ttl: datetime = created_at + timedelta(hours=24) # Store TaskRes task_res.task_id = str(task_id) - task_res.task.created_at = created_at.isoformat() - task_res.task.ttl = ttl.isoformat() with self.lock: self.task_res_store[task_id] = task_res @@ -136,14 +135,31 @@ def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRe with self.lock: # Find TaskRes that were not delivered yet task_res_list: List[TaskRes] = [] + replied_task_ids: Set[UUID] = set() for _, task_res in self.task_res_store.items(): - if ( - UUID(task_res.task.ancestry[0]) in task_ids - and task_res.task.delivered_at == "" - ): + reply_to = UUID(task_res.task.ancestry[0]) + if reply_to in task_ids and task_res.task.delivered_at == "": task_res_list.append(task_res) + replied_task_ids.add(reply_to) + if limit and len(task_res_list) == limit: + break + + # Check if the node is offline + for task_id in task_ids - replied_task_ids: if limit and len(task_res_list) == limit: break + task_ins = self.task_ins_store.get(task_id) + if task_ins is None: + continue + node_id = task_ins.task.consumer.node_id + online_until, _ = self.node_ids[node_id] + # Generate a TaskRes containing an error reply if the node is offline. 
+ if online_until < time.time(): + err_taskres = make_node_unavailable_taskres( + ref_taskins=task_ins, + ) + self.task_res_store[UUID(err_taskres.task_id)] = err_taskres + task_res_list.append(err_taskres) # Mark all of them as delivered delivered_at = now().isoformat() @@ -189,22 +205,24 @@ def num_task_res(self) -> int: """ return len(self.task_res_store) - def create_node(self) -> int: + def create_node(self, ping_interval: float) -> int: """Create, store in state, and return `node_id`.""" # Sample a random int64 as node_id node_id: int = int.from_bytes(os.urandom(8), "little", signed=True) - if node_id not in self.node_ids: - self.node_ids.add(node_id) - return node_id + with self.lock: + if node_id not in self.node_ids: + self.node_ids[node_id] = (time.time() + ping_interval, ping_interval) + return node_id log(ERROR, "Unexpected node registration failure.") return 0 def delete_node(self, node_id: int) -> None: """Delete a client node.""" - if node_id not in self.node_ids: - raise ValueError(f"Node {node_id} not found") - self.node_ids.remove(node_id) + with self.lock: + if node_id not in self.node_ids: + raise ValueError(f"Node {node_id} not found") + del self.node_ids[node_id] def get_nodes(self, run_id: int) -> Set[int]: """Return all available client nodes. @@ -214,17 +232,73 @@ def get_nodes(self, run_id: int) -> Set[int]: If the provided `run_id` does not exist or has no matching nodes, an empty `Set` MUST be returned. 
""" - if run_id not in self.run_ids: - return set() - return self.node_ids - - def create_run(self) -> int: - """Create one run.""" + with self.lock: + if run_id not in self.run_ids: + return set() + current_time = time.time() + return { + node_id + for node_id, (online_until, _) in self.node_ids.items() + if online_until > current_time + } + + def create_run(self, fab_id: str, fab_version: str) -> int: + """Create a new run for the specified `fab_id` and `fab_version`.""" # Sample a random int64 as run_id - run_id: int = int.from_bytes(os.urandom(8), "little", signed=True) + with self.lock: + run_id: int = int.from_bytes(os.urandom(8), "little", signed=True) - if run_id not in self.run_ids: - self.run_ids.add(run_id) - return run_id + if run_id not in self.run_ids: + self.run_ids[run_id] = (fab_id, fab_version) + return run_id log(ERROR, "Unexpected run creation failure.") return 0 + + def store_server_public_private_key( + self, public_key: bytes, private_key: bytes + ) -> None: + """Store `server_public_key` and `server_private_key` in state.""" + with self.lock: + if self.server_private_key is None and self.server_public_key is None: + self.server_private_key = private_key + self.server_public_key = public_key + else: + raise RuntimeError("Server public and private key already set") + + def get_server_private_key(self) -> Optional[bytes]: + """Retrieve `server_private_key` in urlsafe bytes.""" + return self.server_private_key + + def get_server_public_key(self) -> Optional[bytes]: + """Retrieve `server_public_key` in urlsafe bytes.""" + return self.server_public_key + + def store_client_public_keys(self, public_keys: Set[bytes]) -> None: + """Store a set of `client_public_keys` in state.""" + with self.lock: + self.client_public_keys = public_keys + + def store_client_public_key(self, public_key: bytes) -> None: + """Store a `client_public_key` in state.""" + with self.lock: + self.client_public_keys.add(public_key) + + def get_client_public_keys(self) -> 
Set[bytes]: + """Retrieve all currently stored `client_public_keys` as a set.""" + return self.client_public_keys + + def get_run(self, run_id: int) -> Tuple[int, str, str]: + """Retrieve information about the run with the specified `run_id`.""" + with self.lock: + if run_id not in self.run_ids: + log(ERROR, "`run_id` is invalid") + return 0, "", "" + return run_id, *self.run_ids[run_id] + + def acknowledge_ping(self, node_id: int, ping_interval: float) -> bool: + """Acknowledge a ping received from a node, serving as a heartbeat.""" + with self.lock: + if node_id in self.node_ids: + self.node_ids[node_id] = (time.time() + ping_interval, ping_interval) + return True + return False diff --git a/src/py/flwr/server/superlink/state/sqlite_state.py b/src/py/flwr/server/superlink/state/sqlite_state.py index 224c16cdf013..39ed92637902 100644 --- a/src/py/flwr/server/superlink/state/sqlite_state.py +++ b/src/py/flwr/server/superlink/state/sqlite_state.py @@ -18,9 +18,9 @@ import os import re import sqlite3 -from datetime import datetime, timedelta +import time from logging import DEBUG, ERROR -from typing import Any, Dict, List, Optional, Set, Tuple, Union, cast +from typing import Any, Dict, List, Optional, Sequence, Set, Tuple, Union, cast from uuid import UUID, uuid4 from flwr.common import log, now @@ -30,16 +30,38 @@ from flwr.server.utils.validator import validate_task_ins_or_res from .state import State +from .utils import make_node_unavailable_taskres SQL_CREATE_TABLE_NODE = """ CREATE TABLE IF NOT EXISTS node( - node_id INTEGER UNIQUE + node_id INTEGER UNIQUE, + online_until REAL, + ping_interval REAL ); """ +SQL_CREATE_TABLE_CREDENTIAL = """ +CREATE TABLE IF NOT EXISTS credential( + public_key BLOB PRIMARY KEY, + private_key BLOB +); +""" + +SQL_CREATE_TABLE_PUBLIC_KEY = """ +CREATE TABLE IF NOT EXISTS public_key( + public_key BLOB UNIQUE +); +""" + +SQL_CREATE_INDEX_ONLINE_UNTIL = """ +CREATE INDEX IF NOT EXISTS idx_online_until ON node (online_until); +""" + 
SQL_CREATE_TABLE_RUN = """ CREATE TABLE IF NOT EXISTS run( - run_id INTEGER UNIQUE + run_id INTEGER UNIQUE, + fab_id TEXT, + fab_version TEXT ); """ @@ -52,9 +74,10 @@ producer_node_id INTEGER, consumer_anonymous BOOLEAN, consumer_node_id INTEGER, - created_at TEXT, + created_at REAL, delivered_at TEXT, - ttl TEXT, + pushed_at REAL, + ttl REAL, ancestry TEXT, task_type TEXT, recordset BLOB, @@ -62,7 +85,6 @@ ); """ - SQL_CREATE_TABLE_TASK_RES = """ CREATE TABLE IF NOT EXISTS task_res( task_id TEXT UNIQUE, @@ -72,9 +94,10 @@ producer_node_id INTEGER, consumer_anonymous BOOLEAN, consumer_node_id INTEGER, - created_at TEXT, + created_at REAL, delivered_at TEXT, - ttl TEXT, + pushed_at REAL, + ttl REAL, ancestry TEXT, task_type TEXT, recordset BLOB, @@ -82,10 +105,10 @@ ); """ -DictOrTuple = Union[Tuple[Any], Dict[str, Any]] +DictOrTuple = Union[Tuple[Any, ...], Dict[str, Any]] -class SqliteState(State): +class SqliteState(State): # pylint: disable=R0904 """SQLite-based state implementation.""" def __init__( @@ -123,6 +146,9 @@ def initialize(self, log_queries: bool = False) -> List[Tuple[str]]: cur.execute(SQL_CREATE_TABLE_TASK_INS) cur.execute(SQL_CREATE_TABLE_TASK_RES) cur.execute(SQL_CREATE_TABLE_NODE) + cur.execute(SQL_CREATE_TABLE_CREDENTIAL) + cur.execute(SQL_CREATE_TABLE_PUBLIC_KEY) + cur.execute(SQL_CREATE_INDEX_ONLINE_UNTIL) res = cur.execute("SELECT name FROM sqlite_schema;") return res.fetchall() @@ -130,7 +156,7 @@ def initialize(self, log_queries: bool = False) -> List[Tuple[str]]: def query( self, query: str, - data: Optional[Union[List[DictOrTuple], DictOrTuple]] = None, + data: Optional[Union[Sequence[DictOrTuple], DictOrTuple]] = None, ) -> List[Dict[str, Any]]: """Execute a SQL query.""" if self.conn is None: @@ -185,15 +211,11 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: log(ERROR, errors) return None - # Create task_id, created_at and ttl + # Create task_id task_id = uuid4() - created_at: datetime = now() - ttl: datetime = 
created_at + timedelta(hours=24) # Store TaskIns task_ins.task_id = str(task_id) - task_ins.task.created_at = created_at.isoformat() - task_ins.task.ttl = ttl.isoformat() data = (task_ins_to_dict(task_ins),) columns = ", ".join([f":{key}" for key in data[0]]) query = f"INSERT INTO task_ins VALUES({columns});" @@ -320,15 +342,11 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: log(ERROR, errors) return None - # Create task_id, created_at and ttl + # Create task_id task_id = uuid4() - created_at: datetime = now() - ttl: datetime = created_at + timedelta(hours=24) # Store TaskIns task_res.task_id = str(task_id) - task_res.task.created_at = created_at.isoformat() - task_res.task.ttl = ttl.isoformat() data = (task_res_to_dict(task_res),) columns = ", ".join([f":{key}" for key in data[0]]) query = f"INSERT INTO task_res VALUES({columns});" @@ -343,6 +361,7 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: return task_id + # pylint: disable-next=R0914 def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRes]: """Get TaskRes for task_ids. @@ -373,7 +392,7 @@ def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRe AND delivered_at = "" """ - data: Dict[str, Union[str, int]] = {} + data: Dict[str, Union[str, float, int]] = {} if limit is not None: query += " LIMIT :limit" @@ -407,6 +426,54 @@ def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRe rows = self.query(query, data) result = [dict_to_task_res(row) for row in rows] + + # 1. 
Query: Fetch consumer_node_id of remaining task_ids + # Assume the ancestry field only contains one element + data.clear() + replied_task_ids: Set[UUID] = {UUID(str(row["ancestry"])) for row in rows} + remaining_task_ids = task_ids - replied_task_ids + placeholders = ",".join([f":id_{i}" for i in range(len(remaining_task_ids))]) + query = f""" + SELECT consumer_node_id + FROM task_ins + WHERE task_id IN ({placeholders}); + """ + for index, task_id in enumerate(remaining_task_ids): + data[f"id_{index}"] = str(task_id) + node_ids = [int(row["consumer_node_id"]) for row in self.query(query, data)] + + # 2. Query: Select offline nodes + placeholders = ",".join([f":id_{i}" for i in range(len(node_ids))]) + query = f""" + SELECT node_id + FROM node + WHERE node_id IN ({placeholders}) + AND online_until < :time; + """ + data = {f"id_{i}": str(node_id) for i, node_id in enumerate(node_ids)} + data["time"] = time.time() + offline_node_ids = [int(row["node_id"]) for row in self.query(query, data)] + + # 3. 
Query: Select TaskIns for offline nodes + placeholders = ",".join([f":id_{i}" for i in range(len(offline_node_ids))]) + query = f""" + SELECT * + FROM task_ins + WHERE consumer_node_id IN ({placeholders}); + """ + data = {f"id_{i}": str(node_id) for i, node_id in enumerate(offline_node_ids)} + task_ins_rows = self.query(query, data) + + # Make TaskRes containing node unavailabe error + for row in task_ins_rows: + if limit and len(result) == limit: + break + task_ins = dict_to_task_ins(row) + err_taskres = make_node_unavailable_taskres( + ref_taskins=task_ins, + ) + result.append(err_taskres) + return result def num_task_ins(self) -> int: @@ -467,14 +534,17 @@ def delete_tasks(self, task_ids: Set[UUID]) -> None: return None - def create_node(self) -> int: + def create_node(self, ping_interval: float) -> int: """Create, store in state, and return `node_id`.""" # Sample a random int64 as node_id node_id: int = int.from_bytes(os.urandom(8), "little", signed=True) - query = "INSERT INTO node VALUES(:node_id);" + query = ( + "INSERT INTO node (node_id, online_until, ping_interval) VALUES (?, ?, ?)" + ) + try: - self.query(query, {"node_id": node_id}) + self.query(query, (node_id, time.time() + ping_interval, ping_interval)) except sqlite3.IntegrityError: log(ERROR, "Unexpected node registration failure.") return 0 @@ -499,13 +569,13 @@ def get_nodes(self, run_id: int) -> Set[int]: return set() # Get nodes - query = "SELECT * FROM node;" - rows = self.query(query) + query = "SELECT node_id FROM node WHERE online_until > ?;" + rows = self.query(query, (time.time(),)) result: Set[int] = {row["node_id"] for row in rows} return result - def create_run(self) -> int: - """Create one run and store it in state.""" + def create_run(self, fab_id: str, fab_version: str) -> int: + """Create a new run for the specified `fab_id` and `fab_version`.""" # Sample a random int64 as run_id run_id: int = int.from_bytes(os.urandom(8), "little", signed=True) @@ -513,12 +583,86 @@ def 
create_run(self) -> int: query = "SELECT COUNT(*) FROM run WHERE run_id = ?;" # If run_id does not exist if self.query(query, (run_id,))[0]["COUNT(*)"] == 0: - query = "INSERT INTO run VALUES(:run_id);" - self.query(query, {"run_id": run_id}) + query = "INSERT INTO run (run_id, fab_id, fab_version) VALUES (?, ?, ?);" + self.query(query, (run_id, fab_id, fab_version)) return run_id log(ERROR, "Unexpected run creation failure.") return 0 + def store_server_public_private_key( + self, public_key: bytes, private_key: bytes + ) -> None: + """Store `server_public_key` and `server_private_key` in state.""" + query = "SELECT COUNT(*) FROM credential" + count = self.query(query)[0]["COUNT(*)"] + if count < 1: + query = ( + "INSERT OR REPLACE INTO credential (public_key, private_key) " + "VALUES (:public_key, :private_key)" + ) + self.query(query, {"public_key": public_key, "private_key": private_key}) + else: + raise RuntimeError("Server public and private key already set") + + def get_server_private_key(self) -> Optional[bytes]: + """Retrieve `server_private_key` in urlsafe bytes.""" + query = "SELECT private_key FROM credential" + rows = self.query(query) + try: + private_key: Optional[bytes] = rows[0]["private_key"] + except IndexError: + private_key = None + return private_key + + def get_server_public_key(self) -> Optional[bytes]: + """Retrieve `server_public_key` in urlsafe bytes.""" + query = "SELECT public_key FROM credential" + rows = self.query(query) + try: + public_key: Optional[bytes] = rows[0]["public_key"] + except IndexError: + public_key = None + return public_key + + def store_client_public_keys(self, public_keys: Set[bytes]) -> None: + """Store a set of `client_public_keys` in state.""" + query = "INSERT INTO public_key (public_key) VALUES (?)" + data = [(key,) for key in public_keys] + self.query(query, data) + + def store_client_public_key(self, public_key: bytes) -> None: + """Store a `client_public_key` in state.""" + query = "INSERT INTO public_key 
(public_key) VALUES (:public_key)" + self.query(query, {"public_key": public_key}) + + def get_client_public_keys(self) -> Set[bytes]: + """Retrieve all currently stored `client_public_keys` as a set.""" + query = "SELECT public_key FROM public_key" + rows = self.query(query) + result: Set[bytes] = {row["public_key"] for row in rows} + return result + + def get_run(self, run_id: int) -> Tuple[int, str, str]: + """Retrieve information about the run with the specified `run_id`.""" + query = "SELECT * FROM run WHERE run_id = ?;" + try: + row = self.query(query, (run_id,))[0] + return run_id, row["fab_id"], row["fab_version"] + except sqlite3.IntegrityError: + log(ERROR, "`run_id` does not exist.") + return 0, "", "" + + def acknowledge_ping(self, node_id: int, ping_interval: float) -> bool: + """Acknowledge a ping received from a node, serving as a heartbeat.""" + # Update `online_until` and `ping_interval` for the given `node_id` + query = "UPDATE node SET online_until = ?, ping_interval = ? 
WHERE node_id = ?;" + try: + self.query(query, (time.time() + ping_interval, ping_interval, node_id)) + return True + except sqlite3.IntegrityError: + log(ERROR, "`node_id` does not exist.") + return False + def dict_factory( cursor: sqlite3.Cursor, @@ -544,6 +688,7 @@ def task_ins_to_dict(task_msg: TaskIns) -> Dict[str, Any]: "consumer_node_id": task_msg.task.consumer.node_id, "created_at": task_msg.task.created_at, "delivered_at": task_msg.task.delivered_at, + "pushed_at": task_msg.task.pushed_at, "ttl": task_msg.task.ttl, "ancestry": ",".join(task_msg.task.ancestry), "task_type": task_msg.task.task_type, @@ -564,6 +709,7 @@ def task_res_to_dict(task_msg: TaskRes) -> Dict[str, Any]: "consumer_node_id": task_msg.task.consumer.node_id, "created_at": task_msg.task.created_at, "delivered_at": task_msg.task.delivered_at, + "pushed_at": task_msg.task.pushed_at, "ttl": task_msg.task.ttl, "ancestry": ",".join(task_msg.task.ancestry), "task_type": task_msg.task.task_type, @@ -592,6 +738,7 @@ def dict_to_task_ins(task_dict: Dict[str, Any]) -> TaskIns: ), created_at=task_dict["created_at"], delivered_at=task_dict["delivered_at"], + pushed_at=task_dict["pushed_at"], ttl=task_dict["ttl"], ancestry=task_dict["ancestry"].split(","), task_type=task_dict["task_type"], @@ -621,6 +768,7 @@ def dict_to_task_res(task_dict: Dict[str, Any]) -> TaskRes: ), created_at=task_dict["created_at"], delivered_at=task_dict["delivered_at"], + pushed_at=task_dict["pushed_at"], ttl=task_dict["ttl"], ancestry=task_dict["ancestry"].split(","), task_type=task_dict["task_type"], diff --git a/src/py/flwr/server/superlink/state/sqlite_state_test.py b/src/py/flwr/server/superlink/state/sqlite_state_test.py index 9eef71e396e3..20927df1cf12 100644 --- a/src/py/flwr/server/superlink/state/sqlite_state_test.py +++ b/src/py/flwr/server/superlink/state/sqlite_state_test.py @@ -38,6 +38,7 @@ def test_ins_res_to_dict(self) -> None: "consumer_node_id", "created_at", "delivered_at", + "pushed_at", "ttl", 
"ancestry", "task_type", diff --git a/src/py/flwr/server/superlink/state/state.py b/src/py/flwr/server/superlink/state/state.py index 9337ae6d8624..7992aa2345a1 100644 --- a/src/py/flwr/server/superlink/state/state.py +++ b/src/py/flwr/server/superlink/state/state.py @@ -16,7 +16,7 @@ import abc -from typing import List, Optional, Set +from typing import List, Optional, Set, Tuple from uuid import UUID from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 @@ -132,7 +132,7 @@ def delete_tasks(self, task_ids: Set[UUID]) -> None: """Delete all delivered TaskIns/TaskRes pairs.""" @abc.abstractmethod - def create_node(self) -> int: + def create_node(self, ping_interval: float) -> int: """Create, store in state, and return `node_id`.""" @abc.abstractmethod @@ -150,5 +150,68 @@ def get_nodes(self, run_id: int) -> Set[int]: """ @abc.abstractmethod - def create_run(self) -> int: - """Create one run.""" + def create_run(self, fab_id: str, fab_version: str) -> int: + """Create a new run for the specified `fab_id` and `fab_version`.""" + + @abc.abstractmethod + def get_run(self, run_id: int) -> Tuple[int, str, str]: + """Retrieve information about the run with the specified `run_id`. + + Parameters + ---------- + run_id : int + The identifier of the run. + + Returns + ------- + Tuple[int, str, str] + A tuple containing three elements: + - `run_id`: The identifier of the run, same as the specified `run_id`. + - `fab_id`: The identifier of the FAB used in the specified run. + - `fab_version`: The version of the FAB used in the specified run. 
+ """ + + @abc.abstractmethod + def store_server_public_private_key( + self, public_key: bytes, private_key: bytes + ) -> None: + """Store `server_public_key` and `server_private_key` in state.""" + + @abc.abstractmethod + def get_server_private_key(self) -> Optional[bytes]: + """Retrieve `server_private_key` in urlsafe bytes.""" + + @abc.abstractmethod + def get_server_public_key(self) -> Optional[bytes]: + """Retrieve `server_public_key` in urlsafe bytes.""" + + @abc.abstractmethod + def store_client_public_keys(self, public_keys: Set[bytes]) -> None: + """Store a set of `client_public_keys` in state.""" + + @abc.abstractmethod + def store_client_public_key(self, public_key: bytes) -> None: + """Store a `client_public_key` in state.""" + + @abc.abstractmethod + def get_client_public_keys(self) -> Set[bytes]: + """Retrieve all currently stored `client_public_keys` as a set.""" + + @abc.abstractmethod + def acknowledge_ping(self, node_id: int, ping_interval: float) -> bool: + """Acknowledge a ping received from a node, serving as a heartbeat. + + Parameters + ---------- + node_id : int + The `node_id` from which the ping was received. + ping_interval : float + The interval (in seconds) from the current timestamp within which the next + ping from this node must be received. This acts as a hard deadline to ensure + an accurate assessment of the node's availability. + + Returns + ------- + is_acknowledged : bool + True if the ping is successfully acknowledged; otherwise, False. 
+ """ diff --git a/src/py/flwr/server/superlink/state/state_test.py b/src/py/flwr/server/superlink/state/state_test.py index d0470a7ce7f7..0aeb7b064ad6 100644 --- a/src/py/flwr/server/superlink/state/state_test.py +++ b/src/py/flwr/server/superlink/state/state_test.py @@ -16,12 +16,21 @@ # pylint: disable=invalid-name, disable=R0904 import tempfile +import time import unittest from abc import abstractmethod from datetime import datetime, timezone from typing import List +from unittest.mock import patch from uuid import uuid4 +from flwr.common import DEFAULT_TTL +from flwr.common.constant import ErrorCode +from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( + generate_key_pairs, + private_key_to_bytes, + public_key_to_bytes, +) from flwr.proto.node_pb2 import Node # pylint: disable=E0611 from flwr.proto.recordset_pb2 import RecordSet # pylint: disable=E0611 from flwr.proto.task_pb2 import Task, TaskIns, TaskRes # pylint: disable=E0611 @@ -39,6 +48,20 @@ def state_factory(self) -> State: """Provide state implementation to test.""" raise NotImplementedError() + def test_create_and_get_run(self) -> None: + """Test if create_run and get_run work correctly.""" + # Prepare + state: State = self.state_factory() + run_id = state.create_run("Mock/mock", "v1.0.0") + + # Execute + actual_run_id, fab_id, fab_version = state.get_run(run_id) + + # Assert + assert actual_run_id == run_id + assert fab_id == "Mock/mock" + assert fab_version == "v1.0.0" + def test_get_task_ins_empty(self) -> None: """Validate that a new state has no TaskIns.""" # Prepare @@ -66,14 +89,13 @@ def test_store_task_ins_one(self) -> None: # Prepare consumer_node_id = 1 state = self.state_factory() - run_id = state.create_run() + run_id = state.create_run("mock/mock", "v1.0.0") task_ins = create_task_ins( consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id ) - assert task_ins.task.created_at == "" # pylint: disable=no-member + assert task_ins.task.created_at < time.time() 
# pylint: disable=no-member assert task_ins.task.delivered_at == "" # pylint: disable=no-member - assert task_ins.task.ttl == "" # pylint: disable=no-member # Execute state.store_task_ins(task_ins=task_ins) @@ -89,26 +111,20 @@ def test_store_task_ins_one(self) -> None: actual_task = actual_task_ins.task - assert actual_task.created_at != "" assert actual_task.delivered_at != "" - assert actual_task.ttl != "" - assert datetime.fromisoformat(actual_task.created_at) > datetime( - 2020, 1, 1, tzinfo=timezone.utc - ) + assert actual_task.created_at < actual_task.pushed_at assert datetime.fromisoformat(actual_task.delivered_at) > datetime( 2020, 1, 1, tzinfo=timezone.utc ) - assert datetime.fromisoformat(actual_task.ttl) > datetime( - 2020, 1, 1, tzinfo=timezone.utc - ) + assert actual_task.ttl > 0 def test_store_and_delete_tasks(self) -> None: """Test delete_tasks.""" # Prepare consumer_node_id = 1 state = self.state_factory() - run_id = state.create_run() + run_id = state.create_run("mock/mock", "v1.0.0") task_ins_0 = create_task_ins( consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id ) @@ -182,7 +198,7 @@ def test_task_ins_store_anonymous_and_retrieve_anonymous(self) -> None: """ # Prepare state: State = self.state_factory() - run_id = state.create_run() + run_id = state.create_run("mock/mock", "v1.0.0") task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) # Execute @@ -197,7 +213,7 @@ def test_task_ins_store_anonymous_and_fail_retrieving_identitiy(self) -> None: """Store anonymous TaskIns and fail to retrieve it.""" # Prepare state: State = self.state_factory() - run_id = state.create_run() + run_id = state.create_run("mock/mock", "v1.0.0") task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) # Execute @@ -211,7 +227,7 @@ def test_task_ins_store_identity_and_fail_retrieving_anonymous(self) -> None: """Store identity TaskIns and fail retrieving it as anonymous.""" # Prepare state: State = 
self.state_factory() - run_id = state.create_run() + run_id = state.create_run("mock/mock", "v1.0.0") task_ins = create_task_ins(consumer_node_id=1, anonymous=False, run_id=run_id) # Execute @@ -225,7 +241,7 @@ def test_task_ins_store_identity_and_retrieve_identity(self) -> None: """Store identity TaskIns and retrieve it.""" # Prepare state: State = self.state_factory() - run_id = state.create_run() + run_id = state.create_run("mock/mock", "v1.0.0") task_ins = create_task_ins(consumer_node_id=1, anonymous=False, run_id=run_id) # Execute @@ -242,7 +258,7 @@ def test_task_ins_store_delivered_and_fail_retrieving(self) -> None: """Fail retrieving delivered task.""" # Prepare state: State = self.state_factory() - run_id = state.create_run() + run_id = state.create_run("mock/mock", "v1.0.0") task_ins = create_task_ins(consumer_node_id=1, anonymous=False, run_id=run_id) # Execute @@ -285,7 +301,7 @@ def test_task_res_store_and_retrieve_by_task_ins_id(self) -> None: """Store TaskRes retrieve it by task_ins_id.""" # Prepare state: State = self.state_factory() - run_id = state.create_run() + run_id = state.create_run("mock/mock", "v1.0.0") task_ins_id = uuid4() task_res = create_task_res( producer_node_id=0, @@ -306,7 +322,7 @@ def test_node_ids_initial_state(self) -> None: """Test retrieving all node_ids and empty initial state.""" # Prepare state: State = self.state_factory() - run_id = state.create_run() + run_id = state.create_run("mock/mock", "v1.0.0") # Execute retrieved_node_ids = state.get_nodes(run_id) @@ -318,12 +334,12 @@ def test_create_node_and_get_nodes(self) -> None: """Test creating a client node.""" # Prepare state: State = self.state_factory() - run_id = state.create_run() + run_id = state.create_run("mock/mock", "v1.0.0") node_ids = [] # Execute for _ in range(10): - node_ids.append(state.create_node()) + node_ids.append(state.create_node(ping_interval=10)) retrieved_node_ids = state.get_nodes(run_id) # Assert @@ -334,8 +350,8 @@ def test_delete_node(self) 
-> None: """Test deleting a client node.""" # Prepare state: State = self.state_factory() - run_id = state.create_run() - node_id = state.create_node() + run_id = state.create_run("mock/mock", "v1.0.0") + node_id = state.create_node(ping_interval=10) # Execute state.delete_node(node_id) @@ -348,9 +364,9 @@ def test_get_nodes_invalid_run_id(self) -> None: """Test retrieving all node_ids with invalid run_id.""" # Prepare state: State = self.state_factory() - state.create_run() + state.create_run("mock/mock", "v1.0.0") invalid_run_id = 61016 - state.create_node() + state.create_node(ping_interval=10) # Execute retrieved_node_ids = state.get_nodes(invalid_run_id) @@ -362,7 +378,7 @@ def test_num_task_ins(self) -> None: """Test if num_tasks returns correct number of not delivered task_ins.""" # Prepare state: State = self.state_factory() - run_id = state.create_run() + run_id = state.create_run("mock/mock", "v1.0.0") task_0 = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) task_1 = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) @@ -380,7 +396,7 @@ def test_num_task_res(self) -> None: """Test if num_tasks returns correct number of not delivered task_res.""" # Prepare state: State = self.state_factory() - run_id = state.create_run() + run_id = state.create_run("mock/mock", "v1.0.0") task_0 = create_task_res( producer_node_id=0, anonymous=True, ancestry=["1"], run_id=run_id ) @@ -398,6 +414,146 @@ def test_num_task_res(self) -> None: # Assert assert num == 2 + def test_server_public_private_key(self) -> None: + """Test get server public and private key after inserting.""" + # Prepare + state: State = self.state_factory() + private_key, public_key = generate_key_pairs() + private_key_bytes = private_key_to_bytes(private_key) + public_key_bytes = public_key_to_bytes(public_key) + + # Execute + state.store_server_public_private_key(public_key_bytes, private_key_bytes) + server_private_key = state.get_server_private_key() + 
server_public_key = state.get_server_public_key() + + # Assert + assert server_private_key == private_key_bytes + assert server_public_key == public_key_bytes + + def test_server_public_private_key_none(self) -> None: + """Test get server public and private key without inserting.""" + # Prepare + state: State = self.state_factory() + + # Execute + server_private_key = state.get_server_private_key() + server_public_key = state.get_server_public_key() + + # Assert + assert server_private_key is None + assert server_public_key is None + + def test_store_server_public_private_key_twice(self) -> None: + """Test inserting public and private key twice.""" + # Prepare + state: State = self.state_factory() + private_key, public_key = generate_key_pairs() + private_key_bytes = private_key_to_bytes(private_key) + public_key_bytes = public_key_to_bytes(public_key) + new_private_key, new_public_key = generate_key_pairs() + new_private_key_bytes = private_key_to_bytes(new_private_key) + new_public_key_bytes = public_key_to_bytes(new_public_key) + + # Execute + state.store_server_public_private_key(public_key_bytes, private_key_bytes) + + # Assert + with self.assertRaises(RuntimeError): + state.store_server_public_private_key( + new_public_key_bytes, new_private_key_bytes + ) + + def test_client_public_keys(self) -> None: + """Test store_client_public_keys and get_client_public_keys from state.""" + # Prepare + state: State = self.state_factory() + key_pairs = [generate_key_pairs() for _ in range(3)] + public_keys = {public_key_to_bytes(pair[1]) for pair in key_pairs} + + # Execute + state.store_client_public_keys(public_keys) + client_public_keys = state.get_client_public_keys() + + # Assert + assert client_public_keys == public_keys + + def test_client_public_key(self) -> None: + """Test store_client_public_key and get_client_public_keys from state.""" + # Prepare + state: State = self.state_factory() + key_pairs = [generate_key_pairs() for _ in range(3)] + public_keys = 
{public_key_to_bytes(pair[1]) for pair in key_pairs} + + # Execute + for public_key in public_keys: + state.store_client_public_key(public_key) + client_public_keys = state.get_client_public_keys() + + # Assert + assert client_public_keys == public_keys + + def test_acknowledge_ping(self) -> None: + """Test if acknowledge_ping works and if get_nodes return online nodes.""" + # Prepare + state: State = self.state_factory() + run_id = state.create_run("mock/mock", "v1.0.0") + node_ids = [state.create_node(ping_interval=10) for _ in range(100)] + for node_id in node_ids[:70]: + state.acknowledge_ping(node_id, ping_interval=30) + for node_id in node_ids[70:]: + state.acknowledge_ping(node_id, ping_interval=90) + + # Execute + current_time = time.time() + with patch("time.time", side_effect=lambda: current_time + 50): + actual_node_ids = state.get_nodes(run_id) + + # Assert + self.assertSetEqual(actual_node_ids, set(node_ids[70:])) + + def test_node_unavailable_error(self) -> None: + """Test if get_task_res return TaskRes containing node unavailable error.""" + # Prepare + state: State = self.state_factory() + run_id = state.create_run("mock/mock", "v1.0.0") + node_id_0 = state.create_node(ping_interval=90) + node_id_1 = state.create_node(ping_interval=30) + # Create and store TaskIns + task_ins_0 = create_task_ins( + consumer_node_id=node_id_0, anonymous=False, run_id=run_id + ) + task_ins_1 = create_task_ins( + consumer_node_id=node_id_1, anonymous=False, run_id=run_id + ) + task_id_0 = state.store_task_ins(task_ins=task_ins_0) + task_id_1 = state.store_task_ins(task_ins=task_ins_1) + assert task_id_0 is not None and task_id_1 is not None + + # Get TaskIns to mark them delivered + state.get_task_ins(node_id=node_id_0, limit=None) + + # Create and store TaskRes + task_res_0 = create_task_res( + producer_node_id=100, + anonymous=False, + ancestry=[str(task_id_0)], + run_id=run_id, + ) + state.store_task_res(task_res_0) + + # Execute + current_time = time.time() + 
task_res_list: List[TaskRes] = [] + with patch("time.time", side_effect=lambda: current_time + 50): + task_res_list = state.get_task_res({task_id_0, task_id_1}, limit=None) + + # Assert + assert len(task_res_list) == 2 + err_taskres = task_res_list[1] + assert err_taskres.task.HasField("error") + assert err_taskres.task.error.code == ErrorCode.NODE_UNAVAILABLE + def create_task_ins( consumer_node_id: int, @@ -420,8 +576,11 @@ def create_task_ins( consumer=consumer, task_type="mock", recordset=RecordSet(parameters={}, metrics={}, configs={}), + ttl=DEFAULT_TTL, + created_at=time.time(), ), ) + task.task.pushed_at = time.time() return task @@ -442,8 +601,11 @@ def create_task_res( ancestry=ancestry, task_type="mock", recordset=RecordSet(parameters={}, metrics={}, configs={}), + ttl=DEFAULT_TTL, + created_at=time.time(), ), ) + task_res.task.pushed_at = time.time() return task_res @@ -477,7 +639,7 @@ def test_initialize(self) -> None: result = state.query("SELECT name FROM sqlite_schema;") # Assert - assert len(result) == 8 + assert len(result) == 13 class SqliteFileBasedTest(StateTest, unittest.TestCase): @@ -502,7 +664,7 @@ def test_initialize(self) -> None: result = state.query("SELECT name FROM sqlite_schema;") # Assert - assert len(result) == 8 + assert len(result) == 13 if __name__ == "__main__": diff --git a/src/py/flwr/server/superlink/state/utils.py b/src/py/flwr/server/superlink/state/utils.py new file mode 100644 index 000000000000..233a90946cc7 --- /dev/null +++ b/src/py/flwr/server/superlink/state/utils.py @@ -0,0 +1,56 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utility functions for State.""" + + +import time +from logging import ERROR +from uuid import uuid4 + +from flwr.common import log +from flwr.common.constant import ErrorCode +from flwr.proto.error_pb2 import Error # pylint: disable=E0611 +from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.task_pb2 import Task, TaskIns, TaskRes # pylint: disable=E0611 + +NODE_UNAVAILABLE_ERROR_REASON = ( + "Error: Node Unavailable - The destination node is currently unavailable. " + "It exceeds the time limit specified in its last ping." 
+) + + +def make_node_unavailable_taskres(ref_taskins: TaskIns) -> TaskRes: + """Generate a TaskRes with a node unavailable error from a TaskIns.""" + current_time = time.time() + ttl = ref_taskins.task.ttl - (current_time - ref_taskins.task.created_at) + if ttl < 0: + log(ERROR, "Creating TaskRes for TaskIns that exceeds its TTL.") + ttl = 0 + return TaskRes( + task_id=str(uuid4()), + group_id=ref_taskins.group_id, + run_id=ref_taskins.run_id, + task=Task( + producer=Node(node_id=ref_taskins.task.consumer.node_id, anonymous=False), + consumer=Node(node_id=ref_taskins.task.producer.node_id, anonymous=False), + created_at=current_time, + ttl=ttl, + ancestry=[ref_taskins.task_id], + task_type=ref_taskins.task.task_type, + error=Error( + code=ErrorCode.NODE_UNAVAILABLE, reason=NODE_UNAVAILABLE_ERROR_REASON + ), + ), + ) diff --git a/src/py/flwr/server/utils/validator.py b/src/py/flwr/server/utils/validator.py index f9b271beafdc..c0b0ec85761c 100644 --- a/src/py/flwr/server/utils/validator.py +++ b/src/py/flwr/server/utils/validator.py @@ -31,13 +31,21 @@ def validate_task_ins_or_res(tasks_ins_res: Union[TaskIns, TaskRes]) -> List[str if not tasks_ins_res.HasField("task"): validation_errors.append("`task` does not set field `task`") - # Created/delivered/TTL - if tasks_ins_res.task.created_at != "": - validation_errors.append("`created_at` must be an empty str") + # Created/delivered/TTL/Pushed + if ( + tasks_ins_res.task.created_at < 1711497600.0 + ): # unix timestamp of 27 March 2024 00h:00m:00s UTC + validation_errors.append( + "`created_at` must be a float that records the unix timestamp " + "in seconds when the message was created." 
+ ) if tasks_ins_res.task.delivered_at != "": validation_errors.append("`delivered_at` must be an empty str") - if tasks_ins_res.task.ttl != "": - validation_errors.append("`ttl` must be an empty str") + if tasks_ins_res.task.ttl <= 0: + validation_errors.append("`ttl` must be higher than zero") + if tasks_ins_res.task.pushed_at < 1711497600.0: + # unix timestamp of 27 March 2024 00h:00m:00s UTC + validation_errors.append("`pushed_at` is not a recent timestamp") # TaskIns specific if isinstance(tasks_ins_res, TaskIns): @@ -66,8 +74,11 @@ def validate_task_ins_or_res(tasks_ins_res: Union[TaskIns, TaskRes]) -> List[str # Content check if tasks_ins_res.task.task_type == "": validation_errors.append("`task_type` MUST be set") - if not tasks_ins_res.task.HasField("recordset"): - validation_errors.append("`recordset` MUST be set") + if not ( + tasks_ins_res.task.HasField("recordset") + ^ tasks_ins_res.task.HasField("error") + ): + validation_errors.append("Either `recordset` or `error` MUST be set") # Ancestors if len(tasks_ins_res.task.ancestry) != 0: @@ -106,8 +117,11 @@ def validate_task_ins_or_res(tasks_ins_res: Union[TaskIns, TaskRes]) -> List[str # Content check if tasks_ins_res.task.task_type == "": validation_errors.append("`task_type` MUST be set") - if not tasks_ins_res.task.HasField("recordset"): - validation_errors.append("`recordset` MUST be set") + if not ( + tasks_ins_res.task.HasField("recordset") + ^ tasks_ins_res.task.HasField("error") + ): + validation_errors.append("Either `recordset` or `error` MUST be set") # Ancestors if len(tasks_ins_res.task.ancestry) == 0: diff --git a/src/py/flwr/server/utils/validator_test.py b/src/py/flwr/server/utils/validator_test.py index 8e0849508020..61fe094c23d4 100644 --- a/src/py/flwr/server/utils/validator_test.py +++ b/src/py/flwr/server/utils/validator_test.py @@ -15,9 +15,11 @@ """Validator tests.""" +import time import unittest from typing import List, Tuple +from flwr.common import DEFAULT_TTL from 
flwr.proto.node_pb2 import Node # pylint: disable=E0611 from flwr.proto.recordset_pb2 import RecordSet # pylint: disable=E0611 from flwr.proto.task_pb2 import Task, TaskIns, TaskRes # pylint: disable=E0611 @@ -96,8 +98,12 @@ def create_task_ins( consumer=consumer, task_type="mock", recordset=RecordSet(parameters={}, metrics={}, configs={}), + ttl=DEFAULT_TTL, + created_at=time.time(), ), ) + + task.task.pushed_at = time.time() return task @@ -117,6 +123,10 @@ def create_task_res( ancestry=ancestry, task_type="mock", recordset=RecordSet(parameters={}, metrics={}, configs={}), + ttl=DEFAULT_TTL, + created_at=time.time(), ), ) + + task_res.task.pushed_at = time.time() return task_res diff --git a/src/py/flwr/server/workflow/default_workflows.py b/src/py/flwr/server/workflow/default_workflows.py index 876ae56dcadc..ac023cc98ca5 100644 --- a/src/py/flwr/server/workflow/default_workflows.py +++ b/src/py/flwr/server/workflow/default_workflows.py @@ -127,7 +127,6 @@ def default_init_params_workflow(driver: Driver, context: Context) -> None: message_type=MessageTypeLegacy.GET_PARAMETERS, dst_node_id=random_client.node_id, group_id="0", - ttl="", ) ] ) @@ -226,7 +225,6 @@ def default_fit_workflow( # pylint: disable=R0914 message_type=MessageType.TRAIN, dst_node_id=proxy.node_id, group_id=str(current_round), - ttl="", ) for proxy, fitins in client_instructions ] @@ -306,7 +304,6 @@ def default_evaluate_workflow(driver: Driver, context: Context) -> None: message_type=MessageType.EVALUATE, dst_node_id=proxy.node_id, group_id=str(current_round), - ttl="", ) for proxy, evalins in client_instructions ] diff --git a/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py b/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py index 42ee9c15f1cd..d6d97c28f313 100644 --- a/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py +++ b/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py @@ -373,7 +373,6 @@ def make(nid: int) 
-> Message: message_type=MessageType.TRAIN, dst_node_id=nid, group_id=str(cfg[WorkflowKey.CURRENT_ROUND]), - ttl="", ) log( @@ -421,7 +420,6 @@ def make(nid: int) -> Message: message_type=MessageType.TRAIN, dst_node_id=nid, group_id=str(cfg[WorkflowKey.CURRENT_ROUND]), - ttl="", ) # Broadcast public keys to clients and receive secret key shares @@ -492,7 +490,6 @@ def make(nid: int) -> Message: message_type=MessageType.TRAIN, dst_node_id=nid, group_id=str(cfg[WorkflowKey.CURRENT_ROUND]), - ttl="", ) log( @@ -563,7 +560,6 @@ def make(nid: int) -> Message: message_type=MessageType.TRAIN, dst_node_id=nid, group_id=str(current_round), - ttl="", ) log( diff --git a/src/py/flwr/simulation/__init__.py b/src/py/flwr/simulation/__init__.py index d36d9977d1c5..57b0b01eb319 100644 --- a/src/py/flwr/simulation/__init__.py +++ b/src/py/flwr/simulation/__init__.py @@ -17,7 +17,7 @@ import importlib -from flwr.simulation.run_simulation import run_simulation, run_simulation_from_cli +from flwr.simulation.run_simulation import run_simulation is_ray_installed = importlib.util.find_spec("ray") is not None @@ -36,4 +36,4 @@ def start_simulation(*args, **kwargs): # type: ignore raise ImportError(RAY_IMPORT_ERROR) -__all__ = ["start_simulation", "run_simulation_from_cli", "run_simulation"] +__all__ = ["start_simulation", "run_simulation"] diff --git a/src/py/flwr/simulation/app.py b/src/py/flwr/simulation/app.py index ff18f37664be..4b4b7249ccd3 100644 --- a/src/py/flwr/simulation/app.py +++ b/src/py/flwr/simulation/app.py @@ -15,6 +15,8 @@ """Flower simulation app.""" +import asyncio +import logging import sys import threading import traceback @@ -27,7 +29,7 @@ from flwr.client import ClientFn from flwr.common import EventType, event -from flwr.common.logger import log +from flwr.common.logger import log, set_logger_propagation from flwr.server.client_manager import ClientManager from flwr.server.history import History from flwr.server.server import Server, init_defaults, run_fl @@ 
-156,6 +158,7 @@ def start_simulation( is an advanced feature. For all details, please refer to the Ray documentation: https://docs.ray.io/en/latest/ray-core/scheduling/index.html + Returns ------- hist : flwr.server.history.History @@ -167,6 +170,18 @@ def start_simulation( {"num_clients": len(clients_ids) if clients_ids is not None else num_clients}, ) + # Set logger propagation + loop: Optional[asyncio.AbstractEventLoop] = None + try: + loop = asyncio.get_running_loop() + except RuntimeError: + loop = None + finally: + if loop and loop.is_running(): + # Set logger propagation to False to prevent duplicated log output in Colab. + logger = logging.getLogger("flwr") + _ = set_logger_propagation(logger, False) + # Initialize server and server config initialized_server, initialized_config = init_defaults( server=server, diff --git a/src/py/flwr/simulation/ray_transport/ray_actor.py b/src/py/flwr/simulation/ray_transport/ray_actor.py index 08d0576e39f0..9caf0fc3e6c0 100644 --- a/src/py/flwr/simulation/ray_transport/ray_actor.py +++ b/src/py/flwr/simulation/ray_transport/ray_actor.py @@ -16,7 +16,6 @@ import asyncio import threading -import traceback from abc import ABC from logging import DEBUG, ERROR, WARNING from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union @@ -25,22 +24,13 @@ from ray import ObjectRef from ray.util.actor_pool import ActorPool -from flwr.client.client_app import ClientApp, LoadClientAppError +from flwr.client.client_app import ClientApp, ClientAppException, LoadClientAppError from flwr.common import Context, Message from flwr.common.logger import log ClientAppFn = Callable[[], ClientApp] -class ClientException(Exception): - """Raised when client side logic crashes with an exception.""" - - def __init__(self, message: str): - div = ">" * 7 - self.message = "\n" + div + "A ClientException occurred." 
+ message - super().__init__(self.message) - - class VirtualClientEngineActor(ABC): """Abstract base class for VirtualClientEngine Actors.""" @@ -71,17 +61,7 @@ def run( raise load_ex except Exception as ex: - client_trace = traceback.format_exc() - mssg = ( - "\n\tSomething went wrong when running your client run." - "\n\tClient " - + cid - + " crashed when the " - + self.__class__.__name__ - + " was running its run." - "\n\tException triggered on the client side: " + client_trace, - ) - raise ClientException(str(mssg)) from ex + raise ClientAppException(str(ex)) from ex return cid, out_message, context @@ -493,13 +473,17 @@ async def submit( self._future_to_actor[future] = actor return future + async def add_actor_back_to_pool(self, future: Any) -> None: + """Ad actor assigned to run future back into the pool.""" + actor = self._future_to_actor.pop(future) + await self.pool.put(actor) + async def fetch_result_and_return_actor_to_pool( self, future: Any ) -> Tuple[Message, Context]: """Pull result given a future and add actor back to pool.""" # Get actor that ran job - actor = self._future_to_actor.pop(future) - await self.pool.put(actor) + await self.add_actor_back_to_pool(future) # Retrieve result for object store # Instead of doing ray.get(future) we await it _, out_mssg, updated_context = await future diff --git a/src/py/flwr/simulation/ray_transport/ray_client_proxy.py b/src/py/flwr/simulation/ray_transport/ray_client_proxy.py index c3493163ac52..5e344eb087ee 100644 --- a/src/py/flwr/simulation/ray_transport/ray_client_proxy.py +++ b/src/py/flwr/simulation/ray_transport/ray_client_proxy.py @@ -23,7 +23,7 @@ from flwr.client import ClientFn from flwr.client.client_app import ClientApp from flwr.client.node_state import NodeState -from flwr.common import Message, Metadata, RecordSet +from flwr.common import DEFAULT_TTL, Message, Metadata, RecordSet from flwr.common.constant import MessageType, MessageTypeLegacy from flwr.common.logger import log from 
flwr.common.recordset_compat import ( @@ -105,7 +105,7 @@ def _wrap_recordset_in_message( src_node_id=0, dst_node_id=int(self.cid), reply_to_message="", - ttl=str(timeout) if timeout else "", + ttl=timeout if timeout else DEFAULT_TTL, message_type=message_type, partition_id=int(self.cid), ), diff --git a/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py b/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py index 22c5425cd9fd..9680b3846f1d 100644 --- a/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py +++ b/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py @@ -24,6 +24,7 @@ from flwr.client import Client, NumPyClient from flwr.client.client_app import ClientApp from flwr.common import ( + DEFAULT_TTL, Config, ConfigsRecord, Context, @@ -202,7 +203,7 @@ def _load_app() -> ClientApp: src_node_id=0, dst_node_id=12345, reply_to_message="", - ttl="", + ttl=DEFAULT_TTL, message_type=MessageTypeLegacy.GET_PROPERTIES, partition_id=int(cid), ), diff --git a/src/py/flwr/simulation/run_simulation.py b/src/py/flwr/simulation/run_simulation.py index 56fce363726a..113b4c594ba5 100644 --- a/src/py/flwr/simulation/run_simulation.py +++ b/src/py/flwr/simulation/run_simulation.py @@ -28,8 +28,9 @@ from flwr.client import ClientApp from flwr.common import EventType, event, log +from flwr.common.logger import set_logger_propagation from flwr.common.typing import ConfigsRecordValues -from flwr.server.driver.driver import Driver +from flwr.server.driver import Driver, GrpcDriver from flwr.server.run_serverapp import run from flwr.server.server_app import ServerApp from flwr.server.superlink.driver.driver_grpc import run_driver_api_grpc @@ -204,7 +205,7 @@ def _main_loop( serverapp_th = None try: # Initialize Driver - driver = Driver( + driver = GrpcDriver( driver_service_address=driver_api_address, root_certificates=None, ) @@ -364,6 +365,8 @@ def _run_simulation( finally: if run_in_thread: + # Set logger propagation to False to prevent 
duplicated log output in Colab. + logger = set_logger_propagation(logger, False) log(DEBUG, "Starting Simulation Engine on a new thread.") simulation_engine_th = threading.Thread(target=_main_loop, args=args) simulation_engine_th.start() diff --git a/src/py/flwr_example/__init__.py b/src/py/flwr_example/__init__.py deleted file mode 100644 index cd2e721e36b5..000000000000 --- a/src/py/flwr_example/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""DEPRECATED Flower usage examples. - -Please note that the entire `flwr_examples` packages will be removed in a -future release. Examples will be migrated to the `/examples` directory. -""" - -warning = """ -DEPRECATION WARNING: Flower usage examples will be removed. - -All examples will be migrated to the `examples` directory. The `flwr_example` -package will be removed in a future release. -""" -print(warning) diff --git a/src/py/flwr_example/pytorch_cifar/__init__.py b/src/py/flwr_example/pytorch_cifar/__init__.py deleted file mode 100644 index e1a6d4c2e25e..000000000000 --- a/src/py/flwr_example/pytorch_cifar/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower example using PyTorch for CIFAR-10 image classification.""" - - -DEFAULT_SERVER_ADDRESS = "[::]:8080" diff --git a/src/py/flwr_example/pytorch_cifar/cifar.py b/src/py/flwr_example/pytorch_cifar/cifar.py deleted file mode 100644 index 279e635de356..000000000000 --- a/src/py/flwr_example/pytorch_cifar/cifar.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""PyTorch CIFAR-10 image classification. - -The code is generally adapted from 'PyTorch: A 60 Minute Blitz'. 
Further -explanations are given in the official PyTorch tutorial: - -https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html -""" - - -# mypy: ignore-errors -# pylint: disable=W0223 - - -from collections import OrderedDict -from typing import Tuple - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torchvision -import torchvision.transforms as transforms -from torch import Tensor - -import flwr as fl - -DATA_ROOT = "~/.flower/data/cifar-10" - - -# pylint: disable=unsubscriptable-object,bad-option-value,R1725 -class Net(nn.Module): - """Simple CNN adapted from 'PyTorch: A 60 Minute Blitz'.""" - - def __init__(self) -> None: - super(Net, self).__init__() - self.conv1 = nn.Conv2d(3, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 5 * 5, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, 10) - - # pylint: disable=arguments-differ,invalid-name - def forward(self, x: Tensor) -> Tensor: - """Compute forward pass.""" - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 5 * 5) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - x = self.fc3(x) - return x - - def get_weights(self) -> fl.common.NDArrays: - """Get model weights as a list of NumPy ndarrays.""" - return [val.cpu().numpy() for _, val in self.state_dict().items()] - - def set_weights(self, weights: fl.common.NDArrays) -> None: - """Set model weights from a list of NumPy ndarrays.""" - state_dict = OrderedDict( - {k: torch.tensor(v) for k, v in zip(self.state_dict().keys(), weights)} - ) - self.load_state_dict(state_dict, strict=True) - - -def load_model() -> Net: - """Load a simple CNN.""" - return Net() - - -# pylint: disable=unused-argument -def load_data() -> Tuple[torchvision.datasets.CIFAR10, torchvision.datasets.CIFAR10]: - """Load CIFAR-10 (training and test set).""" - transform = transforms.Compose( - [transforms.ToTensor(), transforms.Normalize((0.5, 
0.5, 0.5), (0.5, 0.5, 0.5))] - ) - trainset = torchvision.datasets.CIFAR10( - root=DATA_ROOT, train=True, download=True, transform=transform - ) - testset = torchvision.datasets.CIFAR10( - root=DATA_ROOT, train=False, download=True, transform=transform - ) - return trainset, testset - - -def train( - net: Net, - trainloader: torch.utils.data.DataLoader, - epochs: int, - device: torch.device, # pylint: disable=no-member -) -> None: - """Train the network.""" - # Define loss and optimizer - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9) - - print(f"Training {epochs} epoch(s) w/ {len(trainloader)} batches each") - - # Train the network - for epoch in range(epochs): # loop over the dataset multiple times - running_loss = 0.0 - for i, data in enumerate(trainloader, 0): - images, labels = data[0].to(device), data[1].to(device) - - # zero the parameter gradients - optimizer.zero_grad() - - # forward + backward + optimize - outputs = net(images) - loss = criterion(outputs, labels) - loss.backward() - optimizer.step() - - # print statistics - running_loss += loss.item() - if i % 2000 == 1999: # print every 2000 mini-batches - print("[%d, %5d] loss: %.3f" % (epoch + 1, i + 1, running_loss / 2000)) - running_loss = 0.0 - - -def test( - net: Net, - testloader: torch.utils.data.DataLoader, - device: torch.device, # pylint: disable=no-member -) -> Tuple[float, float]: - """Validate the network on the entire test set.""" - criterion = nn.CrossEntropyLoss() - correct = 0 - total = 0 - loss = 0.0 - with torch.no_grad(): - for data in testloader: - images, labels = data[0].to(device), data[1].to(device) - outputs = net(images) - loss += criterion(outputs, labels).item() - _, predicted = torch.max(outputs.data, 1) # pylint: disable=no-member - total += labels.size(0) - correct += (predicted == labels).sum().item() - accuracy = correct / total - return loss, accuracy diff --git a/src/py/flwr_example/pytorch_cifar/cifar_test.py 
b/src/py/flwr_example/pytorch_cifar/cifar_test.py deleted file mode 100644 index e9f908531020..000000000000 --- a/src/py/flwr_example/pytorch_cifar/cifar_test.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for PyTorch CIFAR-10 image classification.""" - - -import unittest - -import numpy as np - -from flwr.common import NDArrays - -from . 
import cifar - - -class CifarTestCase(unittest.TestCase): - """Tests for cifar module.""" - - def test_load_model(self) -> None: - """Test the number of (trainable) model parameters.""" - # pylint: disable=no-self-use - - # Prepare - expected = 62006 - - # Execute - model: cifar.Net = cifar.load_model() - actual = sum(p.numel() for p in model.parameters() if p.requires_grad) - - # Assert - assert actual == expected - - def test_get_weights(self) -> None: - """Test get_weights.""" - # pylint: disable=no-self-use - - # Prepare - model: cifar.Net = cifar.load_model() - expected = 10 - - # Execute - weights: NDArrays = model.get_weights() - - # Assert - assert len(weights) == expected - - def test_set_weights(self) -> None: - """Test set_weights.""" - # pylint: disable=no-self-use - - # Prepare - weights_expected: NDArrays = cifar.load_model().get_weights() - model: cifar.Net = cifar.load_model() - - # Execute - model.set_weights(weights_expected) - weights_actual: NDArrays = model.get_weights() - - # Assert - for nda_expected, nda_actual in zip(weights_expected, weights_actual): - np.testing.assert_array_equal(nda_expected, nda_actual) - - -if __name__ == "__main__": - unittest.main(verbosity=2) diff --git a/src/py/flwr_example/pytorch_cifar/client.py b/src/py/flwr_example/pytorch_cifar/client.py deleted file mode 100644 index 369e6a84377d..000000000000 --- a/src/py/flwr_example/pytorch_cifar/client.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower client example using PyTorch for CIFAR-10 image classification.""" - - -import argparse -import timeit - -import torch -import torchvision - -import flwr as fl -from flwr.common import ( - EvaluateIns, - EvaluateRes, - FitIns, - FitRes, - NDArrays, - ParametersRes, -) - -from . import DEFAULT_SERVER_ADDRESS, cifar - -# pylint: disable=no-member -DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") -# pylint: enable=no-member - - -class CifarClient(fl.client.Client): - """Flower client implementing CIFAR-10 image classification using PyTorch.""" - - def __init__( - self, - cid: str, - model: cifar.Net, - trainset: torchvision.datasets.CIFAR10, - testset: torchvision.datasets.CIFAR10, - ) -> None: - self.cid = cid - self.model = model - self.trainset = trainset - self.testset = testset - - def get_parameters(self) -> ParametersRes: - print(f"Client {self.cid}: get_parameters") - - weights: NDArrays = self.model.get_weights() - parameters = fl.common.ndarrays_to_parameters(weights) - return ParametersRes(parameters=parameters) - - def fit(self, ins: FitIns) -> FitRes: - print(f"Client {self.cid}: fit") - - weights: NDArrays = fl.common.parameters_to_ndarrays(ins.parameters) - config = ins.config - fit_begin = timeit.default_timer() - - # Get training config - epochs = int(config["epochs"]) - batch_size = int(config["batch_size"]) - - # Set model parameters - self.model.set_weights(weights) - - # Train model - trainloader = torch.utils.data.DataLoader( - self.trainset, batch_size=batch_size, shuffle=True - ) - cifar.train(self.model, trainloader, epochs=epochs, device=DEVICE) - - # Return the refined weights and the number of examples used for training - weights_prime: NDArrays = self.model.get_weights() - params_prime = 
fl.common.ndarrays_to_parameters(weights_prime) - num_examples_train = len(self.trainset) - fit_duration = timeit.default_timer() - fit_begin - return FitRes( - parameters=params_prime, - num_examples=num_examples_train, - num_examples_ceil=num_examples_train, - fit_duration=fit_duration, - ) - - def evaluate(self, ins: EvaluateIns) -> EvaluateRes: - print(f"Client {self.cid}: evaluate") - - weights = fl.common.parameters_to_ndarrays(ins.parameters) - - # Use provided weights to update the local model - self.model.set_weights(weights) - - # Evaluate the updated model on the local dataset - testloader = torch.utils.data.DataLoader( - self.testset, batch_size=32, shuffle=False - ) - loss, accuracy = cifar.test(self.model, testloader, device=DEVICE) - - # Return the number of evaluation examples and the evaluation result (loss) - return EvaluateRes( - loss=float(loss), num_examples=len(self.testset), accuracy=float(accuracy) - ) - - -def main() -> None: - """Load data, create and start CifarClient.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--server_address", - type=str, - default=DEFAULT_SERVER_ADDRESS, - help=f"gRPC server address (default: {DEFAULT_SERVER_ADDRESS})", - ) - parser.add_argument( - "--cid", type=str, required=True, help="Client CID (no default)" - ) - parser.add_argument( - "--log_host", - type=str, - help="Logserver address (no default)", - ) - args = parser.parse_args() - - # Configure logger - fl.common.logger.configure(f"client_{args.cid}", host=args.log_host) - - # Load model and data - model = cifar.load_model() - model.to(DEVICE) - trainset, testset = cifar.load_data() - - # Start client - client = CifarClient(args.cid, model, trainset, testset) - fl.client.start_client(args.server_address, client) - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_example/pytorch_cifar/run-clients.sh b/src/py/flwr_example/pytorch_cifar/run-clients.sh deleted file mode 100755 index 
de2c6e0d0cb4..000000000000 --- a/src/py/flwr_example/pytorch_cifar/run-clients.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -set -e - -SERVER_ADDRESS="[::]:8080" -NUM_CLIENTS=2 - -echo "Starting $NUM_CLIENTS clients." -for ((i = 0; i < $NUM_CLIENTS; i++)) -do - echo "Starting client(cid=$i) with partition $i out of $NUM_CLIENTS clients." - python -m flwr_example.pytorch_cifar.client \ - --cid=$i \ - --server_address=$SERVER_ADDRESS & -done -echo "Started $NUM_CLIENTS clients." diff --git a/src/py/flwr_example/pytorch_cifar/run-server.sh b/src/py/flwr_example/pytorch_cifar/run-server.sh deleted file mode 100755 index 6bb0d6148bc3..000000000000 --- a/src/py/flwr_example/pytorch_cifar/run-server.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# Start a Flower server -python -m flwr_example.pytorch_cifar.server \ - --rounds=5 \ - --sample_fraction=1.0 \ - --min_sample_size=2 \ - --min_num_clients=2 diff --git a/src/py/flwr_example/pytorch_cifar/server.py b/src/py/flwr_example/pytorch_cifar/server.py deleted file mode 100644 index 5acb4a114b44..000000000000 --- a/src/py/flwr_example/pytorch_cifar/server.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Minimal example on how to start a simple Flower server.""" - - -import argparse -from typing import Callable, Dict, Optional, Tuple - -import torch -import torchvision - -import flwr as fl - -from . 
import DEFAULT_SERVER_ADDRESS, cifar - -# pylint: disable=no-member -DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") -# pylint: enable=no-member - - -def main() -> None: - """Start server and train five rounds.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--server_address", - type=str, - default=DEFAULT_SERVER_ADDRESS, - help=f"gRPC server address (default: {DEFAULT_SERVER_ADDRESS})", - ) - parser.add_argument( - "--rounds", - type=int, - default=1, - help="Number of rounds of federated learning (default: 1)", - ) - parser.add_argument( - "--sample_fraction", - type=float, - default=1.0, - help="Fraction of available clients used for fit/evaluate (default: 1.0)", - ) - parser.add_argument( - "--min_sample_size", - type=int, - default=2, - help="Minimum number of clients used for fit/evaluate (default: 2)", - ) - parser.add_argument( - "--min_num_clients", - type=int, - default=2, - help="Minimum number of available clients required for sampling (default: 2)", - ) - parser.add_argument( - "--log_host", - type=str, - help="Logserver address (no default)", - ) - args = parser.parse_args() - - # Load evaluation data - _, testset = cifar.load_data() - - # Create strategy - strategy = fl.server.strategy.FedAvg( - fraction_fit=args.sample_fraction, - min_fit_clients=args.min_sample_size, - min_available_clients=args.min_num_clients, - evaluate_fn=get_evaluate_fn(testset), - on_fit_config_fn=fit_config, - ) - - # Configure logger and start server - fl.common.logger.configure("server", host=args.log_host) - fl.server.start_server( - args.server_address, - config={"num_rounds": args.rounds}, - strategy=strategy, - ) - - -def fit_config(server_round: int) -> Dict[str, fl.common.Scalar]: - """Return a configuration with static batch size and (local) epochs.""" - config: Dict[str, fl.common.Scalar] = { - "epoch_global": str(server_round), - "epochs": str(1), - "batch_size": str(32), - } - return config - - -def 
get_evaluate_fn( - testset: torchvision.datasets.CIFAR10, -) -> Callable[[fl.common.NDArrays], Optional[Tuple[float, float]]]: - """Return an evaluation function for centralized evaluation.""" - - def evaluate(weights: fl.common.NDArrays) -> Optional[Tuple[float, float]]: - """Use the entire CIFAR-10 test set for evaluation.""" - model = cifar.load_model() - model.set_weights(weights) - model.to(DEVICE) - testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False) - return cifar.test(model, testloader, device=DEVICE) - - return evaluate - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_example/pytorch_imagenet/__init__.py b/src/py/flwr_example/pytorch_imagenet/__init__.py deleted file mode 100644 index 5a78f1f627df..000000000000 --- a/src/py/flwr_example/pytorch_imagenet/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower example using PyTorch for ImageNet image classification.""" - - -DEFAULT_SERVER_ADDRESS = "[::]:8080" diff --git a/src/py/flwr_example/pytorch_imagenet/client.py b/src/py/flwr_example/pytorch_imagenet/client.py deleted file mode 100644 index 72732edee6ef..000000000000 --- a/src/py/flwr_example/pytorch_imagenet/client.py +++ /dev/null @@ -1,206 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower client example using PyTorch for Imagenet image classification.""" - - -import argparse -import timeit -from collections import OrderedDict - -import numpy as np -import torch -import torchvision -import torchvision.models as models - -import flwr as fl - -from . import imagenet - -DEFAULT_SERVER_ADDRESS = "[::]:8080" - -# pylint: disable=no-member -DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") -# pylint: enable=no-member - - -def get_weights(model: torch.nn.ModuleList) -> fl.common.NDArrays: - """Get model weights as a list of NumPy ndarrays.""" - return [val.cpu().numpy() for _, val in model.state_dict().items()] - - -def set_weights(model: torch.nn.ModuleList, weights: fl.common.NDArrays) -> None: - """Set model weights from a list of NumPy ndarrays.""" - state_dict = OrderedDict( - { - k: torch.tensor(np.atleast_1d(v)) - for k, v in zip(model.state_dict().keys(), weights) - } - ) - model.load_state_dict(state_dict, strict=True) - - -class ImageNetClient(fl.client.Client): - """Flower client implementing ImageNet image classification using PyTorch.""" - - def __init__( - self, - cid: str, - trainset: torchvision.datasets, - testset: torchvision.datasets, - nb_clients: int, - ) -> None: - self.cid = cid - self.model = models.resnet18().to(DEVICE) - self.trainset = trainset - self.testset = testset - 
self.nb_clients = nb_clients - - def get_parameters(self) -> fl.common.ParametersRes: - print(f"Client {self.cid}: get_parameters") - weights: fl.common.NDArrays = get_weights(self.model) - parameters = fl.common.ndarrays_to_parameters(weights) - return fl.common.ParametersRes(parameters=parameters) - - def fit(self, ins: fl.common.FitIns) -> fl.common.FitRes: - # Set the seed so we are sure to generate the same global batches - # indices across all clients - np.random.seed(123) - - print(f"Client {self.cid}: fit") - - weights: fl.common.NDArrays = fl.common.parameters_to_ndarrays(ins.parameters) - config = ins.config - fit_begin = timeit.default_timer() - - # Get training config - epochs = int(config["epochs"]) - batch_size = int(config["batch_size"]) - - # Set model parameters - set_weights(self.model, weights) - - # Get the data corresponding to this client - dataset_size = len(self.trainset) - nb_samples_per_clients = dataset_size // self.nb_clients - dataset_indices = list(range(dataset_size)) - np.random.shuffle(dataset_indices) - - # Get starting and ending indices w.r.t cid - start_ind = int(self.cid) * nb_samples_per_clients - end_ind = (int(self.cid) * nb_samples_per_clients) + nb_samples_per_clients - train_sampler = torch.utils.data.SubsetRandomSampler( - dataset_indices[start_ind:end_ind] - ) - - # Train model - trainloader = torch.utils.data.DataLoader( - self.trainset, batch_size=batch_size, shuffle=False, sampler=train_sampler - ) - - imagenet.train(self.model, trainloader, epochs=epochs, device=DEVICE) - - # Return the refined weights and the number of examples used for training - weights_prime: fl.common.NDArrays = get_weights(self.model) - params_prime = fl.common.ndarrays_to_parameters(weights_prime) - num_examples_train = len(self.trainset) - fit_duration = timeit.default_timer() - fit_begin - return fl.common.FitRes( - parameters=params_prime, - num_examples=num_examples_train, - num_examples_ceil=num_examples_train, - 
fit_duration=fit_duration, - ) - - def evaluate(self, ins: fl.common.EvaluateIns) -> fl.common.EvaluateRes: - # Set the set so we are sure to generate the same batches - # across all clients. - np.random.seed(123) - - print(f"Client {self.cid}: evaluate") - - config = ins.config - batch_size = int(config["batch_size"]) - - weights = fl.common.parameters_to_ndarrays(ins.parameters) - - # Use provided weights to update the local model - set_weights(self.model, weights) - - # Get the data corresponding to this client - dataset_size = len(self.testset) - nb_samples_per_clients = dataset_size // self.nb_clients - dataset_indices = list(range(dataset_size)) - np.random.shuffle(dataset_indices) - - # Get starting and ending indices w.r.t cid - start_ind = int(self.cid) * nb_samples_per_clients - end_ind = (int(self.cid) * nb_samples_per_clients) + nb_samples_per_clients - test_sampler = torch.utils.data.SubsetRandomSampler( - dataset_indices[start_ind:end_ind] - ) - - # Evaluate the updated model on the local dataset - testloader = torch.utils.data.DataLoader( - self.testset, batch_size=batch_size, shuffle=False, sampler=test_sampler - ) - - loss, accuracy = imagenet.test(self.model, testloader, device=DEVICE) - - # Return the number of evaluation examples and the evaluation result (loss) - return fl.common.EvaluateRes( - loss=float(loss), num_examples=len(self.testset), accuracy=float(accuracy) - ) - - -def main() -> None: - """Load data, create and start CifarClient.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--server_address", - type=str, - default=DEFAULT_SERVER_ADDRESS, - help=f"gRPC server address (default: {DEFAULT_SERVER_ADDRESS})", - ) - parser.add_argument( - "--cid", type=str, required=True, help="Client CID (no default)" - ) - parser.add_argument( - "--data_path", type=str, required=True, help="ImageNet datapath" - ) - parser.add_argument( - "--log_host", - type=str, - help="Logserver address (no default)", - ) - 
parser.add_argument( - "--nb_clients", - type=int, - default=40, - help="Total number of clients", - ) - args = parser.parse_args() - - # Configure logger - fl.common.logger.configure(f"client_{args.cid}", host=args.log_host) - - trainset, testset = imagenet.load_data(args.data_path) - - # Start client - client = ImageNetClient(args.cid, trainset, testset, args.nb_clients) - fl.client.start_client(args.server_address, client) - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_example/pytorch_imagenet/imagenet.py b/src/py/flwr_example/pytorch_imagenet/imagenet.py deleted file mode 100644 index 20ec24fd85bf..000000000000 --- a/src/py/flwr_example/pytorch_imagenet/imagenet.py +++ /dev/null @@ -1,169 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""PyTorch ImageNet image classification. 
- -ImageNet dataset must be downloaded first -http://image-net.org - -""" - - -# mypy: ignore-errors - - -import os -from collections import OrderedDict -from typing import Tuple - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -import torchvision -import torchvision.datasets as datasets -import torchvision.transforms as transforms -from torch import Tensor -from tqdm import tqdm - -import flwr as fl - - -def load_data(data_path) -> Tuple[datasets.ImageFolder, datasets.ImageFolder]: - """Load ImageNet (training and val set).""" - - # Load ImageNet and normalize - traindir = os.path.join(data_path, "train") - valdir = os.path.join(data_path, "val") - - normalize = transforms.Normalize( - mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] - ) - - trainset = datasets.ImageFolder( - traindir, - transforms.Compose( - [ - transforms.RandomResizedCrop(224), - transforms.RandomHorizontalFlip(), - transforms.ToTensor(), - normalize, - ] - ), - ) - - valset = datasets.ImageFolder( - valdir, - transforms.Compose( - [ - transforms.Resize(256), - transforms.CenterCrop(224), - transforms.ToTensor(), - normalize, - ] - ), - ) - - return trainset, valset - - -def train( - net: torch.nn.ModuleList, - trainloader: torch.utils.data.DataLoader, - epochs: int, - device: torch.device, -) -> None: - """Train the network.""" - - # Define loss and optimizer - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.Adadelta(net.parameters(), lr=1.0) - - print(f"Training {epochs} epoch(s) w/ {len(trainloader)} batches each") - - # Train the network - for epoch in range(epochs): # loop over the dataset multiple times - running_loss = 0.0 - acc1 = 0.0 - acc5 = 0.0 - for i, data in enumerate(tqdm(trainloader), 0): - images, labels = data[0].to(device), data[1].to(device) - - # zero the parameter gradients - optimizer.zero_grad() - - # forward + backward + optimize - outputs = net(images) - loss = criterion(outputs, labels) - loss.backward() - 
optimizer.step() - - # print statistics - running_loss += loss.item() - tmp1, tmp2 = accuracy(outputs, labels, topk=(1, 5)) - acc1, acc5 = acc1 + tmp1, acc5 + tmp2 - if i % 5 == 4: # print every 5 mini-batches - print( - "[%d, %5d] loss: %.3f acc1: %.3f acc5: %.3f" - % ( - epoch + 1, - i + 1, - running_loss / (i + 1), - acc1 / (i + 1), - acc5 / (i + 1), - ), - flush=True, - ) - - -def test( - net: torch.nn.ModuleList, - testloader: torch.utils.data.DataLoader, - device: torch.device, -) -> Tuple[float, float]: - """Validate the network on the entire test set.""" - criterion = nn.CrossEntropyLoss() - total = 0 - loss = 0.0 - acc1 = 0.0 - acc5 = 0.0 - with torch.no_grad(): - i = 0 - for data in tqdm(testloader): - images, labels = data[0].to(device), data[1].to(device) - outputs = net(images) - loss += criterion(outputs, labels).item() - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - tmp1, tmp2 = accuracy(outputs, labels, topk=(1, 5)) - acc1, acc5 = acc1 + tmp1, acc5 + tmp2 - i += 1 - return loss / i, acc1 / i - - -def accuracy(output, target, topk=(1,)): - """Computes the accuracy over the k top predictions for the specified values of k""" - with torch.no_grad(): - maxk = max(topk) - batch_size = target.size(0) - - _, pred = output.topk(maxk, 1, True, True) - pred = pred.t() - correct = pred.eq(target.view(1, -1).expand_as(pred)) - - res = [] - for k in topk: - correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) - res.append(correct_k.mul_(100.0 / batch_size)) - return res diff --git a/src/py/flwr_example/pytorch_imagenet/run-server.sh b/src/py/flwr_example/pytorch_imagenet/run-server.sh deleted file mode 100755 index cd6909202d6f..000000000000 --- a/src/py/flwr_example/pytorch_imagenet/run-server.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -IMAGENET_PATH="~/Downloads/imagenet-object-localization-challenge/" - -# Start a Flower server -python -m flwr_example.pytorch_imagenet.server \ - --rounds=100 \ - --sample_fraction=0.25 \ - --min_sample_size=10 \ - --min_num_clients=30 \ - --data_path=$IMAGENET_PATH diff --git a/src/py/flwr_example/pytorch_imagenet/server.py b/src/py/flwr_example/pytorch_imagenet/server.py deleted file mode 100644 index cde8a3d572fd..000000000000 --- a/src/py/flwr_example/pytorch_imagenet/server.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Minimal example on how to start a simple Flower server.""" - - -import argparse -from typing import Callable, Dict, Optional, Tuple - -import torch -import torchvision -import torchvision.models as models - -import flwr as fl - -from . import imagenet -from .client import get_weights, set_weights - -DEFAULT_SERVER_ADDRESS = "[::]:8080" - -# pylint: disable=no-member -DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") -# pylint: enable=no-member - - -def main() -> None: - """Start server and train five rounds.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--server_address", - type=str, - default=DEFAULT_SERVER_ADDRESS, - help=f"gRPC server address (default: {DEFAULT_SERVER_ADDRESS})", - ) - parser.add_argument( - "--rounds", - type=int, - default=1, - help="Number of rounds of federated learning (default: 1)", - ) - parser.add_argument( - "--data_path", type=str, required=True, help="ImageNet datapath" - ) - parser.add_argument( - "--sample_fraction", - type=float, - default=1.0, - help="Fraction of available clients used for fit/evaluate (default: 1.0)", - ) - parser.add_argument( - "--min_sample_size", - type=int, - default=2, - help="Minimum number of clients used for fit/evaluate (default: 2)", - ) - parser.add_argument( - "--min_num_clients", - type=int, - default=2, - help="Minimum number of available clients required for sampling (default: 2)", - ) - parser.add_argument( - "--log_host", - type=str, - help="Logserver address (no default)", - ) - args = parser.parse_args() - - # Load evaluation data - _, testset = imagenet.load_data(args.data_path) - - # Create strategy - strategy = fl.server.strategy.FedAvg( - fraction_fit=args.sample_fraction, - min_fit_clients=args.min_sample_size, - min_available_clients=args.min_num_clients, - evaluate_fn=get_evaluate_fn(testset), - on_fit_config_fn=fit_config, - ) - - # 
Configure logger and start server - fl.common.logger.configure("server", host=args.log_host) - fl.server.start_server( - args.server_address, - config={"num_rounds": args.rounds}, - strategy=strategy, - ) - - -def fit_config(server_round: int) -> Dict[str, fl.common.Scalar]: - """Return a configuration with static batch size and (local) epochs.""" - config: Dict[str, fl.common.Scalar] = { - "epoch_global": str(server_round), - "epochs": str(5), - "batch_size": str(128), - } - return config - - -def get_evaluate_fn( - testset: torchvision.datasets, -) -> Callable[[fl.common.NDArrays], Optional[Tuple[float, float]]]: - """Return an evaluation function for centralized evaluation.""" - - def evaluate(weights: fl.common.NDArrays) -> Optional[Tuple[float, float]]: - """Use the entire ImageNet test set for evaluation.""" - - model = models.resnet18() - - set_weights(model, weights) - model.to(DEVICE) - model.eval() - - testloader = torch.utils.data.DataLoader( - testset, num_workers=6, batch_size=128, shuffle=False - ) - return imagenet.test(model, testloader, device=DEVICE) - - return evaluate - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_example/pytorch_save_weights/__init__.py b/src/py/flwr_example/pytorch_save_weights/__init__.py deleted file mode 100644 index 7a4638952123..000000000000 --- a/src/py/flwr_example/pytorch_save_weights/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower example using PyTorch for CIFAR-10 image classification.""" diff --git a/src/py/flwr_example/pytorch_save_weights/cifar.py b/src/py/flwr_example/pytorch_save_weights/cifar.py deleted file mode 100644 index e9fb0552a054..000000000000 --- a/src/py/flwr_example/pytorch_save_weights/cifar.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""PyTorch CIFAR-10 image classification. - -The code is generally adapted from 'PyTorch: A 60 Minute Blitz'. 
Further -explanations are given in the official PyTorch tutorial: - -https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html -""" - - -# mypy: ignore-errors -# pylint: disable=W0223 - - -from collections import OrderedDict -from typing import Tuple - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torchvision -import torchvision.transforms as transforms -from torch import Tensor - -import flwr as fl - -DATA_ROOT = "~/.flower/data/cifar-10" - - -# pylint: disable=unsubscriptable-object -class Net(nn.Module): - """Simple CNN adapted from 'PyTorch: A 60 Minute Blitz'.""" - - def __init__(self) -> None: - super(Net, self).__init__() - self.conv1 = nn.Conv2d(3, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 5 * 5, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, 10) - - # pylint: disable=arguments-differ,invalid-name - def forward(self, x: Tensor) -> Tensor: - """Compute forward pass.""" - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 5 * 5) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - x = self.fc3(x) - return x - - def get_weights(self) -> fl.common.NDArrays: - """Get model weights as a list of NumPy ndarrays.""" - return [val.cpu().numpy() for _, val in self.state_dict().items()] - - def set_weights(self, weights: fl.common.NDArrays) -> None: - """Set model weights from a list of NumPy ndarrays.""" - state_dict = OrderedDict( - {k: torch.tensor(v) for k, v in zip(self.state_dict().keys(), weights)} - ) - self.load_state_dict(state_dict, strict=True) - - -# pylint: disable=unused-argument -def load_data() -> Tuple[torch.utils.data.DataLoader, torch.utils.data.DataLoader]: - """Load CIFAR-10 (training and test set).""" - transform = transforms.Compose( - [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] - ) - - # Training set - trainset = torchvision.datasets.CIFAR10( - 
root=DATA_ROOT, train=True, download=True, transform=transform - ) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=32, shuffle=True) - - # Test set - testset = torchvision.datasets.CIFAR10( - root=DATA_ROOT, train=False, download=True, transform=transform - ) - testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False) - - return trainloader, testloader - - -def train( - net: Net, - trainloader: torch.utils.data.DataLoader, - epochs: int, - device: torch.device, # pylint: disable=no-member -) -> None: - """Train the network.""" - # Define loss and optimizer - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9) - - print(f"Training {epochs} epoch(s) w/ {len(trainloader)} batches each") - - # Train the network - for epoch in range(epochs): # loop over the dataset multiple times - running_loss = 0.0 - for i, data in enumerate(trainloader, 0): - images, labels = data[0].to(device), data[1].to(device) - - # zero the parameter gradients - optimizer.zero_grad() - - # forward + backward + optimize - outputs = net(images) - loss = criterion(outputs, labels) - loss.backward() - optimizer.step() - - # print statistics - running_loss += loss.item() - if i % 2000 == 1999: # print every 2000 mini-batches - print("[%d, %5d] loss: %.3f" % (epoch + 1, i + 1, running_loss / 2000)) - running_loss = 0.0 - - -def test( - net: Net, - testloader: torch.utils.data.DataLoader, - device: torch.device, # pylint: disable=no-member -) -> Tuple[float, float]: - """Validate the network on the entire test set.""" - criterion = nn.CrossEntropyLoss() - correct = 0 - total = 0 - loss = 0.0 - with torch.no_grad(): - for data in testloader: - images, labels = data[0].to(device), data[1].to(device) - outputs = net(images) - loss += criterion(outputs, labels).item() - _, predicted = torch.max(outputs.data, 1) # pylint: disable=no-member - total += labels.size(0) - correct += (predicted == labels).sum().item() - 
accuracy = correct / total - return loss, accuracy diff --git a/src/py/flwr_example/pytorch_save_weights/client.py b/src/py/flwr_example/pytorch_save_weights/client.py deleted file mode 100644 index 6f35fdb881c3..000000000000 --- a/src/py/flwr_example/pytorch_save_weights/client.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower client example using PyTorch for CIFAR-10 image classification.""" - - -import argparse -import timeit -from typing import Dict, List, Tuple - -import numpy as np -import torch -import torchvision - -import flwr as fl -from flwr.common import ( - EvaluateIns, - EvaluateRes, - FitIns, - FitRes, - NDArrays, - ParametersRes, -) - -from . 
import cifar - -# pylint: disable=no-member -DEVICE: str = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - -# Flower Client -class CifarClient(fl.client.NumPyClient): - """Flower client implementing CIFAR-10 image classification using PyTorch.""" - - def __init__( - self, - model: cifar.Net, - trainloader: torch.utils.data.DataLoader, - testloader: torch.utils.data.DataLoader, - ) -> None: - self.model = model - self.trainloader = trainloader - self.testloader = testloader - - def get_parameters(self) -> List[np.ndarray]: - return self.model.get_weights() - - def fit( - self, parameters: List[np.ndarray], config: Dict[str, fl.common.Scalar] - ) -> Tuple[List[np.ndarray], int]: - # Set model parameters - self.model.set_weights(parameters) - - # Train model - cifar.train(self.model, self.trainloader, epochs=1, device=DEVICE) - - # Return the updated model parameters - return self.model.get_weights(), len(self.trainloader) - - def evaluate( - self, parameters: List[np.ndarray], config: Dict[str, fl.common.Scalar] - ) -> Tuple[int, float, float]: - # Use provided weights to update the local model - self.model.set_weights(parameters) - - # Evaluate the updated model on the local dataset - loss, accuracy = cifar.test(self.model, self.testloader, device=DEVICE) - - # Return the number of evaluation examples and the evaluation result (loss) - return len(self.testloader), float(loss), float(accuracy) - - -def main() -> None: - """Load data, start CifarClient.""" - - # Load model and data - model = cifar.Net() - model.to(DEVICE) - trainloader, testloader = cifar.load_data() - - # Start client - client = CifarClient(model, trainloader, testloader) - fl.client.start_numpy_client(server_address="[::]:8080", client=client) - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_example/pytorch_save_weights/run-server.sh b/src/py/flwr_example/pytorch_save_weights/run-server.sh deleted file mode 100755 index 0464c371169c..000000000000 --- 
a/src/py/flwr_example/pytorch_save_weights/run-server.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# Start a Flower server -python -m flwr_example.pytorch_save_weights.server diff --git a/src/py/flwr_example/pytorch_save_weights/server.py b/src/py/flwr_example/pytorch_save_weights/server.py deleted file mode 100644 index ede93e268c17..000000000000 --- a/src/py/flwr_example/pytorch_save_weights/server.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Flower server example.""" - - -import argparse -from typing import Callable, Dict, List, Optional, Tuple, Union - -import numpy as np -import torch -import torchvision - -import flwr as fl - -from . import cifar - - -class SaveModelStrategy(fl.server.strategy.FedAvg): - def aggregate_fit( - self, - server_round: int, - results: List[Tuple[fl.server.client_proxy.ClientProxy, fl.common.FitRes]], - failures: List[ - Union[ - Tuple[fl.server.client_proxy.ClientProxy, fl.common.FitRes], - BaseException, - ] - ], - ) -> Optional[fl.common.NDArrays]: - weights = super().aggregate_fit(server_round, results, failures) - if weights is not None: - # Save weights - print(f"Saving round {server_round} weights...") - np.savez(f"round-{server_round}-weights.npz", *weights) - return weights - - -def main() -> None: - """Start server and train five rounds.""" - # Load evaluation data - _, testloader = cifar.load_data() - - # Create client_manager, strategy, and server - strategy = SaveModelStrategy( - fraction_fit=1.0, - min_fit_clients=2, - min_available_clients=2, - evaluate_fn=get_evaluate_fn(testloader), - on_fit_config_fn=fit_config, - ) - - # Run server - fl.server.start_server( - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, - ) - - -def fit_config(server_round: int) -> Dict[str, fl.common.Scalar]: - """Return a configuration with static batch size and (local) epochs.""" - config: Dict[str, fl.common.Scalar] = { - "epoch_global": str(server_round), - "epochs": str(1), - "batch_size": str(32), - } - return config - - -def get_evaluate_fn( - testloader: torch.utils.data.DataLoader, -) -> Callable[[fl.common.NDArrays], Optional[Tuple[float, float]]]: - """Return an evaluation function for centralized evaluation.""" - - # pylint: disable=no-member - DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - # pylint: enable=no-member - - def evaluate(weights: 
fl.common.NDArrays) -> Optional[Tuple[float, float]]: - """Use the entire CIFAR-10 test set for evaluation.""" - model = cifar.Net() - model.set_weights(weights) - model.to(DEVICE) - return cifar.test(model, testloader, device=DEVICE) - - return evaluate - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_example/quickstart_pytorch/__init__.py b/src/py/flwr_example/quickstart_pytorch/__init__.py deleted file mode 100644 index f3ab11afad31..000000000000 --- a/src/py/flwr_example/quickstart_pytorch/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -warning = """ -DEPRECATION WARNING: Example moved to `examples/quickstart_pytorch`. - -All examples will be migrated to the `examples` directory. `flwr_example` will -be removed in a future release. -""" -print(warning) diff --git a/src/py/flwr_example/quickstart_pytorch/client.py b/src/py/flwr_example/quickstart_pytorch/client.py deleted file mode 100644 index fc675f123cc8..000000000000 --- a/src/py/flwr_example/quickstart_pytorch/client.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - - -from argparse import ArgumentParser - -import numpy as np -import torch - -import flwr as fl - -from . 
import mnist - -DATA_ROOT = "./data/mnist" - -if __name__ == "__main__": - # Training settings - parser = ArgumentParser(description="PyTorch MNIST Example") - parser.add_argument( - "--server_address", - type=str, - default="[::]:8080", - help=f"gRPC server address (default: '[::]:8080')", - ) - parser.add_argument( - "--cid", - type=int, - metavar="N", - help="ID of current client (default: 0)", - ) - parser.add_argument( - "--nb_clients", - type=int, - default=2, - metavar="N", - help="Total number of clients being launched (default: 2)", - ) - parser.add_argument( - "--train-batch-size", - type=int, - default=64, - metavar="N", - help="input batch size for training (default: 64)", - ) - parser.add_argument( - "--test-batch-size", - type=int, - default=1000, - metavar="N", - help="input batch size for testing (default: 1000)", - ) - parser.add_argument( - "--epochs", - type=int, - default=14, - metavar="N", - help="number of epochs to train (default: 14)", - ) - - args = parser.parse_args() - - # Load MNIST data - train_loader, test_loader = mnist.load_data( - data_root=DATA_ROOT, - train_batch_size=args.train_batch_size, - test_batch_size=args.test_batch_size, - cid=args.cid, - nb_clients=args.nb_clients, - ) - - # pylint: disable=no-member - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - # pylint: enable=no-member - - # Instantiate client - client = mnist.PytorchMNISTClient( - cid=args.cid, - train_loader=train_loader, - test_loader=test_loader, - epochs=args.epochs, - device=device, - ) - - # Start client - fl.client.start_client(args.server_address, client) diff --git a/src/py/flwr_example/quickstart_pytorch/mnist.py b/src/py/flwr_example/quickstart_pytorch/mnist.py deleted file mode 100644 index 9a65145e6ca3..000000000000 --- a/src/py/flwr_example/quickstart_pytorch/mnist.py +++ /dev/null @@ -1,436 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""PyTorch MNIST image classification. - -The code is generally adapted from PyTorch's Basic MNIST Example. -The original code can be inspected in the official PyTorch github: - -https://github.com/pytorch/examples/blob/master/mnist/main.py -""" - - -# mypy: ignore-errors -# pylint: disable=W0223 - -import timeit -from collections import OrderedDict -from typing import Tuple - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch import Tensor, optim -from torch.optim.lr_scheduler import StepLR -from torch.utils.data import DataLoader, Dataset, SubsetRandomSampler -from torchvision import datasets, transforms - -import flwr as fl - - -def dataset_partitioner( - dataset: torch.utils.data.Dataset, - batch_size: int, - client_id: int, - number_of_clients: int, -) -> torch.utils.data.DataLoader: - """Helper function to partition datasets - - Parameters - ---------- - dataset: torch.utils.data.Dataset - Dataset to be partitioned into *number_of_clients* subsets. - - batch_size: int - Size of mini-batches used by the returned DataLoader. - - client_id: int - Unique integer used for selecting a specific partition. - - number_of_clients: int - Total number of clients launched during training. This value dictates the number of partitions to be created. 
- - - Returns - ------- - data_loader: torch.utils.data.Dataset - DataLoader for specific client_id considering number_of_clients partitions. - - """ - - # Set the seed so we are sure to generate the same global batches - # indices across all clients - np.random.seed(123) - - # Get the data corresponding to this client - dataset_size = len(dataset) - nb_samples_per_clients = dataset_size // number_of_clients - dataset_indices = list(range(dataset_size)) - np.random.shuffle(dataset_indices) - - # Get starting and ending indices w.r.t CLIENT_ID - start_ind = client_id * nb_samples_per_clients - end_ind = start_ind + nb_samples_per_clients - data_sampler = SubsetRandomSampler(dataset_indices[start_ind:end_ind]) - data_loader = torch.utils.data.DataLoader( - dataset, batch_size=batch_size, shuffle=False, sampler=data_sampler - ) - return data_loader - - -def load_data( - data_root: str, - train_batch_size: int, - test_batch_size: int, - cid: int, - nb_clients: int, -) -> Tuple[DataLoader, DataLoader]: - """Helper function that loads both training and test datasets for MNIST. - - Parameters - ---------- - data_root: str - Directory where MNIST dataset will be stored. - - train_batch_size: int - Mini-batch size for training set. - - test_batch_size: int - Mini-batch size for test set. - - cid: int - Client ID used to select a specific partition. - - nb_clients: int - Total number of clients launched during training. This value dictates the number of unique to be created. - - - Returns - ------- - (train_loader, test_loader): Tuple[DataLoader, DataLoader] - Tuple contaning DataLoaders for training and test sets. 
- - """ - - transform = transforms.Compose( - [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))] - ) - - train_dataset = datasets.MNIST( - data_root, train=True, download=True, transform=transform - ) - test_dataset = datasets.MNIST(data_root, train=False, transform=transform) - - # Create partitioned datasets based on the total number of clients and client_id - train_loader = dataset_partitioner( - dataset=train_dataset, - batch_size=train_batch_size, - client_id=cid, - number_of_clients=nb_clients, - ) - - test_loader = dataset_partitioner( - dataset=test_dataset, - batch_size=test_batch_size, - client_id=cid, - number_of_clients=nb_clients, - ) - - return (train_loader, test_loader) - - -class MNISTNet(nn.Module): - """Simple CNN adapted from Pytorch's 'Basic MNIST Example'.""" - - def __init__(self) -> None: - super(MNISTNet, self).__init__() - self.conv1 = nn.Conv2d(1, 32, 3, 1) - self.conv2 = nn.Conv2d(32, 64, 3, 1) - self.dropout1 = nn.Dropout2d(0.25) - self.dropout2 = nn.Dropout2d(0.5) - self.fc1 = nn.Linear(9216, 128) - self.fc2 = nn.Linear(128, 10) - - # pylint: disable=arguments-differ,invalid-name - def forward(self, x: Tensor) -> Tensor: - """Compute forward pass. - - Parameters - ---------- - x: Tensor - Mini-batch of shape (N,28,28) containing images from MNIST dataset. - - - Returns - ------- - output: Tensor - The probability density of the output being from a specific class given the input. 
- - """ - x = self.conv1(x) - x = F.relu(x) - x = self.conv2(x) - x = F.relu(x) - x = F.max_pool2d(x, 2) - x = self.dropout1(x) - x = torch.flatten(x, 1) - x = self.fc1(x) - x = F.relu(x) - x = self.dropout2(x) - x = self.fc2(x) - output = F.log_softmax(x, dim=1) - return output - - -def train( - model: torch.nn.Module, - train_loader: torch.utils.data.DataLoader, - epochs: int, - device: torch.device = torch.device("cpu"), -) -> int: - """Train routine based on 'Basic MNIST Example' - - Parameters - ---------- - model: torch.nn.Module - Neural network model used in this example. - - train_loader: torch.utils.data.DataLoader - DataLoader used in training. - - epochs: int - Number of epochs to run in each round. - - device: torch.device - (Default value = torch.device("cpu")) - Device where the network will be trained within a client. - - Returns - ------- - num_examples_train: int - Number of total samples used during training. - - """ - model.train() - optimizer = optim.Adadelta(model.parameters(), lr=1.0) - scheduler = StepLR(optimizer, step_size=1, gamma=0.7) - print(f"Training {epochs} epoch(s) w/ {len(train_loader)} mini-batches each") - for epoch in range(epochs): # loop over the dataset multiple times - print() - loss_epoch: float = 0.0 - num_examples_train: int = 0 - for batch_idx, (data, target) in enumerate(train_loader): - # Grab mini-batch and transfer to device - data, target = data.to(device), target.to(device) - num_examples_train += len(data) - - # Zero gradients - optimizer.zero_grad() - - output = model(data) - loss = F.nll_loss(output, target) - loss.backward() - optimizer.step() - - loss_epoch += loss.item() - if batch_idx % 10 == 8: - print( - "Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}\t\t\t\t".format( - epoch, - num_examples_train, - len(train_loader) * train_loader.batch_size, - 100.0 - * num_examples_train - / len(train_loader) - / train_loader.batch_size, - loss.item(), - ), - end="\r", - flush=True, - ) - scheduler.step() - return 
num_examples_train - - -def test( - model: torch.nn.Module, - test_loader: torch.utils.data.DataLoader, - device: torch.device = torch.device("cpu"), -) -> Tuple[int, float, float]: - """Test routine 'Basic MNIST Example' - - Parameters - ---------- - model: torch.nn.Module : - Neural network model used in this example. - - test_loader: torch.utils.data.DataLoader : - DataLoader used in test. - - device: torch.device : - (Default value = torch.device("cpu")) - Device where the network will be tested within a client. - - Returns - ------- - Tuple containing the total number of test samples, the test_loss, and the accuracy evaluated on the test set. - - """ - model.eval() - test_loss: float = 0 - correct: int = 0 - num_test_samples: int = 0 - with torch.no_grad(): - for data, target in test_loader: - data, target = data.to(device), target.to(device) - num_test_samples += len(data) - output = model(data) - test_loss += F.nll_loss( - output, target, reduction="sum" - ).item() # sum up batch loss - pred = output.argmax( - dim=1, keepdim=True - ) # get the index of the max log-probability - correct += pred.eq(target.view_as(pred)).sum().item() - - test_loss /= num_test_samples - - return (num_test_samples, test_loss, correct / num_test_samples) - - -class PytorchMNISTClient(fl.client.Client): - """Flower client implementing MNIST handwritten classification using PyTorch.""" - - def __init__( - self, - cid: int, - train_loader: datasets, - test_loader: datasets, - epochs: int, - device: torch.device = torch.device("cpu"), - ) -> None: - self.model = MNISTNet().to(device) - self.cid = cid - self.train_loader = train_loader - self.test_loader = test_loader - self.device = device - self.epochs = epochs - - def get_weights(self) -> fl.common.NDArrays: - """Get model weights as a list of NumPy ndarrays.""" - return [val.cpu().numpy() for _, val in self.model.state_dict().items()] - - def set_weights(self, weights: fl.common.NDArrays) -> None: - """Set model weights from a list 
of NumPy ndarrays. - - Parameters - ---------- - weights: fl.common.NDArrays - Weights received by the server and set to local model - - - Returns - ------- - - """ - state_dict = OrderedDict( - { - k: torch.tensor(v) - for k, v in zip(self.model.state_dict().keys(), weights) - } - ) - self.model.load_state_dict(state_dict, strict=True) - - def get_parameters(self) -> fl.common.ParametersRes: - """Encapsulates the weights into Flower Parameters.""" - weights: fl.common.NDArrays = self.get_weights() - parameters = fl.common.ndarrays_to_parameters(weights) - return fl.common.ParametersRes(parameters=parameters) - - def fit(self, ins: fl.common.FitIns) -> fl.common.FitRes: - """Trains the model on local dataset - - Parameters - ---------- - ins: fl.common.FitIns - Parameters sent by the server to be used during training. - - Returns - ------- - Set of variables containing the new set of weights and information the client. - - """ - - # Set the seed so we are sure to generate the same global batches - # indices across all clients - np.random.seed(123) - - weights: fl.common.NDArrays = fl.common.parameters_to_ndarrays(ins.parameters) - fit_begin = timeit.default_timer() - - # Set model parameters/weights - self.set_weights(weights) - - # Train model - num_examples_train: int = train( - self.model, self.train_loader, epochs=self.epochs, device=self.device - ) - - # Return the refined weights and the number of examples used for training - weights_prime: fl.common.NDArrays = self.get_weights() - params_prime = fl.common.ndarrays_to_parameters(weights_prime) - fit_duration = timeit.default_timer() - fit_begin - return fl.common.FitRes( - parameters=params_prime, - num_examples=num_examples_train, - num_examples_ceil=num_examples_train, - fit_duration=fit_duration, - ) - - def evaluate(self, ins: fl.common.EvaluateIns) -> fl.common.EvaluateRes: - """ - - Parameters - ---------- - ins: fl.common.EvaluateIns - Parameters sent by the server to be used during testing. 
- - - Returns - ------- - Information the clients testing results. - - """ - weights = fl.common.parameters_to_ndarrays(ins.parameters) - - # Use provided weights to update the local model - self.set_weights(weights) - - ( - num_examples_test, - test_loss, - accuracy, - ) = test(self.model, self.test_loader, device=self.device) - print( - f"Client {self.cid} - Evaluate on {num_examples_test} samples: Average loss: {test_loss:.4f}, Accuracy: {100*accuracy:.2f}%\n" - ) - - # Return the number of evaluation examples and the evaluation result (loss) - return fl.common.EvaluateRes( - loss=float(test_loss), - num_examples=num_examples_test, - accuracy=float(accuracy), - ) diff --git a/src/py/flwr_example/quickstart_pytorch/run-clients.sh b/src/py/flwr_example/quickstart_pytorch/run-clients.sh deleted file mode 100755 index f53d63dc6168..000000000000 --- a/src/py/flwr_example/quickstart_pytorch/run-clients.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -set -e - -SERVER_ADDRESS="[::]:8080" -NUM_CLIENTS=2 - -echo "Starting $NUM_CLIENTS clients." -for ((i = 0; i < $NUM_CLIENTS; i++)) -do - echo "Starting client(cid=$i) with partition $i out of $NUM_CLIENTS clients." 
- python -m flwr_example.quickstart_pytorch.client \ - --cid=$i \ - --server_address=$SERVER_ADDRESS \ - --nb_clients=$NUM_CLIENTS & -done -echo "Started $NUM_CLIENTS clients." diff --git a/src/py/flwr_example/quickstart_pytorch/run-server.sh b/src/py/flwr_example/quickstart_pytorch/run-server.sh deleted file mode 100755 index 29cb8cf7caf7..000000000000 --- a/src/py/flwr_example/quickstart_pytorch/run-server.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# Start a Flower server -python -m flwr_example.quickstart_pytorch.server diff --git a/src/py/flwr_example/quickstart_pytorch/server.py b/src/py/flwr_example/quickstart_pytorch/server.py deleted file mode 100644 index b8cc1e15aca1..000000000000 --- a/src/py/flwr_example/quickstart_pytorch/server.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - - -import flwr as fl - -if __name__ == "__main__": - fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3)) diff --git a/src/py/flwr_example/quickstart_tensorflow/__init__.py b/src/py/flwr_example/quickstart_tensorflow/__init__.py deleted file mode 100644 index c3a6e19194a5..000000000000 --- a/src/py/flwr_example/quickstart_tensorflow/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -warning = """ -DEPRECATION WARNING: Example moved to `examples/quickstart_tensorflow`. - -All examples will be migrated to the `examples` directory. `flwr_example` will -be removed in a future release. -""" -print(warning) diff --git a/src/py/flwr_example/quickstart_tensorflow/client.py b/src/py/flwr_example/quickstart_tensorflow/client.py deleted file mode 100644 index 9c4aab0b33ab..000000000000 --- a/src/py/flwr_example/quickstart_tensorflow/client.py +++ /dev/null @@ -1,55 +0,0 @@ -from typing import Tuple, cast - -import numpy as np -import tensorflow as tf - -import flwr as fl - -### uncomment this if you are getting the ssl error -# ssl._create_default_https_context = ssl._create_unverified_context -### - - -def main() -> None: - # Build and compile Keras model - model = tf.keras.models.Sequential( - [ - tf.keras.layers.Flatten(input_shape=(28, 28)), - tf.keras.layers.Dense(128, activation="relu"), - tf.keras.layers.Dropout(0.2), - tf.keras.layers.Dense(10, activation="softmax"), - ] - ) - - model.compile( - optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"] - ) - - # Load MNIST data - (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() - x_train, x_test = x_train / 255.0, x_test / 255.0 - - # Implement a Flower client - class MnistClient(fl.client.NumPyClient): - def get_parameters(self, config) -> fl.common.NDArrays: - return 
cast(fl.common.NDArrays, model.get_weights()) - - def fit(self, parameters, config) -> Tuple[fl.common.NDArrays, int, dict]: - model.set_weights(parameters) - model.fit(x_train, y_train, epochs=1, batch_size=32) - return model.get_weights(), len(x_train), {} - - def evaluate(self, parameters, config) -> Tuple[int, int, dict]: - model.set_weights(parameters) - loss, accuracy = model.evaluate(x_test, y_test) - return loss, len(x_test), {"accuracy": accuracy} - - # Start client - fl.client.start_numpy_client( - server_address="127.0.0.1:8080", - client=MnistClient(), - ) - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_example/quickstart_tensorflow/run-clients.sh b/src/py/flwr_example/quickstart_tensorflow/run-clients.sh deleted file mode 100755 index 5747bfe5fe1b..000000000000 --- a/src/py/flwr_example/quickstart_tensorflow/run-clients.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -set -e - -SERVER_ADDRESS="[::]:8080" -NUM_CLIENTS=2 - -echo "Starting $NUM_CLIENTS clients." -for ((i = 0; i < $NUM_CLIENTS; i++)) -do - echo "Starting client(cid=$i) with partition $i out of $NUM_CLIENTS clients." - python -m flwr_example.quickstart_tensorflow.client & -done -echo "Started $NUM_CLIENTS clients." 
diff --git a/src/py/flwr_example/quickstart_tensorflow/run-server.sh b/src/py/flwr_example/quickstart_tensorflow/run-server.sh deleted file mode 100755 index 030167972ac0..000000000000 --- a/src/py/flwr_example/quickstart_tensorflow/run-server.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# Start a Flower server -python -m flwr_example.quickstart_tensorflow.server diff --git a/src/py/flwr_example/quickstart_tensorflow/server.py b/src/py/flwr_example/quickstart_tensorflow/server.py deleted file mode 100644 index e621005ed277..000000000000 --- a/src/py/flwr_example/quickstart_tensorflow/server.py +++ /dev/null @@ -1,4 +0,0 @@ -import flwr as fl - -if __name__ == "__main__": - fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3)) diff --git a/src/py/flwr_example/tensorflow_fashion_mnist/__init__.py b/src/py/flwr_example/tensorflow_fashion_mnist/__init__.py deleted file mode 100644 index 3370fa33e781..000000000000 --- a/src/py/flwr_example/tensorflow_fashion_mnist/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower example using TensorFlow for Fashion-MNIST image classification.""" - - -DEFAULT_SERVER_ADDRESS = "[::]:8080" diff --git a/src/py/flwr_example/tensorflow_fashion_mnist/client.py b/src/py/flwr_example/tensorflow_fashion_mnist/client.py deleted file mode 100644 index bcee031163e1..000000000000 --- a/src/py/flwr_example/tensorflow_fashion_mnist/client.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower client example using TensorFlow for Fashion-MNIST image classification.""" - - -import argparse -from typing import Dict, Tuple, cast - -import numpy as np -import tensorflow as tf - -import flwr as fl -from flwr.common import NDArrays - -from . 
import DEFAULT_SERVER_ADDRESS, fashion_mnist - - -class FashionMnistClient(fl.client.KerasClient): - """Flower KerasClient implementing Fashion-MNIST image classification.""" - - def __init__( - self, - model: tf.keras.Model, - xy_train: Tuple[np.ndarray, np.ndarray], - xy_test: Tuple[np.ndarray, np.ndarray], - ): - self.model = model - self.x_train, self.y_train = xy_train - self.x_test, self.y_test = xy_test - - def get_weights(self) -> NDArrays: - return cast(NDArrays, self.model.get_weights()) - - def fit( - self, weights: NDArrays, config: Dict[str, fl.common.Scalar] - ) -> Tuple[NDArrays, int, int]: - # Use provided weights to update local model - self.model.set_weights(weights) - - # Train the local model using local dataset - self.model.fit( - self.x_train, - self.y_train, - batch_size=int(config["batch_size"]), - epochs=int(config["epochs"]), - ) - - # Return the refined weights and the number of examples used for training - return self.model.get_weights(), len(self.x_train), len(self.x_train) - - def evaluate( - self, weights: NDArrays, config: Dict[str, fl.common.Scalar] - ) -> Tuple[int, float, float]: - # Update local model and evaluate on local dataset - self.model.set_weights(weights) - loss, accuracy = self.model.evaluate( - self.x_test, self.y_test, batch_size=len(self.x_test), verbose=2 - ) - - # Return number of evaluation examples and evaluation result (loss/accuracy) - return len(self.x_test), float(loss), float(accuracy) - - -def main() -> None: - """Load data, create and start FashionMnistClient.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--server_address", - type=str, - default=DEFAULT_SERVER_ADDRESS, - help=f"gRPC server address (default: {DEFAULT_SERVER_ADDRESS})", - ) - parser.add_argument( - "--partition", type=int, required=True, help="Partition index (no default)" - ) - parser.add_argument( - "--clients", - type=int, - required=True, - help="Number of clients (no default)", - ) - 
parser.add_argument( - "--log_host", - type=str, - help="Logserver address (no default)", - ) - args = parser.parse_args() - - # Configure logger - fl.common.logger.configure(f"client_{args.partition}", host=args.log_host) - - # Load model and data - model = fashion_mnist.load_model() - xy_train, xy_test = fashion_mnist.load_data( - partition=args.partition, num_partitions=args.clients - ) - - # Start client - client = FashionMnistClient(model, xy_train, xy_test) - fl.client.start_keras_client(args.server_address, client) - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_example/tensorflow_fashion_mnist/download.py b/src/py/flwr_example/tensorflow_fashion_mnist/download.py deleted file mode 100644 index c2f0eb580cf0..000000000000 --- a/src/py/flwr_example/tensorflow_fashion_mnist/download.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Helper script to download CIFAR-10/100.""" - - -from logging import INFO - -import tensorflow as tf - -from flwr.common.logger import log - -tf.get_logger().setLevel("ERROR") - - -def main() -> None: - """Download data.""" - log(INFO, "Download Fashion-MNIST") - tf.keras.datasets.fashion_mnist.load_data() - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_example/tensorflow_fashion_mnist/fashion_mnist.py b/src/py/flwr_example/tensorflow_fashion_mnist/fashion_mnist.py deleted file mode 100644 index 6c7ad2eead79..000000000000 --- a/src/py/flwr_example/tensorflow_fashion_mnist/fashion_mnist.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Flower client example using TensorFlow for Fashion-MNIST image classification.""" - - -from typing import Tuple, cast - -import numpy as np -import tensorflow as tf - -tf.get_logger().setLevel("ERROR") - -SEED = 2020 - - -def load_model(input_shape: Tuple[int, int, int] = (28, 28, 1)) -> tf.keras.Model: - """Load model for Fashion-MNIST.""" - # Kernel initializer - kernel_initializer = tf.keras.initializers.glorot_uniform(seed=SEED) - - # Architecture - inputs = tf.keras.layers.Input(shape=input_shape) - layers = tf.keras.layers.Conv2D( - 32, - kernel_size=(5, 5), - strides=(1, 1), - kernel_initializer=kernel_initializer, - padding="same", - activation="relu", - )(inputs) - layers = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))(layers) - layers = tf.keras.layers.Conv2D( - 64, - kernel_size=(5, 5), - strides=(1, 1), - kernel_initializer=kernel_initializer, - padding="same", - activation="relu", - )(layers) - layers = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))(layers) - layers = tf.keras.layers.Flatten()(layers) - layers = tf.keras.layers.Dense( - 512, kernel_initializer=kernel_initializer, activation="relu" - )(layers) - - outputs = tf.keras.layers.Dense( - 10, kernel_initializer=kernel_initializer, activation="softmax" - )(layers) - - model = tf.keras.Model(inputs=inputs, outputs=outputs) - - # Compile model - model.compile( - optimizer=tf.keras.optimizers.Adam(), - loss=tf.keras.losses.categorical_crossentropy, - metrics=["accuracy"], - ) - return model - - -def load_data( - partition: int, num_partitions: int -) -> Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]: - """Load partition of randomly shuffled Fashion-MNIST subset.""" - # Load training and test data (ignoring the test data for now) - (x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data() - - # Take a subset - x_train, y_train = 
shuffle(x_train, y_train, seed=SEED) - x_test, y_test = shuffle(x_test, y_test, seed=SEED) - - x_train, y_train = get_partition(x_train, y_train, partition, num_partitions) - x_test, y_test = get_partition(x_test, y_test, partition, num_partitions) - - # Adjust x sets shape for model - x_train = adjust_x_shape(x_train) - x_test = adjust_x_shape(x_test) - - # Normalize data - x_train = x_train.astype("float32") / 255.0 - x_test = x_test.astype("float32") / 255.0 - - # Convert class vectors to one-hot encoded labels - y_train = tf.keras.utils.to_categorical(y_train, 10) - y_test = tf.keras.utils.to_categorical(y_test, 10) - - return (x_train, y_train), (x_test, y_test) - - -def adjust_x_shape(nda: np.ndarray) -> np.ndarray: - """Turn shape (x, y, z) into (x, y, z, 1).""" - nda_adjusted = np.reshape(nda, (nda.shape[0], nda.shape[1], nda.shape[2], 1)) - return cast(np.ndarray, nda_adjusted) - - -def shuffle( - x_orig: np.ndarray, y_orig: np.ndarray, seed: int -) -> Tuple[np.ndarray, np.ndarray]: - """Shuffle x and y in the same way.""" - np.random.seed(seed) - idx = np.random.permutation(len(x_orig)) - return x_orig[idx], y_orig[idx] - - -def get_partition( - x_orig: np.ndarray, y_orig: np.ndarray, partition: int, num_clients: int -) -> Tuple[np.ndarray, np.ndarray]: - """Return a single partition of an equally partitioned dataset.""" - step_size = len(x_orig) / num_clients - start_index = int(step_size * partition) - end_index = int(start_index + step_size) - return x_orig[start_index:end_index], y_orig[start_index:end_index] diff --git a/src/py/flwr_example/tensorflow_fashion_mnist/fashion_mnist_test.py b/src/py/flwr_example/tensorflow_fashion_mnist/fashion_mnist_test.py deleted file mode 100644 index f6b922b27eab..000000000000 --- a/src/py/flwr_example/tensorflow_fashion_mnist/fashion_mnist_test.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for Fashion-MNIST.""" - - -import numpy as np - -from .fashion_mnist import shuffle - - -def test_shuffle() -> None: - """Test if shuffle is deterministic depending on the provided seed.""" - # Prepare - x_tt = np.arange(8) - y_tt = np.arange(8) - - x_expected_2019 = np.array([1, 4, 3, 6, 7, 5, 2, 0]) - y_expected_2019 = np.array([1, 4, 3, 6, 7, 5, 2, 0]) - - x_expected_2020 = np.array([6, 2, 1, 4, 5, 3, 7, 0]) - y_expected_2020 = np.array([6, 2, 1, 4, 5, 3, 7, 0]) - - # Execute & assert - for _ in range(3): - x_actual, y_actual = shuffle(x_tt, y_tt, seed=2019) - np.testing.assert_array_equal(x_expected_2019, x_actual) - np.testing.assert_array_equal(y_expected_2019, y_actual) - - for _ in range(3): - x_actual, y_actual = shuffle(x_tt, y_tt, seed=2020) - np.testing.assert_array_equal(x_expected_2020, x_actual) - np.testing.assert_array_equal(y_expected_2020, y_actual) diff --git a/src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh b/src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh deleted file mode 100755 index 732688b9be08..000000000000 --- a/src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -set -e - -SERVER_ADDRESS="[::]:8080" -NUM_CLIENTS=10 - -echo "Starting $NUM_CLIENTS clients." -for ((i = 0; i < $NUM_CLIENTS; i++)) -do - echo "Starting client(cid=$i) with partition $i out of $NUM_CLIENTS clients." - python -m flwr_example.tensorflow_fashion_mnist.client \ - --cid=$i \ - --partition=$i \ - --clients=$NUM_CLIENTS \ - --server_address=$SERVER_ADDRESS & -done -echo "Started $NUM_CLIENTS clients." diff --git a/src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh b/src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh deleted file mode 100755 index d80f3c8b30ce..000000000000 --- a/src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -# Start a Flower server -python -m flwr_example.tensorflow_fashion_mnist.server \ - --rounds=5 \ - --sample_fraction=0.5 \ - --min_sample_size=5 \ - --min_num_clients=5 diff --git a/src/py/flwr_example/tensorflow_fashion_mnist/server.py b/src/py/flwr_example/tensorflow_fashion_mnist/server.py deleted file mode 100644 index f16da1e4bc93..000000000000 --- a/src/py/flwr_example/tensorflow_fashion_mnist/server.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Minimal example on how to start a simple Flower server.""" - - -import argparse -from typing import Callable, Dict, Optional, Tuple - -import numpy as np - -import flwr as fl - -from . 
import DEFAULT_SERVER_ADDRESS, fashion_mnist - - -def main() -> None: - """Start server and train five rounds.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--server_address", - type=str, - default=DEFAULT_SERVER_ADDRESS, - help=f"gRPC server address (default: {DEFAULT_SERVER_ADDRESS})", - ) - parser.add_argument( - "--rounds", - type=int, - default=1, - help="Number of rounds of federated learning (default: 1)", - ) - parser.add_argument( - "--sample_fraction", - type=float, - default=0.1, - help="Fraction of available clients used for fit/evaluate (default: 0.1)", - ) - parser.add_argument( - "--min_sample_size", - type=int, - default=1, - help="Minimum number of clients used for fit/evaluate (default: 1)", - ) - parser.add_argument( - "--min_num_clients", - type=int, - default=1, - help="Minimum number of available clients required for sampling (default: 1)", - ) - parser.add_argument( - "--log_host", - type=str, - help="Logserver address (no default)", - ) - args = parser.parse_args() - - # Load evaluation data - _, xy_test = fashion_mnist.load_data(partition=0, num_partitions=1) - - # Create strategy - strategy = fl.server.strategy.FedAvg( - fraction_fit=args.sample_fraction, - min_fit_clients=args.min_sample_size, - min_available_clients=args.min_num_clients, - evaluate_fn=get_evaluate_fn(xy_test=xy_test), - on_fit_config_fn=fit_config, - ) - - # Configure logger and start server - fl.common.logger.configure("server", host=args.log_host) - fl.server.start_server( - args.server_address, - config={"num_rounds": args.rounds}, - strategy=strategy, - ) - - -def fit_config(server_round: int) -> Dict[str, fl.common.Scalar]: - """Return a configuration with static batch size and (local) epochs.""" - config: Dict[str, fl.common.Scalar] = { - "epoch_global": str(server_round), - "epochs": str(1), - "batch_size": str(64), - } - return config - - -def get_evaluate_fn( - xy_test: Tuple[np.ndarray, np.ndarray] -) -> 
Callable[[fl.common.NDArrays], Optional[Tuple[float, float]]]: - """Return an evaluation function for centralized evaluation.""" - - def evaluate(weights: fl.common.NDArrays) -> Optional[Tuple[float, float]]: - """Use the entire Fashion-MNIST test set for evaluation.""" - model = fashion_mnist.load_model() - model.set_weights(weights) - loss, acc = model.evaluate(xy_test[0], xy_test[1], batch_size=len(xy_test)) - return float(loss), float(acc) - - return evaluate - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_experimental/__init__.py b/src/py/flwr_experimental/__init__.py deleted file mode 100644 index 1e9952588480..000000000000 --- a/src/py/flwr_experimental/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== diff --git a/src/py/flwr_experimental/baseline/__init__.py b/src/py/flwr_experimental/baseline/__init__.py deleted file mode 100644 index b2fefc3f319d..000000000000 --- a/src/py/flwr_experimental/baseline/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower Baselines.""" diff --git a/src/py/flwr_experimental/baseline/command.py b/src/py/flwr_experimental/baseline/command.py deleted file mode 100644 index a776347bacf6..000000000000 --- a/src/py/flwr_experimental/baseline/command.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Provides functions to construct various Flower CLI commands.""" - - -from typing import List, Optional - -from flwr_experimental.ops.instance import Instance - - -def install_wheel( - wheel_remote_path: str, wheel_extras: Optional[List[str]] = None -) -> str: - """Return install command for wheel. - - Remove previous versions if existing. 
- """ - extras = ["http-logger"] - - if wheel_extras: - extras += wheel_extras - - extras_str = ",".join(extras) - - return ( - "python3.7 -m pip uninstall -y flwr && " - + f"python3.7 -m pip install '{wheel_remote_path}[{extras_str}]'" - ) - - -def start_logserver( - logserver_s3_bucket: Optional[str] = None, logserver_s3_key: Optional[str] = None -) -> str: - """Return command to run logserver.""" - cmd = "screen -d -m python3.7 -m flwr_experimental.logserver" - - if logserver_s3_bucket is not None and logserver_s3_key is not None: - cmd += f" --s3_bucket={logserver_s3_bucket}" + f" --s3_key={logserver_s3_key}" - - return cmd - - -# pylint: disable=too-many-arguments -def start_server(log_host: str, baseline: str, setting: str) -> str: - """Build command to run server.""" - return ( - "screen -d -m" - + f" python3.7 -m flwr_experimental.baseline.{baseline}.server" - + f" --log_host={log_host}" - + f" --setting={setting}" - ) - - -def start_client( - server_address: str, log_host: str, baseline: str, setting: str, cid: str -) -> str: - """Build command to run client.""" - return ( - "screen -d -m" - + f" python3.7 -m flwr_experimental.baseline.{baseline}.client" - + f" --server_address={server_address}" - + f" --log_host={log_host}" - + f" --setting={setting}" - + f" --cid={cid}" - ) - - -def download_dataset(baseline: str) -> str: - """Return command which makes dataset locally available.""" - return f"python3.7 -m flwr_experimental.baseline.{baseline}.download" - - -def watch_and_shutdown(keyword: str, adapter: str) -> str: - """Return command which shuts down the instance after no baseline is - running anymore.""" - cmd = ( - f"screen -d -m bash -c 'while [[ $(ps a | grep -v grep | grep {keyword}) ]]; " - + "do sleep 1; done; " - ) - - if adapter == "docker": - cmd += "sleep 180 && kill 1'" - elif adapter == "ec2": - # Shutdown after 2 minutes to allow a logged in user - # to chancel the shutdown manually just in case - cmd += "sudo shutdown -P 3'" - else: - 
raise Exception("Unknown Adapter") - - return cmd - - -def tail_logfile(adapter: str, private_key: str, logserver: Instance) -> str: - """Return command which can be used to tail the logfile on the - logserver.""" - ssh_key = f"-i {private_key}" - username = "root" if adapter == "docker" else "ubuntu" - - return ( - f"ssh {ssh_key} -o StrictHostKeyChecking=no -p {logserver.ssh_port} " - + f"{username}@{logserver.public_ip}" - + ' "tail -n 1000 -f flower_logs/flower.log"' - ) diff --git a/src/py/flwr_experimental/baseline/common/__init__.py b/src/py/flwr_experimental/baseline/common/__init__.py deleted file mode 100644 index 706d2c41cb69..000000000000 --- a/src/py/flwr_experimental/baseline/common/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Common baseline components.""" - - -from .client import VisionClassificationClient as VisionClassificationClient -from .common import custom_fit as custom_fit -from .common import get_evaluate_fn as get_evaluate_fn -from .common import get_lr_schedule as get_lr_schedule -from .common import keras_evaluate as keras_evaluate -from .common import keras_fit as keras_fit -from .data import build_dataset as build_dataset -from .data import load_partition as load_partition diff --git a/src/py/flwr_experimental/baseline/common/client.py b/src/py/flwr_experimental/baseline/common/client.py deleted file mode 100644 index 18346218d919..000000000000 --- a/src/py/flwr_experimental/baseline/common/client.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Flower client using TensorFlow/Keras for image classification.""" - - -from logging import DEBUG -from typing import Tuple - -import numpy as np -import tensorflow as tf - -import flwr as fl -from flwr.common.logger import log - -from .common import custom_fit, keras_evaluate -from .data import build_dataset - -tf.get_logger().setLevel("ERROR") - - -class VisionClassificationClient(fl.client.Client): - """Flower client implementing image classification using - TensorFlow/Keras.""" - - # pylint: disable=too-many-arguments - def __init__( - self, - cid: str, - model: tf.keras.Model, - xy_train: Tuple[np.ndarray, np.ndarray], - xy_test: Tuple[np.ndarray, np.ndarray], - delay_factor: float, - num_classes: int, - augment: bool = False, - augment_horizontal_flip: bool = False, - augment_offset: int = 0, - normalization_factor: float = 255.0, - ): - self.cid = cid - self.model = model - self.ds_train = build_dataset( - xy_train[0], - xy_train[1], - num_classes=num_classes, - shuffle_buffer_size=len(xy_train[0]), - augment=augment, - augment_horizontal_flip=augment_horizontal_flip, - augment_offset=augment_offset, - normalization_factor=normalization_factor, - ) - self.ds_test = build_dataset( - xy_test[0], - xy_test[1], - num_classes=num_classes, - shuffle_buffer_size=0, - augment=False, - normalization_factor=normalization_factor, - ) - self.num_examples_train = len(xy_train[0]) - self.num_examples_test = len(xy_test[0]) - self.delay_factor = delay_factor - - def get_parameters(self) -> fl.common.ParametersRes: - parameters = fl.common.ndarrays_to_parameters(self.model.get_weights()) - return fl.common.ParametersRes(parameters=parameters) - - def fit(self, ins: fl.common.FitIns) -> fl.common.FitRes: - weights: fl.common.NDArrays = fl.common.parameters_to_ndarrays(ins.parameters) - config = ins.config - log( - DEBUG, - "fit on %s (examples: %s), config %s", - self.cid, - 
self.num_examples_train, - config, - ) - - # Training configuration - # epoch_global = int(config["epoch_global"]) - epochs = int(config["epochs"]) - batch_size = int(config["batch_size"]) - # lr_initial = float(config["lr_initial"]) - # lr_decay = float(config["lr_decay"]) - timeout = int(config["timeout"]) if "timeout" in config else None - partial_updates = bool(int(config["partial_updates"])) - - # Use provided weights to update the local model - self.model.set_weights(weights) - - # Train the local model using the local dataset - completed, fit_duration, num_examples = custom_fit( - model=self.model, - dataset=self.ds_train, - num_epochs=epochs, - batch_size=batch_size, - callbacks=[], - delay_factor=self.delay_factor, - timeout=timeout, - ) - log(DEBUG, "client %s had fit_duration %s", self.cid, fit_duration) - - # Compute the maximum number of examples which could have been processed - num_examples_ceil = self.num_examples_train * epochs - - if not completed and not partial_updates: - # Return empty update if local update could not be completed in time - parameters = fl.common.ndarrays_to_parameters([]) - else: - # Return the refined weights and the number of examples used for training - parameters = fl.common.ndarrays_to_parameters(self.model.get_weights()) - return fl.common.FitRes( - parameters=parameters, - num_examples=num_examples, - num_examples_ceil=num_examples_ceil, - fit_duration=fit_duration, - ) - - def evaluate(self, ins: fl.common.EvaluateIns) -> fl.common.EvaluateRes: - weights = fl.common.parameters_to_ndarrays(ins.parameters) - config = ins.config - log( - DEBUG, - "evaluate on %s (examples: %s), config %s", - self.cid, - self.num_examples_test, - config, - ) - - # Use provided weights to update the local model - self.model.set_weights(weights) - - # Evaluate the updated model on the local dataset - loss, acc = keras_evaluate( - self.model, self.ds_test, batch_size=self.num_examples_test - ) - - # Return the number of evaluation examples 
and the evaluation result (loss) - return fl.common.EvaluateRes( - loss=loss, num_examples=self.num_examples_test, accuracy=acc - ) diff --git a/src/py/flwr_experimental/baseline/common/common.py b/src/py/flwr_experimental/baseline/common/common.py deleted file mode 100644 index 7be848f96d79..000000000000 --- a/src/py/flwr_experimental/baseline/common/common.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Common baseline components.""" - - -import time -import timeit -from logging import INFO -from typing import Callable, List, Optional, Tuple - -import numpy as np -import tensorflow as tf - -import flwr as fl -from flwr.common.logger import log - -from .data import build_dataset - - -# pylint: disable=unused-argument,invalid-name,too-many-arguments,too-many-locals -def custom_fit( - model: tf.keras.Model, - dataset: tf.data.Dataset, - num_epochs: int, - batch_size: int, - callbacks: List[tf.keras.callbacks.Callback], - delay_factor: float = 0.0, - timeout: Optional[int] = None, -) -> Tuple[bool, float, int]: - """Train the model using a custom training loop.""" - ds_train = dataset.batch(batch_size=batch_size, drop_remainder=False) - - # Keep results for plotting - train_loss_results = [] - train_accuracy_results = [] - - # Optimizer - optimizer = tf.keras.optimizers.Adam() - - fit_begin = timeit.default_timer() - num_examples = 0 - for epoch in range(num_epochs): - log(INFO, "Starting epoch %s", epoch) - - epoch_loss_avg = tf.keras.metrics.Mean() - epoch_accuracy = tf.keras.metrics.CategoricalAccuracy() - - # Single loop over the dataset - batch_begin = timeit.default_timer() - num_examples_batch = 0 - for batch, (x, y) in enumerate(ds_train): - num_examples_batch += len(x) - - # Optimize the model - loss_value, grads = grad(model, x, y) - optimizer.apply_gradients(zip(grads, model.trainable_variables)) - - # Track progress - epoch_loss_avg.update_state(loss_value) # Add the current batch loss - epoch_accuracy.update_state(y, model(x, training=True)) - - # Track the number of examples used for training - num_examples += x.shape[0] - - # Delay - batch_duration = timeit.default_timer() - batch_begin - if delay_factor > 0.0: - time.sleep(batch_duration * delay_factor) - - # Progress log - if batch % 100 == 0: - log( - INFO, - "Batch %s: loss %s (%s examples processed, batch 
duration: %s)", - batch, - loss_value, - num_examples_batch, - batch_duration, - ) - - # Timeout - if timeout is not None: - fit_duration = timeit.default_timer() - fit_begin - if fit_duration > timeout: - log(INFO, "client timeout") - return (False, fit_duration, num_examples) - batch_begin = timeit.default_timer() - - # End epoch - train_loss_results.append(epoch_loss_avg.result()) - train_accuracy_results.append(epoch_accuracy.result()) - log( - INFO, - "Epoch {:03d}: Loss: {:.3f}, Accuracy: {:.3%}".format( - epoch, epoch_loss_avg.result(), epoch_accuracy.result() - ), - ) - - fit_duration = timeit.default_timer() - fit_begin - return True, fit_duration, num_examples - - -def loss( - model: tf.keras.Model, x: tf.Tensor, y: tf.Tensor, training: bool -) -> tf.Tensor: - """Calculate categorical crossentropy loss.""" - loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=False) - y_ = model(x, training=training) - return loss_object(y_true=y, y_pred=y_) - - -def grad( - model: tf.keras.Model, x: tf.Tensor, y: tf.Tensor -) -> Tuple[tf.Tensor, List[tf.Tensor]]: - """Calculate gradients.""" - with tf.GradientTape() as tape: - loss_value = loss(model, x, y, training=True) - return loss_value, tape.gradient(loss_value, model.trainable_variables) - - -def keras_evaluate( - model: tf.keras.Model, dataset: tf.data.Dataset, batch_size: int -) -> Tuple[float, float]: - """Evaluate the model using model.evaluate(...).""" - ds_test = dataset.batch(batch_size=batch_size, drop_remainder=False) - test_loss, acc = model.evaluate(x=ds_test) - return float(test_loss), float(acc) - - -def keras_fit( - model: tf.keras.Model, - dataset: tf.data.Dataset, - num_epochs: int, - batch_size: int, - callbacks: List[tf.keras.callbacks.Callback], -) -> None: - """Train the model using model.fit(...).""" - ds_train = dataset.batch(batch_size=batch_size, drop_remainder=False) - model.fit(ds_train, epochs=num_epochs, callbacks=callbacks, verbose=2) - - -def get_lr_schedule( - 
epoch_global: int, lr_initial: float, lr_decay: float -) -> Callable[[int], float]: - """Return a schedule which decays the learning rate after each epoch.""" - - def lr_schedule(epoch: int) -> float: - """Learning rate schedule.""" - epoch += epoch_global - return lr_initial * lr_decay**epoch - - return lr_schedule - - -def get_evaluate_fn( - model: tf.keras.Model, num_classes: int, xy_test: Tuple[np.ndarray, np.ndarray] -) -> Callable[[fl.common.NDArrays], Optional[Tuple[float, float]]]: - """Return an evaluation function for centralized evaluation.""" - - ds_test = build_dataset( - xy_test[0], - xy_test[1], - num_classes=num_classes, - shuffle_buffer_size=0, - augment=False, - ) - - def evaluate(weights: fl.common.NDArrays) -> Optional[Tuple[float, float]]: - """Use entire test set for evaluation.""" - model.set_weights(weights) - lss, acc = keras_evaluate(model, ds_test, batch_size=len(xy_test[0])) - return lss, acc - - return evaluate diff --git a/src/py/flwr_experimental/baseline/common/data.py b/src/py/flwr_experimental/baseline/common/data.py deleted file mode 100644 index 7d939d608310..000000000000 --- a/src/py/flwr_experimental/baseline/common/data.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Baseline utilities for data loading.""" - - -from typing import List, Optional, Tuple, cast - -import numpy as np -import tensorflow as tf - - -# pylint: disable=too-many-arguments -def load_partition( - xy_partitions: List[Tuple[np.ndarray, np.ndarray]], - xy_test: Tuple[np.ndarray, np.ndarray], - partition: int, - num_clients: int, - seed: int, - dry_run: bool = False, -) -> Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]: - """Load, normalize, and sample CIFAR-10/100.""" - - # Take partition - x_train, y_train = xy_partitions[partition] - - # Take a subset of the test set - x_test, y_test = shuffle(xy_test[0], xy_test[1], seed=seed) - x_test, y_test = get_partition(x_test, y_test, partition, num_clients) - - # Adjust x shape for model - if x_train.ndim == 3: - x_train = adjust_x_shape(x_train) - x_test = adjust_x_shape(x_test) - - # Adjust y shape for model - if y_train.ndim == 2: - y_train = adjust_y_shape(y_train) - y_test = adjust_y_shape(y_test) - - # Return a small subset of the data if dry_run is set - if dry_run: - return (x_train[0:100], y_train[0:100]), (x_test[0:50], y_test[0:50]) - return (x_train, y_train), (x_test, y_test) - - -def shuffle( - x_orig: np.ndarray, y_orig: np.ndarray, seed: int -) -> Tuple[np.ndarray, np.ndarray]: - """Shuffle x and y in the same way.""" - np.random.seed(seed) - idx = np.random.permutation(len(x_orig)) - return x_orig[idx], y_orig[idx] - - -def get_partition( - x_orig: np.ndarray, y_orig: np.ndarray, partition: int, num_clients: int -) -> Tuple[np.ndarray, np.ndarray]: - """Return a single partition of an equally partitioned dataset.""" - step_size = len(x_orig) / num_clients - start_index = int(step_size * partition) - end_index = int(start_index + step_size) - return x_orig[start_index:end_index], y_orig[start_index:end_index] - - -def adjust_x_shape(nda: np.ndarray) -> np.ndarray: - """Turn shape (x, y, z) into 
(x, y, z, 1).""" - nda_adjusted = np.reshape(nda, (nda.shape[0], nda.shape[1], nda.shape[2], 1)) - return cast(np.ndarray, nda_adjusted) - - -def adjust_y_shape(nda: np.ndarray) -> np.ndarray: - """Turn shape (x, 1) into (x).""" - nda_adjusted = np.reshape(nda, (nda.shape[0])) - return cast(np.ndarray, nda_adjusted) - - -# pylint: disable=too-many-arguments,invalid-name -def build_dataset( - x: np.ndarray, - y: np.ndarray, - num_classes: int, - shuffle_buffer_size: int = 0, - augment: bool = False, - augment_color: bool = False, - augment_horizontal_flip: bool = False, - augment_offset: int = 0, - seed: Optional[int] = None, - normalization_factor: float = 255.0, -) -> tf.data.Dataset: - """Normalize images, one-hot encode labels, optionally shuffle and - augment.""" - dataset = tf.data.Dataset.from_tensor_slices((x, y)) - dataset = dataset.map( - lambda x, y: ( - tf.cast(x, tf.float32) / normalization_factor, - tf.one_hot( - indices=tf.cast(y, tf.int32), depth=num_classes, on_value=1, off_value=0 - ), - ), - num_parallel_calls=tf.data.experimental.AUTOTUNE, - ) - if shuffle_buffer_size > 0: - dataset = dataset.shuffle( - buffer_size=shuffle_buffer_size, seed=seed, reshuffle_each_iteration=True - ) - if augment: - dataset = dataset.map( - lambda x, y: ( - apply_augmentation( - x, - seed=seed, - color=augment_color, - horizontal_flip=augment_horizontal_flip, - offset=augment_offset, - ), - y, - ), - num_parallel_calls=tf.data.experimental.AUTOTUNE, - ) - return dataset - - -def apply_augmentation( - img: tf.Tensor, - seed: Optional[int], - color: bool, - horizontal_flip: bool, - offset: int, -) -> tf.Tensor: - """Apply different augmentations to a single example.""" - if color: - img = tf.image.random_hue(img, 0.08, seed=seed) - img = tf.image.random_saturation(img, 0.6, 1.6, seed=seed) - img = tf.image.random_brightness(img, 0.05, seed=seed) - img = tf.image.random_contrast(img, 0.7, 1.3, seed=seed) - if horizontal_flip: - img = tf.image.random_flip_left_right(img, 
seed=seed) - # Get image size from tensor - size = img.shape.as_list() # E.g., [28, 28, 1] or [32, 32, 3] - height = size[0] - width = size[1] - img_padded = tf.image.pad_to_bounding_box( - img, offset, offset, height + 2 * offset, width + 2 * offset - ) - return tf.image.random_crop(img_padded, size=size, seed=seed) diff --git a/src/py/flwr_experimental/baseline/config/__init__.py b/src/py/flwr_experimental/baseline/config/__init__.py deleted file mode 100644 index 0221a92188e5..000000000000 --- a/src/py/flwr_experimental/baseline/config/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Common client configuration.""" - - -from .config import configure_client_instances as configure_client_instances -from .config import sample_delay_factors as sample_delay_factors -from .config import sample_real_delay_factors as sample_real_delay_factors diff --git a/src/py/flwr_experimental/baseline/config/config.py b/src/py/flwr_experimental/baseline/config/config.py deleted file mode 100644 index 16c144bb6a2f..000000000000 --- a/src/py/flwr_experimental/baseline/config/config.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Provides a variaty of baseline settings for Fashion-MNIST.""" - - -import random -from typing import List, Optional, Tuple - -import numpy as np - -from flwr_experimental.ops.instance import Instance - -# We assume that devices which are older will have at most -# ~80% of the Samsung Galaxy Note 5 compute performance. -SCORE_MISSING = int(226 * 0.80) - -DEVICE_DISTRIBUTION = [ - ("10.0", "Note 10", 0.1612, 729), - ("Pie 9", "Samsung Galaxy Note 9", 0.374, 607), - ("Oreo 8.0/8.1", "Samsung Galaxy S8", 0.1129 + 0.0737, 359), - ("Nougat 7.0/7.1", "Samsung Galaxy S7", 0.0624 + 0.043, 343), - ("Marshmallow 6.0", "Samsung Galaxy Note 5", 0.0872, 226), - ("Lollipop 5.1", "Samsung Galaxy Note 4", 0.0484, SCORE_MISSING), - ("KitKat 4.4", "Samsung Galaxy Note 4", 0.0187, SCORE_MISSING), - ("Other", "Samsung Galaxy S III", 0.0185, SCORE_MISSING), -] - - -def sample_delay_factors( - num_clients: int, max_delay: float, seed: Optional[int] -) -> List[float]: - """Sample delay factors.""" - np.random.seed(seed) - # pylint: disable=invalid-name - ps = [float(p) for p in np.random.rand(num_clients)] - step_size = max_delay / num_clients - ds = [(i + 1) * step_size for i in range(num_clients)] - return [p * d for p, d in zip(ps, ds)] - - -def sample_real_delay_factors(num_clients: int, seed: int = 2021) -> List[float]: - """Split list of floats into two buckets.""" - random.seed(seed) - - if num_clients % 2 != 0: - raise Exception("num_clients has to be divisible by two") - - factors = 
sorted([get_delay_factor() for _ in range(num_clients)]) - - buckets: Tuple[List[float], List[float]] = ( - factors[: num_clients // 2], # fast, lower factor - factors[num_clients // 2 :], # slow, higher factor - ) - - final_factors: List[float] = [] - - for idx in range(num_clients): - # higher probability to pick bucket 0 with low idx - bucket_idx = random.choices([0, 1], [num_clients - idx, idx])[0] - picked_bucket = buckets[bucket_idx] - other_bucket = buckets[bucket_idx - 1] - - if picked_bucket == other_bucket: - raise Exception("Picked and other bucket can't be same") - - if len(picked_bucket) > 0: - value = picked_bucket.pop(0) - else: - value = other_bucket.pop(0) - - final_factors.append(value) - - return final_factors - - -def get_delay_factor() -> float: - """Return a delay factor.""" - values_prob = [val[2] for val in DEVICE_DISTRIBUTION] - values_perf = [val[3] for val in DEVICE_DISTRIBUTION] - max_perf = max(values_perf) - chosen_score = random.choices(values_perf, values_prob)[0] - return round(max_perf / chosen_score - 1, 4) - - -def configure_client_instances( - num_clients: int, num_cpu: int, num_ram: float, gpu: bool = False -) -> Tuple[List[Instance], List[str]]: - """Return list of client instances and a list of instance names.""" - instance_names = [f"client_{i}" for i in range(num_clients)] - - instances = [ - Instance( - name=instance_name, - group="clients", - num_cpu=num_cpu, - num_ram=num_ram, - gpu=gpu, - ) - for instance_name in instance_names - ] - - return instances, instance_names diff --git a/src/py/flwr_experimental/baseline/config/config_test.py b/src/py/flwr_experimental/baseline/config/config_test.py deleted file mode 100644 index 1e8377b3be1e..000000000000 --- a/src/py/flwr_experimental/baseline/config/config_test.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Implements tests for config module.""" - - -from .config import sample_real_delay_factors - - -def test_sample_real_delay_factors_100() -> None: - """Test delay factors.""" - # Prepare - num_clients = 100 - - # Execute - factors = sample_real_delay_factors(num_clients=num_clients) - - # Assert - assert len(factors) == num_clients - - -def test_sample_real_delay_factors_10() -> None: - """Test delay factors.""" - # Prepare - num_clients = 10 - - # Execute - factors = sample_real_delay_factors(num_clients=num_clients) - - # Assert - assert len(factors) == num_clients - - -def test_sample_real_delay_factors_seed() -> None: - """Test delay factors.""" - # Prepare - num_clients = 100 - - # Execute - factors_a = sample_real_delay_factors(num_clients=num_clients, seed=0) - factors_b = sample_real_delay_factors(num_clients=num_clients, seed=0) - factors_c = sample_real_delay_factors(num_clients=num_clients, seed=1) - - # Assert - assert len(factors_a) == num_clients - assert len(factors_b) == num_clients - assert len(factors_c) == num_clients - - # pylint: disable=invalid-name - all_same_in_a_and_b = True - all_same_in_a_and_c = True - - for a, b, c in zip(factors_a, factors_b, factors_c): - all_same_in_a_and_b = all_same_in_a_and_b and (a == b) - all_same_in_a_and_c = all_same_in_a_and_c and (a == c) - - assert all_same_in_a_and_b - assert not 
all_same_in_a_and_c diff --git a/src/py/flwr_experimental/baseline/dataset/__init__.py b/src/py/flwr_experimental/baseline/dataset/__init__.py deleted file mode 100644 index aa19c2fbfce8..000000000000 --- a/src/py/flwr_experimental/baseline/dataset/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Partitioned versions of popular datasets.""" diff --git a/src/py/flwr_experimental/baseline/dataset/dataset.py b/src/py/flwr_experimental/baseline/dataset/dataset.py deleted file mode 100644 index 8e27ad71821d..000000000000 --- a/src/py/flwr_experimental/baseline/dataset/dataset.py +++ /dev/null @@ -1,206 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Partitioned versions of CIFAR-10/100 datasets.""" -# pylint: disable=invalid-name - -from typing import List, Tuple, cast - -import numpy as np - -XY = Tuple[np.ndarray, np.ndarray] -XYList = List[XY] -PartitionedDataset = Tuple[XYList, XYList] - -np.random.seed(2020) - - -def float_to_int(i: float) -> int: - """Return float as int but raise if decimal is dropped.""" - if not i.is_integer(): - raise Exception("Cast would drop decimals") - - return int(i) - - -def sort_by_label(x: np.ndarray, y: np.ndarray) -> XY: - """Sort by label. - - Assuming two labels and four examples the resulting label order - would be 1,1,2,2 - """ - idx = np.argsort(y, axis=0).reshape((y.shape[0])) - return (x[idx], y[idx]) - - -def sort_by_label_repeating(x: np.ndarray, y: np.ndarray) -> XY: - """Sort by label in repeating groups. Assuming two labels and four examples - the resulting label order would be 1,2,1,2. - - Create sorting index which is applied to by label sorted x, y - - .. 
code-block:: python - - # given: - y = [ - 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9 - ] - - # use: - idx = [ - 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19 - ] - - # so that y[idx] becomes: - y = [ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 - ] - """ - x, y = sort_by_label(x, y) - - num_example = x.shape[0] - num_class = np.unique(y).shape[0] - idx = ( - np.array(range(num_example), np.int64) - .reshape((num_class, num_example // num_class)) - .transpose() - .reshape(num_example) - ) - - return (x[idx], y[idx]) - - -def split_at_fraction(x: np.ndarray, y: np.ndarray, fraction: float) -> Tuple[XY, XY]: - """Split x, y at a certain fraction.""" - splitting_index = float_to_int(x.shape[0] * fraction) - # Take everything BEFORE splitting_index - x_0, y_0 = x[:splitting_index], y[:splitting_index] - # Take everything AFTER splitting_index - x_1, y_1 = x[splitting_index:], y[splitting_index:] - return (x_0, y_0), (x_1, y_1) - - -def shuffle(x: np.ndarray, y: np.ndarray) -> XY: - """Shuffle x and y.""" - idx = np.random.permutation(len(x)) - return x[idx], y[idx] - - -def partition(x: np.ndarray, y: np.ndarray, num_partitions: int) -> List[XY]: - """Return x, y as list of partitions.""" - return list(zip(np.split(x, num_partitions), np.split(y, num_partitions))) - - -def combine_partitions(xy_list_0: XYList, xy_list_1: XYList) -> XYList: - """Combine two lists of ndarray Tuples into one list.""" - return [ - (np.concatenate([x_0, x_1], axis=0), np.concatenate([y_0, y_1], axis=0)) - for (x_0, y_0), (x_1, y_1) in zip(xy_list_0, xy_list_1) - ] - - -def shift(x: np.ndarray, y: np.ndarray) -> XY: - """Shift x_1, y_1 so that the first half contains only labels 0 to 4 and - the second half 5 to 9.""" - x, y = sort_by_label(x, y) - - (x_0, y_0), (x_1, y_1) = split_at_fraction(x, y, fraction=0.5) - (x_0, y_0), (x_1, y_1) = shuffle(x_0, y_0), shuffle(x_1, y_1) - x, y = np.concatenate([x_0, x_1], axis=0), 
np.concatenate([y_0, y_1], axis=0) - return x, y - - -def create_partitions( - unpartitioned_dataset: XY, - iid_fraction: float, - num_partitions: int, -) -> XYList: - """Create partitioned version of a training or test set. - - Currently tested and supported are MNIST, FashionMNIST and - CIFAR-10/100 - """ - x, y = unpartitioned_dataset - - x, y = shuffle(x, y) - x, y = sort_by_label_repeating(x, y) - - (x_0, y_0), (x_1, y_1) = split_at_fraction(x, y, fraction=iid_fraction) - - # Shift in second split of dataset the classes into two groups - x_1, y_1 = shift(x_1, y_1) - - xy_0_partitions = partition(x_0, y_0, num_partitions) - xy_1_partitions = partition(x_1, y_1, num_partitions) - - xy_partitions = combine_partitions(xy_0_partitions, xy_1_partitions) - - # Adjust x and y shape - return [adjust_xy_shape(xy) for xy in xy_partitions] - - -def create_partitioned_dataset( - keras_dataset: Tuple[XY, XY], - iid_fraction: float, - num_partitions: int, -) -> Tuple[PartitionedDataset, XY]: - """Create partitioned version of keras dataset. 
- - Currently tested and supported are MNIST, FashionMNIST and - CIFAR-10/100 - """ - xy_train, xy_test = keras_dataset - - xy_train_partitions = create_partitions( - unpartitioned_dataset=xy_train, - iid_fraction=iid_fraction, - num_partitions=num_partitions, - ) - - xy_test_partitions = create_partitions( - unpartitioned_dataset=xy_test, - iid_fraction=iid_fraction, - num_partitions=num_partitions, - ) - - return (xy_train_partitions, xy_test_partitions), adjust_xy_shape(xy_test) - - -def log_distribution(xy_partitions: XYList) -> None: - """Print label distribution for list of paritions.""" - distro = [np.unique(y, return_counts=True) for _, y in xy_partitions] - for d in distro: - print(d) - - -def adjust_xy_shape(xy: XY) -> XY: - """Adjust shape of both x and y.""" - x, y = xy - if x.ndim == 3: - x = adjust_x_shape(x) - if y.ndim == 2: - y = adjust_y_shape(y) - return (x, y) - - -def adjust_x_shape(nda: np.ndarray) -> np.ndarray: - """Turn shape (x, y, z) into (x, y, z, 1).""" - nda_adjusted = np.reshape(nda, (nda.shape[0], nda.shape[1], nda.shape[2], 1)) - return cast(np.ndarray, nda_adjusted) - - -def adjust_y_shape(nda: np.ndarray) -> np.ndarray: - """Turn shape (x, 1) into (x).""" - nda_adjusted = np.reshape(nda, (nda.shape[0])) - return cast(np.ndarray, nda_adjusted) diff --git a/src/py/flwr_experimental/baseline/dataset/dataset_test.py b/src/py/flwr_experimental/baseline/dataset/dataset_test.py deleted file mode 100644 index 4c484b8836bc..000000000000 --- a/src/py/flwr_experimental/baseline/dataset/dataset_test.py +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for partitioned CIFAR-10/100 dataset generation.""" -# pylint: disable=no-self-use, invalid-name - -import unittest - -import numpy as np -import tensorflow as tf - -from flwr_experimental.baseline.dataset.dataset import ( - XY, - combine_partitions, - partition, - shuffle, - sort_by_label, - sort_by_label_repeating, - split_at_fraction, -) - - -def hash_xy(xy: XY) -> int: - """Return hash of xy.""" - hashes = set() - for x, y in zip(xy[0], xy[1]): - hashes.add(hash(x.tobytes() + y.tobytes())) - return hash(frozenset(hashes)) - - -def assert_identity(xy_0: XY, xy_1: XY) -> None: - """Assert that both datasets contain the same examples.""" - assert xy_0[0].shape == xy_1[0].shape - assert xy_0[1].shape == xy_1[1].shape - assert hash_xy(xy_0) == hash_xy(xy_1) - - -class CifarPartitionedTestCase(unittest.TestCase): - """Tests for partitioned CIFAR-10/100 dataset generation.""" - - def setUp(self) -> None: - (x, y), (_, _) = tf.keras.datasets.cifar10.load_data() - - np.random.seed(2000) - idx = np.random.permutation(x.shape[0]) - x, y = x[idx], y[idx] - - self.ds = x, y - - # Make sure subsequent shuffle in tests - # produce other permutations - np.random.seed(3000) - - def test_assert_identity(self) -> None: - """Test assert_identity function.""" - assert_identity(self.ds, self.ds) - - def test_sort_by_label(self) -> None: - """Test sort_by_label function.""" - # Prepare - x_org, y_org = self.ds - - # Execute - x, y = sort_by_label(x_org, y_org) - - # Assert - 
assert_identity(self.ds, (x, y)) - for i, _ in enumerate(y): - if i > 0: - assert y[i] >= y[i - 1] - - def test_sort_by_label_repeating(self) -> None: - """Test sort_by_label function.""" - # Prepare - x, y = self.ds - idx = np.random.permutation(x.shape[0]) - x, y = x[idx], y[idx] - - # Execute - x, y = sort_by_label_repeating(x, y) - - # Assert - assert_identity(self.ds, (x, y)) - assert {y[0] for y in y[:10]} == set(range(10)) - - def test_split_at_fraction(self) -> None: - """Test split_at_fraction function.""" - # Prepare - fraction = 0.5 - x, y = self.ds - - # Execute - (x_0, y_0), (x_1, y_1) = split_at_fraction(x, y, fraction) - - # Assert - barrier = int(x.shape[0] * fraction) - np.testing.assert_equal(x_0, x[:barrier]) - np.testing.assert_equal(y_0, y[:barrier]) - np.testing.assert_equal(x_1, x[barrier:]) - np.testing.assert_equal(y_1, y[barrier:]) - - def test_shuffle(self) -> None: - """Test sort_by_label function.""" - # Prepare - x, y = self.ds - - # Execute - x, y = shuffle(x, y) - - # Assert - assert_identity(self.ds, (x, y)) - - def test_partition(self) -> None: - """Test partition function.""" - # Prepare - x, y = self.ds - - # Execute - partitions = partition(x, y, 2) - - # Assert - assert len(partitions) == 2 - assert partitions[0][0].shape == partitions[1][0].shape - assert partitions[0][1].shape == partitions[1][1].shape - - def test_combine_partitions(self) -> None: - """Test combine function.""" - # Prepare - r_0_5 = list(range(0, 5)) - r_5_10 = list(range(5, 10)) - r_0_10 = r_0_5 + r_5_10 - xy_list_0 = [(np.array(r_0_5, np.int64), np.array(r_0_5, np.int64))] - xy_list_1 = [(np.array(r_5_10, np.int64), np.array(r_5_10, np.int64))] - - # Execute - xy_combined = combine_partitions(xy_list_0, xy_list_1) - - # Assert - assert len(xy_combined) == 1 - assert isinstance(xy_combined[0], tuple) - x_01, y_01 = xy_combined[0] - np.testing.assert_equal(x_01, r_0_10) - np.testing.assert_equal(y_01, r_0_10) - - -if __name__ == "__main__": - 
unittest.main(verbosity=2) diff --git a/src/py/flwr_experimental/baseline/dataset/tf_cifar_partitioned.py b/src/py/flwr_experimental/baseline/dataset/tf_cifar_partitioned.py deleted file mode 100644 index 61b6e31c29ab..000000000000 --- a/src/py/flwr_experimental/baseline/dataset/tf_cifar_partitioned.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Partitioned versions of CIFAR-10/100 datasets.""" -# pylint: disable=invalid-name - -from typing import Tuple - -import tensorflow as tf - -from .dataset import ( - XY, - PartitionedDataset, - create_partitioned_dataset, - log_distribution, -) - - -def load_data( - iid_fraction: float, num_partitions: int, cifar100: bool = False -) -> Tuple[PartitionedDataset, XY]: - """Load partitioned version of CIFAR-10/100.""" - cifar = tf.keras.datasets.cifar100 if cifar100 else tf.keras.datasets.cifar10 - (xy_train_partitions, xy_test_partitions), xy_test = create_partitioned_dataset( - cifar.load_data(), iid_fraction, num_partitions - ) - return (xy_train_partitions, xy_test_partitions), xy_test - - -if __name__ == "__main__": - # Load a partitioned dataset and show distribution of examples - for _num_partitions in [10, 100]: - for _fraction in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]: - (xy_train_par, xy_test_par), _ = load_data(_fraction, 
_num_partitions) - print(f"\nfraction: {_fraction}; num_partitions: {_num_partitions}") - log_distribution(xy_train_par) - log_distribution(xy_test_par) diff --git a/src/py/flwr_experimental/baseline/dataset/tf_cifar_partitioned_test.py b/src/py/flwr_experimental/baseline/dataset/tf_cifar_partitioned_test.py deleted file mode 100644 index dc655682adec..000000000000 --- a/src/py/flwr_experimental/baseline/dataset/tf_cifar_partitioned_test.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests for partitioned CIFAR-10/100 dataset generation.""" -# pylint: disable=no-self-use - -import unittest - -from flwr_experimental.baseline.dataset.tf_cifar_partitioned import load_data - - -class CifarPartitionedTestCase(unittest.TestCase): - """Tests for partitioned CIFAR-10/100 dataset generation.""" - - def test_load_data_integration(self) -> None: - """Test partition function.""" - # Execute - for num_partitions in [10, 100]: - for fraction in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]: - (_, _), _ = load_data(fraction, num_partitions) - - -if __name__ == "__main__": - unittest.main(verbosity=2) diff --git a/src/py/flwr_experimental/baseline/dataset/tf_fashion_mnist_partitioned.py b/src/py/flwr_experimental/baseline/dataset/tf_fashion_mnist_partitioned.py deleted file mode 100644 index da7d70434bb4..000000000000 --- a/src/py/flwr_experimental/baseline/dataset/tf_fashion_mnist_partitioned.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Partitioned versions of CIFAR-10/100 datasets.""" -# pylint: disable=invalid-name - - -from typing import Tuple - -import tensorflow as tf - -from .dataset import ( - XY, - PartitionedDataset, - create_partitioned_dataset, - log_distribution, -) - - -def load_data( - iid_fraction: float, num_partitions: int -) -> Tuple[PartitionedDataset, XY]: - """Load partitioned version of FashionMNIST.""" - (xy_train_partitions, xy_test_partitions), xy_test = create_partitioned_dataset( - tf.keras.datasets.fashion_mnist.load_data(), iid_fraction, num_partitions - ) - return (xy_train_partitions, xy_test_partitions), xy_test - - -if __name__ == "__main__": - # Load a partitioned dataset and show distribution of examples - for _num_partitions in [10, 100]: - for _fraction in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]: - (xy_train_par, xy_test_par), _ = load_data(_fraction, _num_partitions) - print(f"\nfraction: {_fraction}; num_partitions: {_num_partitions}") - log_distribution(xy_train_par) - log_distribution(xy_test_par) diff --git a/src/py/flwr_experimental/baseline/dataset/tf_fashion_mnist_partitioned_test.py b/src/py/flwr_experimental/baseline/dataset/tf_fashion_mnist_partitioned_test.py deleted file mode 100644 index 2782bcf0353a..000000000000 --- a/src/py/flwr_experimental/baseline/dataset/tf_fashion_mnist_partitioned_test.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for partitioned FashionMNIST dataset generation.""" -# pylint: disable=no-self-use - -import unittest - -from flwr_experimental.baseline.dataset.tf_fashion_mnist_partitioned import load_data - - -class FashionMnistPartitionedTestCase(unittest.TestCase): - """Tests for partitioned FashionMNIST dataset generation.""" - - def test_load_data_integration(self) -> None: - """Test partition function.""" - # Execute - for num_partitions in [10, 100]: - for fraction in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]: - (_, _), _ = load_data(fraction, num_partitions) - - -if __name__ == "__main__": - unittest.main(verbosity=2) diff --git a/src/py/flwr_experimental/baseline/dataset/tf_hotkey_partitioned.py b/src/py/flwr_experimental/baseline/dataset/tf_hotkey_partitioned.py deleted file mode 100644 index 265d67301b64..000000000000 --- a/src/py/flwr_experimental/baseline/dataset/tf_hotkey_partitioned.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Partitioned versions of Spoken Keyword Detection dataset.""" -# pylint: disable=invalid-name - -import os -import pickle -import urllib.request -from typing import Tuple - -from .dataset import ( - XY, - PartitionedDataset, - create_partitioned_dataset, - log_distribution, -) - - -def download(filename: str, path: str) -> None: - """Download hotkey dataset.""" - urls = { - "hotkey_test_x.pkl": "https://www.dropbox.com/s/ve0g1m3wtuecb7r/hotkey_test_x.pkl?dl=1", - "hotkey_test_y.pkl": "https://www.dropbox.com/s/hlihc8qchpo3hhj/hotkey_test_y.pkl?dl=1", - "hotkey_train_x.pkl": "https://www.dropbox.com/s/05ym4jg8n7oi5qh/hotkey_train_x.pkl?dl=1", - "hotkey_train_y.pkl": "https://www.dropbox.com/s/k69lhw5j02gsscq/hotkey_train_y.pkl?dl=1", - } - url = urls[filename] - urllib.request.urlretrieve(url, path) - print("Downloaded ", url) - - -def hotkey_load(dirname: str = "./data/hotkey/") -> Tuple[XY, XY]: - """Load Hotkey dataset from disk.""" - files = [ - "hotkey_train_x.pkl", - "hotkey_train_y.pkl", - "hotkey_test_x.pkl", - "hotkey_test_y.pkl", - ] - paths = [] - - for f in files: - if not os.path.exists(dirname): - os.makedirs(dirname) - path = os.path.join(dirname, f) - if not os.path.exists(path): - download(f, path) - paths.append(path) - - with open(paths[0], "rb") as input_file: - x_train = pickle.load(input_file) - - with open(paths[1], "rb") as input_file: - y_train = pickle.load(input_file) - - with open(paths[2], "rb") as input_file: - x_test = pickle.load(input_file) - - with open(paths[3], "rb") as input_file: - y_test = pickle.load(input_file) - - return ( - (x_train[0:31000, :, :], y_train[0:31000]), - (x_test[0:4000, :, :], y_test[0:4000]), - ) - - -def load_data( - iid_fraction: float, num_partitions: int -) -> Tuple[PartitionedDataset, XY]: - """Load partitioned version of FashionMNIST.""" - (xy_train_partitions, xy_test_partitions), xy_test = create_partitioned_dataset( 
- hotkey_load(), iid_fraction, num_partitions - ) - return (xy_train_partitions, xy_test_partitions), xy_test - - -if __name__ == "__main__": - # Load a partitioned dataset and show distribution of examples - for _num_partitions in [10, 50, 100]: - for _fraction in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]: - (xy_train_par, xy_test_par), _ = load_data(_fraction, _num_partitions) - print(f"\nfraction: {_fraction}; num_partitions: {_num_partitions}") - log_distribution(xy_train_par) - log_distribution(xy_test_par) diff --git a/src/py/flwr_experimental/baseline/ip.py b/src/py/flwr_experimental/baseline/ip.py deleted file mode 100644 index 25efefa40d0a..000000000000 --- a/src/py/flwr_experimental/baseline/ip.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Provide method to get the ip address of a network interface.""" - - -from subprocess import check_output - - -def get_ip_address() -> str: - """Return IP address.""" - ips = check_output(["hostname", "--all-ip-addresses"]) - ips_decoded = ips.decode("utf-8").split(" ") - return ips_decoded[0] diff --git a/src/py/flwr_experimental/baseline/model/__init__.py b/src/py/flwr_experimental/baseline/model/__init__.py deleted file mode 100644 index 70c22a137002..000000000000 --- a/src/py/flwr_experimental/baseline/model/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Baseline models.""" - - -from .cnn import keyword_cnn as keyword_cnn -from .cnn import orig_cnn as orig_cnn -from .resnet import resnet50v2 as resnet50v2 diff --git a/src/py/flwr_experimental/baseline/model/cnn.py b/src/py/flwr_experimental/baseline/model/cnn.py deleted file mode 100644 index 0f0d777745b2..000000000000 --- a/src/py/flwr_experimental/baseline/model/cnn.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""CNN.""" - - -from typing import Optional, Tuple - -import tensorflow as tf - -CNN_REG = 1e-5 -DENSE_REG = 1e-3 - - -def orig_cnn( - input_shape: Tuple[int, int, int] = (28, 28, 1), seed: Optional[int] = None -) -> tf.keras.Model: - """Create a CNN instance.""" - # Kernel initializer - kernel_initializer = tf.keras.initializers.glorot_uniform(seed=seed) - - # Architecture - inputs = tf.keras.layers.Input(shape=input_shape) - layers = tf.keras.layers.Conv2D( - 32, - kernel_size=(5, 5), - strides=(1, 1), - kernel_initializer=kernel_initializer, - padding="same", - activation="relu", - kernel_regularizer=tf.keras.regularizers.l2(CNN_REG), - )(inputs) - layers = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))(layers) - layers = tf.keras.layers.Conv2D( - 64, - kernel_size=(5, 5), - strides=(1, 1), - kernel_initializer=kernel_initializer, - padding="same", - activation="relu", - kernel_regularizer=tf.keras.regularizers.l2(CNN_REG), - )(layers) - layers = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))(layers) - layers = tf.keras.layers.Flatten()(layers) - layers = tf.keras.layers.Dense( - 512, - kernel_initializer=kernel_initializer, - activation="relu", - kernel_regularizer=tf.keras.regularizers.l2(DENSE_REG), - bias_regularizer=tf.keras.regularizers.l2(DENSE_REG), - )(layers) - - outputs = tf.keras.layers.Dense( - 10, - kernel_initializer=kernel_initializer, - activation="softmax", - kernel_regularizer=tf.keras.regularizers.l2(DENSE_REG), - 
bias_regularizer=tf.keras.regularizers.l2(DENSE_REG), - )(layers) - - model = tf.keras.Model(inputs=inputs, outputs=outputs) - - # Compile model w/ learning rate schedule - lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( - initial_learning_rate=1e-3, - decay_steps=10000, - decay_rate=0.9, - ) - model.compile( - optimizer=tf.keras.optimizers.SGD(learning_rate=lr_schedule, momentum=0.9), - loss=tf.keras.losses.categorical_crossentropy, - metrics=["accuracy"], - ) - return model - - -def keyword_cnn( - input_shape: Tuple[int, int, int] = (80, 40, 1), seed: Optional[int] = None -) -> tf.keras.Model: - """Create a keyword detection model instance.""" - # Kernel initializer - kernel_initializer = tf.keras.initializers.glorot_uniform(seed=seed) - - # Architecture - inputs = tf.keras.layers.Input(shape=input_shape) - layers = tf.keras.layers.Conv2D( - 32, - kernel_size=(20, 8), - strides=(1, 1), - kernel_initializer=kernel_initializer, - padding="same", - activation="relu", - )(inputs) - layers = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))(layers) - layers = tf.keras.layers.Dropout(0.5)(layers) - layers = tf.keras.layers.Conv2D( - 64, - kernel_size=(10, 4), - strides=(1, 1), - kernel_initializer=kernel_initializer, - padding="same", - activation="relu", - )(layers) - layers = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))(layers) - - layers = tf.keras.layers.Conv2D( - 64, - kernel_size=(2, 2), - strides=(1, 1), - kernel_initializer=kernel_initializer, - padding="same", - activation="relu", - )(layers) - - layers = tf.keras.layers.GlobalAveragePooling2D()(layers) - layers = tf.keras.layers.Dense( - 128, kernel_initializer=kernel_initializer, activation="relu" - )(layers) - - outputs = tf.keras.layers.Dense( - 10, kernel_initializer=kernel_initializer, activation="softmax" - )(layers) - - model = tf.keras.Model(inputs=inputs, outputs=outputs) - - # Compile model w/ learning rate schedule - lr_schedule = 
tf.keras.optimizers.schedules.ExponentialDecay( - initial_learning_rate=1e-3, - decay_steps=10000, - decay_rate=0.9, - ) - model.compile( - optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule), - loss=tf.keras.losses.categorical_crossentropy, - metrics=["accuracy"], - ) - - return model diff --git a/src/py/flwr_experimental/baseline/model/cnn_test.py b/src/py/flwr_experimental/baseline/model/cnn_test.py deleted file mode 100644 index 3cf23d96d961..000000000000 --- a/src/py/flwr_experimental/baseline/model/cnn_test.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests for CNN models.""" - - -from .cnn import orig_cnn - - -def test_cnn_size_mnist() -> None: - """Test number of parameters with MNIST-sized inputs.""" - # Prepare - model = orig_cnn(input_shape=(28, 28, 1)) - expected = 1_663_370 - - # Execute - actual = model.count_params() - - # Assert - assert actual == expected - - -def test_cnn_size_cifar() -> None: - """Test number of parameters with CIFAR-sized inputs.""" - # Prepare - model = orig_cnn(input_shape=(32, 32, 3)) - expected = 2_156_490 - - # Execute - actual = model.count_params() - - # Assert - assert actual == expected diff --git a/src/py/flwr_experimental/baseline/model/resnet.py b/src/py/flwr_experimental/baseline/model/resnet.py deleted file mode 100644 index 264822e09616..000000000000 --- a/src/py/flwr_experimental/baseline/model/resnet.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""ResNet.""" - - -from typing import Optional, Tuple - -import tensorflow as tf - - -# pylint: disable=unused-argument -def resnet50v2( - input_shape: Tuple[int, int, int], num_classes: int, seed: Optional[int] = None -) -> tf.keras.Model: - """Create a ResNet-50 (v2) instance.""" - - model = tf.keras.applications.ResNet50V2( - weights=None, include_top=True, input_shape=input_shape, classes=num_classes - ) - - # Compile model w/ learning rate schedule - lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( - initial_learning_rate=1e-3, - decay_steps=10000, - decay_rate=0.9, - ) - model.compile( - optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule), - loss="categorical_crossentropy", - metrics=["accuracy"], - ) - return model diff --git a/src/py/flwr_experimental/baseline/plot/__init__.py b/src/py/flwr_experimental/baseline/plot/__init__.py deleted file mode 100644 index bfab50defebf..000000000000 --- a/src/py/flwr_experimental/baseline/plot/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Provides plotting functionality.""" - - -from .plot import bar_chart as bar_chart -from .plot import line_chart as line_chart -from .plot import single_bar_chart as single_bar_chart diff --git a/src/py/flwr_experimental/baseline/plot/plot.py b/src/py/flwr_experimental/baseline/plot/plot.py deleted file mode 100644 index e64b2435f5c7..000000000000 --- a/src/py/flwr_experimental/baseline/plot/plot.py +++ /dev/null @@ -1,247 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Provides plotting functions.""" - - -import math -import os.path -from enum import Enum -from pathlib import Path -from typing import List, Union - -import matplotlib -import matplotlib.pyplot as plt -import numpy as np - -matplotlib.rcParams["ps.useafm"] = True -matplotlib.rcParams["pdf.use14corefonts"] = True -matplotlib.rcParams["axes.axisbelow"] = True -matplotlib.rcParams["hatch.linewidth"] = 1.0 -matplotlib.use("Agg") - - -ROOT_DIR = os.path.realpath(os.path.dirname(__file__) + "/../../..") -PLOT_DIR = ROOT_DIR + "/plot" - -# If it does not exist create the output directory for the plots -Path(PLOT_DIR).mkdir(exist_ok=True) - - -MARKERSIZE = 3 # Size of the symbols on a linecharts - - -class LegendLoc(Enum): - """Enumerates possible legend location in a plot.""" - - UL = "upper left" - UR = "upper right" - LL = "lower left" - LR = "lower right" - UC = "upper center" - LC = "lower center" - CL = "center left" - CR = "center right" - - -# Disable too many arguments for all functions -# pylint: disable=too-many-arguments too-many-locals - - -def roundup_nearest(max_num: Union[int, float], div: int = 10) -> int: - """Roundup to nearst number divideable by n.""" - return int(math.ceil(max_num / float(div))) * div - - -def final_path(dir_name: str, filename: str, suffix: str = "pdf") -> str: - """Join path components and return as string.""" - filename_with_suffix = filename + "." 
+ suffix - - if os.path.isabs(filename_with_suffix): - return filename_with_suffix - - return os.path.join(dir_name, filename_with_suffix) - - -def single_bar_chart( - y_values: np.ndarray, - tick_labels: List[str], - x_label: str, - y_label: str, - filename: str = "single_bar_chart", -) -> str: - """Plot and save a single bar chart.""" - - x_values = np.arange(y_values.size) - fig = plt.figure(figsize=(5, 3)) - ax_subplot = fig.add_subplot(111) - - barwidth = 0.7 - opacity = 1.0 - - plt.bar( - x_values, - y_values, - barwidth / 2, - alpha=opacity, - color=["black"], - linewidth=1, - edgecolor="black", - ) - - ax_subplot.spines["right"].set_visible(False) - ax_subplot.spines["top"].set_visible(False) - ax_subplot.xaxis.set_ticks_position("bottom") - ax_subplot.yaxis.set_ticks_position("left") - - plt.ylabel(y_label, fontsize=16) - plt.xlabel(x_label, fontsize=16) - - plt.xlim((-1, y_values.size)) - plt.ylim((0, 100)) - - plt.grid(linestyle="dotted") - - gca = plt.gca() - gca.set_yticklabels(gca.get_yticks(), fontsize=16) - ax_subplot.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter("%.0f")) - - ax_subplot.set_xticks([0, 1, 2, 3]) - - ax_subplot.set_xticklabels(tick_labels, fontsize=14) - - fig.tight_layout() - path = final_path(PLOT_DIR, filename) - plt.savefig(path, dpi=1000, bbox_inches="tight", transparent=True) - return path - - -def bar_chart( - y_values: List[np.ndarray], - bar_labels: List[str], - x_label: str, - x_tick_labels: List[str], - y_label: str, - legend_location: LegendLoc = LegendLoc.LR, - filename: str = "bar_chart", -) -> str: - """Plot and save a bar chart. - - Note: - Currently only supports len(y_values) == 2 but it should be easy to - support more than 2 bars. Feel free to contribute. 
- """ - - x_values = np.arange(y_values[0].size) - fig = plt.figure(figsize=(5, 3)) - ax_subplot = fig.add_subplot(111) - - barwidth = 0.7 - opacity = 1.0 - - colors = ["r", "b"] - - rects = [ - plt.bar( - x_values - barwidth * 0.25 * pow(-1, i), - val, - barwidth / len(y_values), - alpha=opacity, - color=[colors[i]], - linewidth=1, - edgecolor="black", - ) - for i, val in enumerate(y_values) - ] - - ax_subplot.spines["right"].set_visible(False) - ax_subplot.spines["top"].set_visible(False) - ax_subplot.xaxis.set_ticks_position("bottom") - ax_subplot.yaxis.set_ticks_position("left") - - plt.ylabel(y_label, fontsize=16) - plt.xlabel(x_label, fontsize=16) - - plt.xlim((-1, y_values[0].size)) - plt.ylim((0, roundup_nearest(np.max(y_values), 20))) - - plt.grid(linestyle="dotted") - gca = plt.gca() - gca.set_yticklabels(gca.get_yticks(), fontsize=16) - ax_subplot.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter("%.0f")) - - # xticks - ax_subplot.set_xticks(range(len(x_tick_labels))) - ax_subplot.set_xticklabels(x_tick_labels, fontsize=14) - - lgd = ax_subplot.legend( - tuple([rect[0] for rect in rects]), - tuple(bar_labels), - loc=legend_location.value, - fontsize=14, - ncol=2, - ) - - fig.tight_layout() - path = final_path(PLOT_DIR, filename) - plt.savefig( - path, - dpi=1000, - bbox_inches="tight", - bbox_extra_artists=(lgd,), - transparent=True, - ) - return path - - -def line_chart( - lines: List[np.ndarray], - labels: List[str], - x_label: str, - y_label: str, - legend_location: LegendLoc = LegendLoc.LR, - filename: str = "line_chart", - y_floor: int = 0, - y_ceil: int = 100, -) -> str: - """Plot and save a line chart.""" - - assert len({line.size for line in lines}) == 1, "Each line must be of same size." 
- - x_values = range(0, len(lines[0])) - plt.figure(figsize=(6, 4)) - ax_subplot = plt.subplot(111) - symbols = ["-o", "-s", "-d", "-^", "-x", "-8", "-*", "-P"] - - for i, zipped in enumerate(zip(lines, labels)): - line, label = zipped - ax_subplot.plot(x_values, line, symbols[i], label=label, markersize=MARKERSIZE) - - plt.yticks(np.arange(y_floor, y_ceil, 10.0), fontsize=14) - plt.xticks(np.arange(min(x_values), max(x_values) + 1, 10.0), fontsize=10) - - gca = plt.gca() - gca.set_yticklabels(gca.get_yticks(), fontsize=10) - ax_subplot.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter("%.0f")) - - plt.ylim((y_floor, y_ceil + 1)) - plt.xlim((-1, len(x_values))) - plt.legend(loc=legend_location.value, fontsize=14) - # ax.set_xticklabels(('15s', '30s', '60s', '90s', '120s'), fontsize=15) - plt.xlabel(x_label, fontsize=16) - plt.ylabel(y_label, fontsize=16) - - path = final_path(PLOT_DIR, filename) - plt.savefig(path, dpi=1000, bbox_inches="tight", transparent=True) - return path diff --git a/src/py/flwr_experimental/baseline/run.py b/src/py/flwr_experimental/baseline/run.py deleted file mode 100644 index f21963c69a41..000000000000 --- a/src/py/flwr_experimental/baseline/run.py +++ /dev/null @@ -1,268 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Execute Fashion-MNIST baseline locally in Docker.""" - - -import argparse -import concurrent.futures -import configparser -import sys -from logging import INFO -from os import path -from time import strftime -from typing import List, Optional - -import flwr_experimental.baseline.tf_cifar.settings as tf_cifar_settings -import flwr_experimental.baseline.tf_fashion_mnist.settings as tf_fashion_mnist_settings -import flwr_experimental.baseline.tf_hotkey.settings as tf_hotkey_settings -from flwr.common.logger import configure, log -from flwr_experimental.baseline import command -from flwr_experimental.baseline.setting import Baseline -from flwr_experimental.ops.cluster import Cluster -from flwr_experimental.ops.compute.adapter import Adapter -from flwr_experimental.ops.compute.docker_adapter import DockerAdapter -from flwr_experimental.ops.compute.ec2_adapter import EC2Adapter -from flwr_experimental.ops.instance import Instance - -OPS_INI_PATH = path.normpath( - f"{path.dirname(path.realpath(__file__))}/../../../../.flower_ops" -) - -# Read config file and extract all values which are needed further down. 
-CONFIG = configparser.ConfigParser() -CONFIG.read(OPS_INI_PATH) - -WHEEL_FILENAME = CONFIG.get("paths", "wheel_filename") -WHEEL_LOCAL_PATH = path.expanduser(CONFIG.get("paths", "wheel_dir")) + WHEEL_FILENAME - -DOCKER_PRIVATE_KEY = path.realpath(path.dirname(__file__) + "/../../../docker/ssh_key") - - -def now() -> str: - """Return current date and time as string.""" - return strftime("%Y%m%dT%H%M%S") - - -def configure_cluster( - adapter: str, instances: List[Instance], baseline: str, setting: str -) -> Cluster: - """Return configured compute cluster.""" - adapter_instance: Optional[Adapter] = None - private_key: Optional[str] = None - - if adapter == "docker": - adapter_instance = DockerAdapter() - user = "root" - private_key = DOCKER_PRIVATE_KEY - elif adapter == "ec2": - adapter_instance = EC2Adapter( - image_id=CONFIG.get("aws", "image_id"), - key_name=path.expanduser(CONFIG.get("aws", "key_name")), - subnet_id=CONFIG.get("aws", "subnet_id"), - security_group_ids=CONFIG.get("aws", "security_group_ids").split(","), - tags=[ - ("Purpose", "flwr_experimental.baseline"), - ("Baseline Name", baseline), - ("Baseline Setting", setting), - ], - ) - user = "ubuntu" - private_key = path.expanduser(CONFIG.get("ssh", "private_key")) - else: - raise Exception(f"Adapter of type {adapter} does not exist.") - - cluster = Cluster( - adapter=adapter_instance, - ssh_credentials=(user, private_key), - instances=instances, - timeout=60, - ) - - return cluster - - -def load_baseline_setting(baseline: str, setting: str) -> Baseline: - """Return appropriate baseline setting.""" - if baseline == "tf_cifar": - return tf_cifar_settings.get_setting(setting) - if baseline == "tf_fashion_mnist": - return tf_fashion_mnist_settings.get_setting(setting) - if baseline == "tf_hotkey": - return tf_hotkey_settings.get_setting(setting) - - raise Exception("Setting not found.") - - -# pylint: disable=too-many-arguments, too-many-locals -def run(baseline: str, setting: str, adapter: str) -> None: 
- """Run baseline.""" - print(f"Starting baseline with {setting} settings.") - - wheel_remote_path = ( - f"/root/{WHEEL_FILENAME}" - if adapter == "docker" - else f"/home/ubuntu/{WHEEL_FILENAME}" - ) - - settings = load_baseline_setting(baseline, setting) - - # Get instances and add a logserver to the list - instances = settings.instances - instances.append( - Instance(name="logserver", group="logserver", num_cpu=2, num_ram=2) - ) - - # Configure cluster - log(INFO, "(1/9) Configure cluster.") - cluster = configure_cluster(adapter, instances, baseline, setting) - - # Start the cluster; this takes some time - log(INFO, "(2/9) Start cluster.") - cluster.start() - - # Upload wheel to all instances - log(INFO, "(3/9) Upload wheel to all instances.") - cluster.upload_all(WHEEL_LOCAL_PATH, wheel_remote_path) - - # Install the wheel on all instances - log(INFO, "(4/9) Install wheel on all instances.") - cluster.exec_all(command.install_wheel(wheel_remote_path)) - extras = ["examples-tensorflow"] if "tf_" in baseline else ["examples-pytorch"] - cluster.exec_all( - command.install_wheel(wheel_remote_path=wheel_remote_path, wheel_extras=extras) - ) - - # Download datasets in server and clients - log(INFO, "(5/9) Download dataset on server and clients.") - cluster.exec_all( - command.download_dataset(baseline=baseline), groups=["server", "clients"] - ) - - # Start logserver - log(INFO, "(6/9) Start logserver.") - logserver = cluster.get_instance("logserver") - cluster.exec( - logserver.name, - command.start_logserver( - logserver_s3_bucket=CONFIG.get("aws", "logserver_s3_bucket"), - logserver_s3_key=f"{baseline}_{setting}_{now()}.log", - ), - ) - - # Start Flower server on Flower server instances - log(INFO, "(7/9) Start server.") - cluster.exec( - "server", - command.start_server( - log_host=f"{logserver.private_ip}:8081", - baseline=baseline, - setting=setting, - ), - ) - - # Start Flower clients - log(INFO, "(8/9) Start clients.") - server = cluster.get_instance("server") 
- - with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor: - # Start the load operations and mark each future with its URL - concurrent.futures.wait( - [ - executor.submit( - cluster.exec, - client_setting.instance_name, - command.start_client( - log_host=f"{logserver.private_ip}:8081", - server_address=f"{server.private_ip}:8080", - baseline=baseline, - setting=setting, - cid=client_setting.cid, - ), - ) - for client_setting in settings.clients - ] - ) - - # Shutdown server and client instance after 10min if not at least one Flower - # process is running it - log(INFO, "(9/9) Start shutdown watcher script.") - cluster.exec_all(command.watch_and_shutdown("flwr", adapter)) - - # Give user info how to tail logfile - private_key = ( - DOCKER_PRIVATE_KEY - if adapter == "docker" - else path.expanduser(CONFIG.get("ssh", "private_key")) - ) - - log( - INFO, - "If you would like to tail the central logfile run:\n\n\t%s\n", - command.tail_logfile(adapter, private_key, logserver), - ) - - -def main() -> None: - """Start Flower baseline.""" - parser = argparse.ArgumentParser(description="Flower") - - # When adding a new setting make sure to modify the load_baseline_setting function - possible_baselines = ["tf_cifar", "tf_fashion_mnist", "tf_hotkey"] - possible_settings = [] - all_settings = [ - list(tf_cifar_settings.SETTINGS.keys()), - list(tf_fashion_mnist_settings.SETTINGS.keys()), - list(tf_hotkey_settings.SETTINGS.keys()), - ] - - # Show only relevant settings based on baseline as choices - # for --setting parameter - baseline_arg = [arg for arg in sys.argv if "--baseline" in arg] - if len(baseline_arg) > 0: - selected_baseline = baseline_arg[0].split("=")[1] - idx = possible_baselines.index(selected_baseline) - possible_settings = all_settings[idx] - - parser.add_argument( - "--baseline", - type=str, - required=True, - choices=possible_baselines, - help="Name of baseline name to run.", - ) - parser.add_argument( - "--setting", - type=str, - 
required=True, - choices=possible_settings, - help="Name of setting to run.", - ) - parser.add_argument( - "--adapter", - type=str, - required=True, - choices=["docker", "ec2"], - help="Set adapter to be used.", - ) - args = parser.parse_args() - - # Configure logger - configure(f"flower_{args.baseline}_{args.setting}") - - run(baseline=args.baseline, setting=args.setting, adapter=args.adapter) - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_experimental/baseline/run.sh b/src/py/flwr_experimental/baseline/run.sh deleted file mode 100755 index 9cb5b76626d3..000000000000 --- a/src/py/flwr_experimental/baseline/run.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../../../../ - -# Build `.whl` from current state -./dev/build.sh - -# Execute `run.py` -python -m flwr_experimental.baseline.run \ - --adapter="docker" \ - --baseline="tf_fashion_mnist" \ - --setting="minimal" diff --git a/src/py/flwr_experimental/baseline/setting.py b/src/py/flwr_experimental/baseline/setting.py deleted file mode 100644 index c47d202539ab..000000000000 --- a/src/py/flwr_experimental/baseline/setting.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Provides a variaty of baseline settings base classes.""" - - -from dataclasses import dataclass -from typing import List, Optional - -from flwr_experimental.ops.instance import Instance - - -@dataclass -class BaseSetting: - """Base class for all settings.""" - - instance_name: str - - -# pylint: disable=too-many-instance-attributes -@dataclass -class ServerSetting(BaseSetting): - """Settings for the server.""" - - strategy: str - rounds: int - min_num_clients: int - sample_fraction: float - min_sample_size: int - training_round_timeout: Optional[int] - lr_initial: float - partial_updates: bool - importance_sampling: bool - dynamic_timeout: bool - alternating_timeout: bool = False - dry_run: bool = False - training_round_timeout_short: Optional[int] = None - - -@dataclass -class ClientSetting(BaseSetting): - """Settings for the client.""" - - # Individual per client - cid: str - partition: int - delay_factor: float - - # Same across all clients - iid_fraction: float - num_clients: int - dry_run: bool - - -@dataclass -class Baseline: - """One specific training setting.""" - - instances: List[Instance] - server: ServerSetting - clients: List[ClientSetting] diff --git a/src/py/flwr_experimental/baseline/tf_cifar/README.md b/src/py/flwr_experimental/baseline/tf_cifar/README.md deleted file mode 100644 index aa1de0074e8d..000000000000 --- 
a/src/py/flwr_experimental/baseline/tf_cifar/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# CIFAR-10/100 - -## Ops -To execute the `run_aws.py` script you will have to create a `.flower_ops` file in the -git root of this project. The file needs to contain the following fields - -``` -[paths] -wheel_dir = ~/development/adap/flower/dist/ -wheel_filename = WHEEL_FILENAME - -[aws] -image_id = ami-0370b0294d7241341 -key_name = AWS_KEY_NAME -subnet_id = YOUR_AWS_SUBNET_ID -security_group_ids = YOUR_AWS_SECURITY_GROUP_ID - -[ssh] -private_key = PATH_TO_YOU_PRIVATE_KEY_TO_SSH_INTO_THE_MACHINES -``` - -### Remarks - -#### Wheel directory -Adjust the wheel directory according to the localation of the repo on your machine. - -#### Security Group -The security group needs to have port 8080 open so that the clients can connect to the server. - -#### Subnet Id -We are starting all instances in the same subnet to be more cost efficent (traffic between EC2 -instances in the same subnet over their private IP does not incure any cost). - -#### AMI -The provided AMI is a bare Ubuntu 18.04 image which was modified with the -`dev/aws-ami-bootstrap.sh` script. - -### Execution -To execute the script simply do: -```bash -python -m flwr_experimental.baseline.tf_cifar.run_aws -``` diff --git a/src/py/flwr_experimental/baseline/tf_cifar/client.py b/src/py/flwr_experimental/baseline/tf_cifar/client.py deleted file mode 100644 index 5233ee368ac8..000000000000 --- a/src/py/flwr_experimental/baseline/tf_cifar/client.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower client using TensorFlow for CIFAR-10/100.""" - - -import argparse -from logging import ERROR, INFO - -import tensorflow as tf - -import flwr as fl -from flwr.common.logger import configure, log -from flwr_experimental.baseline.common import VisionClassificationClient -from flwr_experimental.baseline.dataset import tf_cifar_partitioned -from flwr_experimental.baseline.model import resnet50v2 -from flwr_experimental.baseline.setting import ClientSetting -from flwr_experimental.baseline.tf_cifar.settings import SETTINGS, get_setting - -from . 
import DEFAULT_SERVER_ADDRESS, NUM_CLASSES, SEED - -tf.get_logger().setLevel("ERROR") - - -class ClientSettingNotFound(Exception): - """Raise when client setting could not be found.""" - - -def parse_args() -> argparse.Namespace: - """Parse and return commandline arguments.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--server_address", - type=str, - default=DEFAULT_SERVER_ADDRESS, - help=f"gRPC server address (IPv6, default: {DEFAULT_SERVER_ADDRESS})", - ) - parser.add_argument( - "--log_host", - type=str, - help="HTTP log handler host (no default)", - ) - parser.add_argument( - "--setting", - type=str, - choices=SETTINGS.keys(), - help="Setting to run.", - ) - parser.add_argument("--cid", type=str, required=True, help="Client cid.") - return parser.parse_args() - - -def get_client_setting(setting: str, cid: str) -> ClientSetting: - """Return client setting based on setting name and cid.""" - for client_setting in get_setting(setting).clients: - if client_setting.cid == cid: - return client_setting - - raise ClientSettingNotFound() - - -def main() -> None: - """Load data, create and start CIFAR-10/100 client.""" - args = parse_args() - - client_setting = get_client_setting(args.setting, args.cid) - - # Configure logger - configure(identifier=f"client:{client_setting.cid}", host=args.log_host) - log(INFO, "Starting client, settings: %s", client_setting) - - # Load model - model = resnet50v2(input_shape=(32, 32, 3), num_classes=NUM_CLASSES, seed=SEED) - - # Load local data partition - (xy_train_partitions, xy_test_partitions), _ = tf_cifar_partitioned.load_data( - iid_fraction=client_setting.iid_fraction, - num_partitions=client_setting.num_clients, - cifar100=False, - ) - x_train, y_train = xy_train_partitions[client_setting.partition] - x_test, y_test = xy_test_partitions[client_setting.partition] - if client_setting.dry_run: - x_train = x_train[0:100] - y_train = y_train[0:100] - x_test = x_test[0:50] - y_test = 
y_test[0:50] - - # Start client - client = VisionClassificationClient( - client_setting.cid, - model, - (x_train, y_train), - (x_test, y_test), - client_setting.delay_factor, - NUM_CLASSES, - augment=True, - augment_horizontal_flip=True, - augment_offset=2, - ) - fl.client.start_client(args.server_address, client) - - -if __name__ == "__main__": - # pylint: disable=broad-except - try: - main() - except Exception as err: - log(ERROR, "Fatal error in main") - log(ERROR, err, exc_info=True, stack_info=True) - - # Raise the error again so the exit code is correct - raise err diff --git a/src/py/flwr_experimental/baseline/tf_cifar/download.py b/src/py/flwr_experimental/baseline/tf_cifar/download.py deleted file mode 100644 index 4beb3fb4a0f2..000000000000 --- a/src/py/flwr_experimental/baseline/tf_cifar/download.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Helper script to download CIFAR-10/100.""" - - -import argparse -from logging import INFO - -import tensorflow as tf - -from flwr.common.logger import log - -tf.get_logger().setLevel("ERROR") - - -def main() -> None: - """Download data.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--cifar", - type=int, - choices=[10, 100], - default=10, - help="CIFAR version, allowed values: 10 or 100 (default: 10)", - ) - args = parser.parse_args() - log(INFO, "Download CIFAR-%s", args.cifar) - - # Load model and data - download_data(num_classes=args.cifar) - - -def download_data(num_classes: int) -> None: - """Download CIFAR-10/100.""" - cifar = ( - tf.keras.datasets.cifar10 if num_classes == 10 else tf.keras.datasets.cifar100 - ) - (_, _), (_, _) = cifar.load_data() - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_experimental/baseline/tf_cifar/server.py b/src/py/flwr_experimental/baseline/tf_cifar/server.py deleted file mode 100644 index cb7a99b9d360..000000000000 --- a/src/py/flwr_experimental/baseline/tf_cifar/server.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Flower server for CIFAR-10/100 image classification.""" - - -import argparse -import math -from logging import ERROR, INFO -from typing import Callable, Dict, Optional - -import flwr as fl -from flwr.common.logger import configure, log -from flwr_experimental.baseline.common import get_evaluate_fn -from flwr_experimental.baseline.dataset import tf_cifar_partitioned -from flwr_experimental.baseline.model import resnet50v2 -from flwr_experimental.baseline.tf_cifar.settings import SETTINGS, get_setting - -from . import DEFAULT_SERVER_ADDRESS, NUM_CLASSES, SEED - - -def parse_args() -> argparse.Namespace: - """Parse and return commandline arguments.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--log_host", - type=str, - help="HTTP log handler host (no default)", - ) - parser.add_argument( - "--setting", - type=str, - choices=SETTINGS.keys(), - help="Setting to run.", - ) - - return parser.parse_args() - - -def main() -> None: - """Start server and train a number of rounds.""" - args = parse_args() - - # Configure logger - configure(identifier="server", host=args.log_host) - - server_setting = get_setting(args.setting).server - log(INFO, "server_setting: %s", server_setting) - - # Load evaluation data - (_, _), (x_test, y_test) = tf_cifar_partitioned.load_data( - iid_fraction=0.0, num_partitions=1, cifar100=NUM_CLASSES == 100 - ) - if server_setting.dry_run: - x_test = x_test[0:50] - y_test = y_test[0:50] - - # Load model (for centralized evaluation) - model = resnet50v2(input_shape=(32, 32, 3), num_classes=NUM_CLASSES, seed=SEED) - - # Strategy - evaluate_fn = get_evaluate_fn( - model=model, num_classes=NUM_CLASSES, xy_test=(x_test, y_test) - ) - fit_config_fn = get_on_fit_config_fn( - lr_initial=server_setting.lr_initial, - timeout=server_setting.training_round_timeout, - partial_updates=server_setting.partial_updates, - ) - - if 
server_setting.strategy == "fedavg": - strategy = fl.server.strategy.FedAvg( - fraction_fit=server_setting.sample_fraction, - min_fit_clients=server_setting.min_sample_size, - min_available_clients=server_setting.min_num_clients, - evaluate_fn=evaluate_fn, - on_fit_config_fn=fit_config_fn, - ) - - if server_setting.strategy == "fast-and-slow": - if server_setting.training_round_timeout is None: - raise ValueError( - "No `training_round_timeout` set for `fast-and-slow` strategy" - ) - strategy = fl.server.strategy.FastAndSlow( - fraction_fit=server_setting.sample_fraction, - min_fit_clients=server_setting.min_sample_size, - min_available_clients=server_setting.min_num_clients, - evaluate_fn=evaluate_fn, - on_fit_config_fn=fit_config_fn, - importance_sampling=server_setting.importance_sampling, - dynamic_timeout=server_setting.dynamic_timeout, - dynamic_timeout_percentile=0.8, - alternating_timeout=server_setting.alternating_timeout, - r_fast=1, - r_slow=1, - t_fast=math.ceil(0.5 * server_setting.training_round_timeout), - t_slow=server_setting.training_round_timeout, - ) - - # Run server - fl.server.start_server( - DEFAULT_SERVER_ADDRESS, - config={"num_rounds": server_setting.rounds}, - strategy=strategy, - ) - - -def get_on_fit_config_fn( - lr_initial: float, timeout: Optional[int], partial_updates: bool -) -> Callable[[int], Dict[str, fl.common.Scalar]]: - """Return a function which returns training configurations.""" - - def fit_config(server_round: int) -> Dict[str, fl.common.Scalar]: - """Return a configuration with static batch size and (local) epochs.""" - config: Dict[str, fl.common.Scalar] = { - "epoch_global": str(server_round), - "epochs": str(1), - "batch_size": str(32), - "lr_initial": str(lr_initial), - "lr_decay": str(0.99), - "partial_updates": "1" if partial_updates else "0", - } - if timeout is not None: - config["timeout"] = str(timeout) - - return config - - return fit_config - - -if __name__ == "__main__": - # pylint: disable=broad-except - 
try: - main() - except Exception as err: - log(ERROR, "Fatal error in main") - log(ERROR, err, exc_info=True, stack_info=True) - - # Raise the error again so the exit code is correct - raise err diff --git a/src/py/flwr_experimental/baseline/tf_cifar/settings.py b/src/py/flwr_experimental/baseline/tf_cifar/settings.py deleted file mode 100644 index ed1a72cafac9..000000000000 --- a/src/py/flwr_experimental/baseline/tf_cifar/settings.py +++ /dev/null @@ -1,355 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Provides a variaty of baseline settings for CIFAR.""" - - -from typing import List - -from flwr_experimental.baseline.config import ( - configure_client_instances, - sample_delay_factors, -) -from flwr_experimental.baseline.setting import Baseline, ClientSetting, ServerSetting -from flwr_experimental.ops.instance import Instance - -ROUNDS = 20 -MIN_NUM_CLIENTS = 80 -SAMPLE_FRACTION = 0.1 -MIN_SAMPLE_SIZE = 10 - -LR_INITIAL = 0.01 - -IID_FRACTION = 0.1 -MAX_DELAY_FACTOR = 4.0 # Equals a 5x slowdown - - -def get_setting(name: str) -> Baseline: - """Return appropriate setting.""" - if name not in SETTINGS: - raise Exception( - f"Baseline {name} does not exist. 
Valid settings are: {list(SETTINGS.keys())}" - ) - return SETTINGS[name] - - -def get_instance_name( - instance_names: List[str], num_clients: int, client_index: int -) -> str: - """Return instance_name.""" - idx = client_index // (num_clients // len(instance_names)) - idx = min([idx, len(instance_names) - 1]) - return instance_names[min(idx, len(instance_names))] - - -def configure_uniform_clients( - iid_fraction: float, - instance_names: List[str], - num_clients: int, - dry_run: bool, -) -> List[ClientSetting]: - """Configure `num_clients`, all using the same delay factor.""" - clients = [] - for i in range(num_clients): - client = ClientSetting( - # Set instance on which to run - instance_name=get_instance_name(instance_names, num_clients, i), - # Individual - cid=str(i), - partition=i, - delay_factor=0.0, - # Shared - iid_fraction=iid_fraction, - num_clients=num_clients, - dry_run=dry_run, - ) - clients.append(client) - - return clients - - -# pylint: disable=too-many-arguments -def configure_clients( - iid_fraction: float, - instance_names: List[str], - num_clients: int, - dry_run: bool, - delay_factor_fast: float, - delay_factor_slow: float, - sample_delays: bool = True, -) -> List[ClientSetting]: - """Configure `num_clients` with different delay factors.""" - if sample_delays: - # Configure clients with sampled delay factors - delay_factors = sample_delay_factors( - num_clients=num_clients, max_delay=delay_factor_slow, seed=2020 - ) - return [ - ClientSetting( - # Set instance on which to run - instance_name=get_instance_name(instance_names, num_clients, i), - # Individual - cid=str(i), - partition=i, - delay_factor=delay_factors[i], - # Shared - iid_fraction=iid_fraction, - num_clients=num_clients, - dry_run=dry_run, - ) - for i in range(num_clients) - ] - # Configure clients with fixed delay factors - clients = [] - for i in range(num_clients): - client = ClientSetting( - # Set instance on which to run - instance_name=get_instance_name(instance_names, 
num_clients, i), - # Individual - cid=str(i), - partition=i, - # Indices 0 to 49 fast, 50 to 99 slow - delay_factor=( - delay_factor_fast if i < int(num_clients / 2) else delay_factor_slow - ), - # Shared - iid_fraction=iid_fraction, - num_clients=num_clients, - dry_run=dry_run, - ) - clients.append(client) - - return clients - - -client_instances_100, client_names_100 = configure_client_instances( - num_clients=100, num_cpu=2, num_ram=8 -) - -client_instances_10, client_names_10 = configure_client_instances( - num_clients=10, num_cpu=2, num_ram=8 -) - -client_instances_4, client_names_4 = configure_client_instances( - num_clients=4, num_cpu=2, num_ram=8 -) - -client_instances_2, client_names_2 = configure_client_instances( - num_clients=2, num_cpu=16, num_ram=64 -) - -SETTINGS = { - "fedavg-sync-min": Baseline( - instances=[Instance(name="server", group="server", num_cpu=2, num_ram=8)] - + client_instances_2, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=10, - min_num_clients=2, - sample_fraction=1.0, - min_sample_size=2, - training_round_timeout=None, - lr_initial=0.001, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_uniform_clients( - iid_fraction=1.0, - instance_names=client_names_2, - num_clients=2, - dry_run=False, - ), - ), - "fedavg-sync-10-10": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_10, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=10, - sample_fraction=1.0, - min_sample_size=10, - training_round_timeout=None, - lr_initial=0.001, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_uniform_clients( - iid_fraction=1.0, - instance_names=client_names_10, - num_clients=10, - dry_run=False, - ), - ), - "fedavg-sync-100-10": Baseline( - 
instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=100, - sample_fraction=0.1, - min_sample_size=10, - training_round_timeout=None, - lr_initial=0.001, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_uniform_clients( - iid_fraction=1.0, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - ), - ), - ######################################## - ### PREVIOUS ### - ######################################## - "dry-run": Baseline( - instances=[ - Instance(name="server", group="server", num_cpu=2, num_ram=8), - Instance(name="client", group="clients", num_cpu=2, num_ram=8), - ], - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=1, - min_num_clients=1, - sample_fraction=1.0, - min_sample_size=1, - training_round_timeout=600, - lr_initial=0.01, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=True, - ), - clients=configure_uniform_clients( - iid_fraction=0.1, instance_names=["client"], num_clients=4, dry_run=True - ), - ), - "minimal": Baseline( - instances=[Instance(name="server", group="server", num_cpu=2, num_ram=8)] - + client_instances_4, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=2, - min_num_clients=4, - sample_fraction=0.75, - min_sample_size=3, - training_round_timeout=3600, - lr_initial=0.01, - partial_updates=True, - importance_sampling=True, - dynamic_timeout=True, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_4, - num_clients=4, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), - "fedavg-sync": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, 
- server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=None, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), - "fedavg-async": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=20, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), - "fedfs": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=20, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=True, - dynamic_timeout=True, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), -} diff --git a/src/py/flwr_experimental/baseline/tf_fashion_mnist/README.md 
b/src/py/flwr_experimental/baseline/tf_fashion_mnist/README.md deleted file mode 100644 index 43c25065344b..000000000000 --- a/src/py/flwr_experimental/baseline/tf_fashion_mnist/README.md +++ /dev/null @@ -1,95 +0,0 @@ -# Fashion-MNIST Baselines - -## Prepare - -To execute the `run.py` script you need to create a `.flower_ops` file in the -git root of this project. The file needs to contain the following fields: - -``` -[paths] -wheel_dir = ~/development/adap/flower/dist/ -wheel_filename = flwr-0.0.1-py3-none-any.whl - -[aws] -image_id = ami-0370b0294d7241341 -key_name = AWS_KEY_NAME -subnet_id = YOUR_AWS_SUBNET_ID -security_group_ids = YOUR_AWS_SECURITY_GROUP_ID -logserver_s3_bucket = YOUR_S3_BUCKET - -[ssh] -private_key = PATH_TO_YOU_PRIVATE_KEY_TO_SSH_INTO_THE_MACHINES -``` - -### Remarks - -#### Wheel directory - -Adjust the wheel directory according to the localation of the repo on your -machine. - -#### Security Group - -The security group needs to have port 8080 open so that the clients can connect -to the server. - -#### Subnet Id - -We are starting all instances in the same subnet to be more cost efficent -(traffic between EC2 instances in the same subnet over their private IP does -not incure any cost). - -#### AMI - -The provided AMI is a bare Ubuntu 18.04 image which was modified using the -`dev/aws-ami-bootstrap.sh` script. - -## Build Docker Container - -```bash -./src/docker/build.sh -``` - -## Build Python Wheel - -To execute the latest version of your baselines during development, please -ensure that the `.whl` build in `dist/` reflects your changes. 
Re-build -if necessary: - -```bash -./dev/build.sh -``` - -## Execute - -To execute a baseline setting locally using docker: - -```bash -python -m flwr_experimental.baseline.tf_fashion_mnist.run --adapter="docker" --setting="minimal" -``` - -To execute a baseline setting remotely on AWS: - -```bash -python -m flwr_experimental.baseline.tf_fashion_mnist.run --adapter="ec2" --setting="minimal" -``` - -Or alternatively, customize the wrapper script `run.sh` and run it using your AWS profile: - -```bash -AWS_PROFILE=your-aws-profile src/py/flwr_experimental/baseline/run.sh -``` - -## Get Results - -See all current and past results on the S3 website of your S3 bucket: - -``` -http://[your-flower-log-s3-bucket].s3-website.eu-central-1.amazonaws.com/ -``` - -Download and filter invididual logs using `cURL` and `jq`: - -```bash -curl http://[your-flower-log-s3-bucket].s3-eu-central-1.amazonaws.com/[your-experiment].log | jq '.identifier + " => " + .message' -``` diff --git a/src/py/flwr_experimental/baseline/tf_fashion_mnist/__init__.py b/src/py/flwr_experimental/baseline/tf_fashion_mnist/__init__.py deleted file mode 100644 index 62c3d33eee7f..000000000000 --- a/src/py/flwr_experimental/baseline/tf_fashion_mnist/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Flower baseline using TensorFlow for Fashion-MNIST image classification.""" - - -DEFAULT_SERVER_ADDRESS = "[::]:8080" - -SEED = 2020 diff --git a/src/py/flwr_experimental/baseline/tf_fashion_mnist/client.py b/src/py/flwr_experimental/baseline/tf_fashion_mnist/client.py deleted file mode 100644 index 6177336446f1..000000000000 --- a/src/py/flwr_experimental/baseline/tf_fashion_mnist/client.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower client using TensorFlow for Fashion-MNIST image classification.""" - - -import argparse -from logging import ERROR, INFO - -import tensorflow as tf - -import flwr as fl -from flwr.common.logger import configure, log -from flwr_experimental.baseline.common import VisionClassificationClient -from flwr_experimental.baseline.dataset import tf_fashion_mnist_partitioned -from flwr_experimental.baseline.model import orig_cnn -from flwr_experimental.baseline.setting import ClientSetting -from flwr_experimental.baseline.tf_fashion_mnist.settings import SETTINGS, get_setting - -from . 
import DEFAULT_SERVER_ADDRESS, SEED - -tf.get_logger().setLevel("ERROR") - - -class ClientSettingNotFound(Exception): - """Raise when client setting could not be found.""" - - -def parse_args() -> argparse.Namespace: - """Parse and return commandline arguments.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--server_address", - type=str, - default=DEFAULT_SERVER_ADDRESS, - help=f"gRPC server address (IPv6, default: {DEFAULT_SERVER_ADDRESS})", - ) - parser.add_argument( - "--log_host", - type=str, - help="HTTP log handler host (no default)", - ) - parser.add_argument( - "--setting", - type=str, - choices=SETTINGS.keys(), - help="Setting to run.", - ) - parser.add_argument("--cid", type=str, required=True, help="Client cid.") - return parser.parse_args() - - -def get_client_setting(setting: str, cid: str) -> ClientSetting: - """Return client setting based on setting name and cid.""" - for client_setting in get_setting(setting).clients: - if client_setting.cid == cid: - return client_setting - - raise ClientSettingNotFound() - - -def main() -> None: - """Load data, create and start Fashion-MNIST client.""" - args = parse_args() - - client_setting = get_client_setting(args.setting, args.cid) - - # Configure logger - configure(identifier=f"client:{client_setting.cid}", host=args.log_host) - log(INFO, "Starting client, settings: %s", client_setting) - - # Load model - model = orig_cnn(input_shape=(28, 28, 1), seed=SEED) - - # Load local data partition - ( - (xy_train_partitions, xy_test_partitions), - _, - ) = tf_fashion_mnist_partitioned.load_data( - iid_fraction=client_setting.iid_fraction, - num_partitions=client_setting.num_clients, - ) - x_train, y_train = xy_train_partitions[client_setting.partition] - x_test, y_test = xy_test_partitions[client_setting.partition] - if client_setting.dry_run: - x_train = x_train[0:100] - y_train = y_train[0:100] - x_test = x_test[0:50] - y_test = y_test[0:50] - - # Start client - client = 
VisionClassificationClient( - client_setting.cid, - model, - (x_train, y_train), - (x_test, y_test), - client_setting.delay_factor, - 10, - augment=True, - augment_horizontal_flip=False, - augment_offset=1, - ) - fl.client.start_client(args.server_address, client) - - -if __name__ == "__main__": - # pylint: disable=broad-except - try: - main() - except Exception as err: - log(ERROR, "Fatal error in main") - log(ERROR, err, exc_info=True, stack_info=True) - - # Raise the error again so the exit code is correct - raise err diff --git a/src/py/flwr_experimental/baseline/tf_fashion_mnist/download.py b/src/py/flwr_experimental/baseline/tf_fashion_mnist/download.py deleted file mode 100644 index c2f0eb580cf0..000000000000 --- a/src/py/flwr_experimental/baseline/tf_fashion_mnist/download.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Helper script to download CIFAR-10/100.""" - - -from logging import INFO - -import tensorflow as tf - -from flwr.common.logger import log - -tf.get_logger().setLevel("ERROR") - - -def main() -> None: - """Download data.""" - log(INFO, "Download Fashion-MNIST") - tf.keras.datasets.fashion_mnist.load_data() - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_experimental/baseline/tf_fashion_mnist/fn_plots.py b/src/py/flwr_experimental/baseline/tf_fashion_mnist/fn_plots.py deleted file mode 100644 index e28d98808d8a..000000000000 --- a/src/py/flwr_experimental/baseline/tf_fashion_mnist/fn_plots.py +++ /dev/null @@ -1,416 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Generate plots for Fashion-MNIST results.""" - - -from typing import List, Tuple - -import numpy as np - -from flwr_experimental.baseline.plot import line_chart - -RESULTS = { - "fn-c10-r40-fedfs-v1-16": [ - (0, 0.03759999945759773), - (1, 0.7357000112533569), - (2, 0.7964000105857849), - (3, 0.8057000041007996), - (4, 0.8197000026702881), - (5, 0.8321999907493591), - (6, 0.8583999872207642), - (7, 0.8324999809265137), - (8, 0.864300012588501), - (9, 0.8565000295639038), - (10, 0.8743000030517578), - (11, 0.8575000166893005), - (12, 0.8496999740600586), - (13, 0.8644999861717224), - (14, 0.8758999705314636), - (15, 0.8762999773025513), - (16, 0.8198999762535095), - (17, 0.8725000023841858), - (18, 0.882099986076355), - (19, 0.8758999705314636), - (20, 0.8791000247001648), - (21, 0.8792999982833862), - (22, 0.885699987411499), - (23, 0.8748000264167786), - (24, 0.8561000227928162), - (25, 0.8564000129699707), - (26, 0.8363999724388123), - (27, 0.876800000667572), - (28, 0.8805999755859375), - (29, 0.8569999933242798), - (30, 0.8654000163078308), - (31, 0.8705999851226807), - (32, 0.8468999862670898), - (33, 0.887499988079071), - (34, 0.8823000192642212), - (35, 0.8806999921798706), - (36, 0.8823000192642212), - (37, 0.8889999985694885), - (38, 0.8101000189781189), - (39, 0.8652999997138977), - (40, 0.8766000270843506), - ], - "fn-c10-r40-fedfs-v0-16-16": [ - (0, 0.03759999945759773), - (1, 0.7462000250816345), - (2, 0.7843000292778015), - (3, 0.7990999817848206), - (4, 0.8149999976158142), - (5, 0.8291000127792358), - (6, 0.8413000106811523), - (7, 0.8600999712944031), - (8, 0.8511999845504761), - (9, 0.8668000102043152), - (10, 0.857699990272522), - (11, 0.8673999905586243), - (12, 0.8765000104904175), - (13, 0.8773999810218811), - (14, 0.8773999810218811), - (15, 0.8562999963760376), - (16, 0.8758999705314636), - (17, 0.8729000091552734), - (18, 0.8722000122070312), - (19, 
0.8356999754905701), - (20, 0.8776999711990356), - (21, 0.8845000267028809), - (22, 0.8700000047683716), - (23, 0.8766999840736389), - (24, 0.8870999813079834), - (25, 0.7976999878883362), - (26, 0.876800000667572), - (27, 0.8084999918937683), - (28, 0.8737999796867371), - (29, 0.8867999911308289), - (30, 0.8797000050544739), - (31, 0.8866999745368958), - (32, 0.8795999884605408), - (33, 0.8743000030517578), - (34, 0.8881000280380249), - (35, 0.8858000040054321), - (36, 0.8881000280380249), - (37, 0.8851000070571899), - (38, 0.8403000235557556), - (39, 0.8751000165939331), - (40, 0.8812000155448914), - ], - "fn-c10-r40-fedfs-v0-16-08": [ - (0, 0.03759999945759773), - (1, 0.644599974155426), - (2, 0.7526000142097473), - (3, 0.7882999777793884), - (4, 0.8141000270843506), - (5, 0.8335000276565552), - (6, 0.8378999829292297), - (7, 0.8572999835014343), - (8, 0.86080002784729), - (9, 0.84170001745224), - (10, 0.8429999947547913), - (11, 0.8489000201225281), - (12, 0.858299970626831), - (13, 0.8694999814033508), - (14, 0.8694000244140625), - (15, 0.8751999735832214), - (16, 0.8722000122070312), - (17, 0.8736000061035156), - (18, 0.8744000196456909), - (19, 0.8763999938964844), - (20, 0.8431000113487244), - (21, 0.8564000129699707), - (22, 0.869700014591217), - (23, 0.873199999332428), - (24, 0.8788999915122986), - (25, 0.8726000189781189), - (26, 0.8784999847412109), - (27, 0.8777999877929688), - (28, 0.8776000142097473), - (29, 0.8830000162124634), - (30, 0.8838000297546387), - (31, 0.873199999332428), - (32, 0.8822000026702881), - (33, 0.8835999965667725), - (34, 0.8826000094413757), - (35, 0.8847000002861023), - (36, 0.8835999965667725), - (37, 0.7781000137329102), - (38, 0.8820000290870667), - (39, 0.8762000203132629), - (40, 0.8736000061035156), - ], - "fn-c10-r40-fedavg-16": [ - (0, 0.03759999945759773), - (1, 0.6743000149726868), - (2, 0.7746000289916992), - (3, 0.7752000093460083), - (4, 0.7994999885559082), - (5, 0.8137000203132629), - (6, 0.8341000080108643), 
- (7, 0.822700023651123), - (8, 0.822700023651123), - (9, 0.8327999711036682), - (10, 0.8264999985694885), - (11, 0.8608999848365784), - (12, 0.8526999950408936), - (13, 0.859000027179718), - (14, 0.8611000180244446), - (15, 0.8482999801635742), - (16, 0.8560000061988831), - (17, 0.8414000272750854), - (18, 0.8305000066757202), - (19, 0.8445000052452087), - (20, 0.8525999784469604), - (21, 0.8528000116348267), - (22, 0.8544999957084656), - (23, 0.8572999835014343), - (24, 0.8547000288963318), - (25, 0.8582000136375427), - (26, 0.8501999974250793), - (27, 0.8741999864578247), - (28, 0.8605999946594238), - (29, 0.8578000068664551), - (30, 0.8578000068664551), - (31, 0.8598999977111816), - (32, 0.8450999855995178), - (33, 0.85589998960495), - (34, 0.8565999865531921), - (35, 0.8582000136375427), - (36, 0.8547999858856201), - (37, 0.8608999848365784), - (38, 0.8503000140190125), - (39, 0.8677999973297119), - (40, 0.8535000085830688), - ], - "fn-c50-r40-fedfs-v1-16": [ - (0, 0.03759999945759773), - (1, 0.7195000052452087), - (2, 0.7919999957084656), - (3, 0.8069000244140625), - (4, 0.8201000094413757), - (5, 0.8353000283241272), - (6, 0.8583999872207642), - (7, 0.8440999984741211), - (8, 0.8585000038146973), - (9, 0.8571000099182129), - (10, 0.840499997138977), - (11, 0.8586000204086304), - (12, 0.853600025177002), - (13, 0.8680999875068665), - (14, 0.8540999889373779), - (15, 0.8722000122070312), - (16, 0.8702999949455261), - (17, 0.8741999864578247), - (18, 0.8626000285148621), - (19, 0.8730999827384949), - (20, 0.8611999750137329), - (21, 0.8758999705314636), - (22, 0.8833000063896179), - (23, 0.8773000240325928), - (24, 0.8705000281333923), - (25, 0.8709999918937683), - (26, 0.8791999816894531), - (27, 0.8755999803543091), - (28, 0.8640000224113464), - (29, 0.8776000142097473), - (30, 0.8615000247955322), - (31, 0.8776999711990356), - (32, 0.8809999823570251), - (33, 0.8824999928474426), - (34, 0.8783000111579895), - (35, 0.8817999958992004), - (36, 
0.8858000040054321), - (37, 0.8791999816894531), - (38, 0.8888999819755554), - (39, 0.8822000026702881), - (40, 0.8755999803543091), - ], - "fn-c50-r40-fedfs-v0-16-16": [ - (0, 0.03759999945759773), - (1, 0.7275999784469604), - (2, 0.7993999719619751), - (3, 0.8122000098228455), - (4, 0.8399999737739563), - (5, 0.8474000096321106), - (6, 0.8608999848365784), - (7, 0.8666999936103821), - (8, 0.8718000054359436), - (9, 0.8705000281333923), - (10, 0.8758999705314636), - (11, 0.8726999759674072), - (12, 0.8804000020027161), - (13, 0.8805999755859375), - (14, 0.8823000192642212), - (15, 0.8834999799728394), - (16, 0.8777999877929688), - (17, 0.883400022983551), - (18, 0.8848999738693237), - (19, 0.8844000101089478), - (20, 0.8852999806404114), - (21, 0.8855999708175659), - (22, 0.8845000267028809), - (23, 0.8885999917984009), - (24, 0.8859000205993652), - (25, 0.8862000107765198), - (26, 0.8885999917984009), - (27, 0.8881999850273132), - (28, 0.8901000022888184), - (29, 0.885699987411499), - (30, 0.885200023651123), - (31, 0.8899000287055969), - (32, 0.8924000263214111), - (33, 0.890500009059906), - (34, 0.8894000053405762), - (35, 0.8916000127792358), - (36, 0.8934000134468079), - (37, 0.8913999795913696), - (38, 0.8902000188827515), - (39, 0.8916000127792358), - (40, 0.8913999795913696), - ], - "fn-c50-r40-fedfs-v0-16-08": [ - (0, 0.03759999945759773), - (1, 0.6811000108718872), - (2, 0.7753999829292297), - (3, 0.8039000034332275), - (4, 0.8253999948501587), - (5, 0.8299000263214111), - (6, 0.8508999943733215), - (7, 0.8583999872207642), - (8, 0.8583999872207642), - (9, 0.8593000173568726), - (10, 0.8654000163078308), - (11, 0.8607000112533569), - (12, 0.8736000061035156), - (13, 0.8740000128746033), - (14, 0.8770999908447266), - (15, 0.8766000270843506), - (16, 0.8762000203132629), - (17, 0.8787999749183655), - (18, 0.8787999749183655), - (19, 0.8801000118255615), - (20, 0.879800021648407), - (21, 0.8812999725341797), - (22, 0.8828999996185303), - (23, 
0.8848000168800354), - (24, 0.8794999718666077), - (25, 0.8830000162124634), - (26, 0.8841000199317932), - (27, 0.8841000199317932), - (28, 0.8816999793052673), - (29, 0.8845000267028809), - (30, 0.8884999752044678), - (31, 0.8881999850273132), - (32, 0.8885999917984009), - (33, 0.8899000287055969), - (34, 0.8883000016212463), - (35, 0.8884000182151794), - (36, 0.8914999961853027), - (37, 0.8913999795913696), - (38, 0.8920999765396118), - (39, 0.8902999758720398), - (40, 0.8909000158309937), - ], - "fn-c50-r40-fedavg-16": [ - (0, 0.03759999945759773), - (1, 0.6868000030517578), - (2, 0.7861999869346619), - (3, 0.8012999892234802), - (4, 0.8083000183105469), - (5, 0.8226000070571899), - (6, 0.823199987411499), - (7, 0.84170001745224), - (8, 0.8342000246047974), - (9, 0.8363000154495239), - (10, 0.8543000221252441), - (11, 0.8504999876022339), - (12, 0.8500999808311462), - (13, 0.8579999804496765), - (14, 0.8633999824523926), - (15, 0.852400004863739), - (16, 0.8640000224113464), - (17, 0.8540999889373779), - (18, 0.8550000190734863), - (19, 0.8555999994277954), - (20, 0.8589000105857849), - (21, 0.8683000206947327), - (22, 0.8655999898910522), - (23, 0.8604999780654907), - (24, 0.859000027179718), - (25, 0.8605999946594238), - (26, 0.8716999888420105), - (27, 0.8683000206947327), - (28, 0.867900013923645), - (29, 0.8668000102043152), - (30, 0.859000027179718), - (31, 0.8586999773979187), - (32, 0.8657000064849854), - (33, 0.8700000047683716), - (34, 0.8619999885559082), - (35, 0.8705000281333923), - (36, 0.8709999918937683), - (37, 0.8708999752998352), - (38, 0.8719000220298767), - (39, 0.8698999881744385), - (40, 0.8705999851226807), - ], -} - - -def accuracy_fn_c10() -> None: - """Generate plots.""" - lines = [ - ("FedFSv1, c=10, t_max=16", RESULTS["fn-c10-r40-fedfs-v1-16"]), - ("FedFSv0, c=10, t=16/16", RESULTS["fn-c10-r40-fedfs-v0-16-16"]), - ("FedFSv0, c=10, t=16/08", RESULTS["fn-c10-r40-fedfs-v0-16-08"]), - ("FedAvg, c=10, t=16", 
RESULTS["fn-c10-r40-fedavg-16"]), - ] - plot(lines, "fmnist-fn-progress-c10") - - -def accuracy_fn_c50() -> None: - """Generate plots.""" - lines = [ - ("FedFSv1, c=10, t_max=16", RESULTS["fn-c50-r40-fedfs-v1-16"]), - ("FedFSv0, c=10, t=16/16", RESULTS["fn-c50-r40-fedfs-v0-16-16"]), - ("FedFSv0, c=10, t=16/08", RESULTS["fn-c50-r40-fedfs-v0-16-08"]), - ("FedAvg, c=10, t=16", RESULTS["fn-c50-r40-fedavg-16"]), - ] - plot(lines, "fmnist-fn-progress-c50") - - -def plot(lines: List[Tuple[str, List[Tuple[int, float]]]], filename: str) -> None: - """Plot a single line chart.""" - values = [np.array([x * 100 for _, x in val]) for _, val in lines] - labels = [label for label, _ in lines] - line_chart( - values, - labels, - "Round", - "Accuracy", - filename=filename, - y_floor=60, - y_ceil=100, - ) - - -def main() -> None: - """Call all plot functions.""" - accuracy_fn_c10() - accuracy_fn_c50() - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_experimental/baseline/tf_fashion_mnist/gen_plots.py b/src/py/flwr_experimental/baseline/tf_fashion_mnist/gen_plots.py deleted file mode 100644 index f6c0383367fd..000000000000 --- a/src/py/flwr_experimental/baseline/tf_fashion_mnist/gen_plots.py +++ /dev/null @@ -1,334 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Generate plots for Fashion-MNIST results.""" - - -from typing import List, Tuple - -import numpy as np - -from flwr_experimental.baseline.plot import bar_chart, line_chart - -RESULTS = { - "fedavg-t10": [ - (0, 0.03759999945759773), - (1, 0.03759999945759773), - (2, 0.03759999945759773), - (3, 0.03759999945759773), - (4, 0.03759999945759773), - (5, 0.03759999945759773), - (6, 0.03759999945759773), - (7, 0.03759999945759773), - (8, 0.03759999945759773), - (9, 0.03759999945759773), - (10, 0.03759999945759773), - (11, 0.03759999945759773), - (12, 0.03759999945759773), - (13, 0.03759999945759773), - (14, 0.03759999945759773), - (15, 0.03759999945759773), - (16, 0.03759999945759773), - (17, 0.03759999945759773), - (18, 0.03759999945759773), - (19, 0.03759999945759773), - (20, 0.03759999945759773), - ], - "fedavg-t12": [ - (0, 0.03759999945759773), - (1, 0.03759999945759773), - (2, 0.03759999945759773), - (3, 0.03759999945759773), - (4, 0.03759999945759773), - (5, 0.03759999945759773), - (6, 0.03759999945759773), - (7, 0.03759999945759773), - (8, 0.03759999945759773), - (9, 0.03759999945759773), - (10, 0.03759999945759773), - (11, 0.03759999945759773), - (12, 0.03759999945759773), - (13, 0.03759999945759773), - (14, 0.03759999945759773), - (15, 0.03759999945759773), - (16, 0.03759999945759773), - (17, 0.03759999945759773), - (18, 0.03759999945759773), - (19, 0.03759999945759773), - (20, 0.03759999945759773), - ], - "fedavg-t14": [ - (0, 0.03759999945759773), - (1, 0.03759999945759773), - (2, 0.6743999719619751), - (3, 0.6802999973297119), - (4, 0.6802999973297119), - (5, 0.6802999973297119), - (6, 0.6802999973297119), - (7, 0.7853999733924866), - (8, 0.7853999733924866), - (9, 0.7876999974250793), - (10, 0.7642999887466431), - (11, 0.8054999709129333), - (12, 0.8181999921798706), - (13, 0.8108999729156494), - (14, 0.7907000184059143), - (15, 0.763700008392334), - (16, 
0.8091999888420105), - (17, 0.8296999931335449), - (18, 0.8123999834060669), - (19, 0.8123999834060669), - (20, 0.8101999759674072), - ], - "fedavg-t16": [ - (0, 0.03759999945759773), - (1, 0.7197999954223633), - (2, 0.7720999717712402), - (3, 0.7900999784469604), - (4, 0.7811999917030334), - (5, 0.7724000215530396), - (6, 0.8023999929428101), - (7, 0.8043000102043152), - (8, 0.8230999708175659), - (9, 0.8327999711036682), - (10, 0.8299000263214111), - (11, 0.8402000069618225), - (12, 0.853600025177002), - (13, 0.8370000123977661), - (14, 0.83160001039505), - (15, 0.8424000144004822), - (16, 0.830299973487854), - (17, 0.8476999998092651), - (18, 0.8632000088691711), - (19, 0.8636999726295471), - (20, 0.8657000064849854), - ], - "fedfs-t10": [ - (0, 0.03759999945759773), - (1, 0.7343000173568726), - (2, 0.7664999961853027), - (3, 0.7900000214576721), - (4, 0.805899977684021), - (5, 0.8237000107765198), - (6, 0.8406999707221985), - (7, 0.8263000249862671), - (8, 0.8442999720573425), - (9, 0.8564000129699707), - (10, 0.8651999831199646), - (11, 0.8375999927520752), - (12, 0.8646000027656555), - (13, 0.8669999837875366), - (14, 0.861299991607666), - (15, 0.8773999810218811), - (16, 0.800599992275238), - (17, 0.8676999807357788), - (18, 0.8763999938964844), - (19, 0.8695999979972839), - (20, 0.873199999332428), - ], - "fedfs-t12": [ - (0, 0.03759999945759773), - (1, 0.7153000235557556), - (2, 0.7835999727249146), - (3, 0.8083999752998352), - (4, 0.816100001335144), - (5, 0.8215000033378601), - (6, 0.8429999947547913), - (7, 0.8464000225067139), - (8, 0.8603000044822693), - (9, 0.8482999801635742), - (10, 0.8450000286102295), - (11, 0.866599977016449), - (12, 0.863099992275238), - (13, 0.8709999918937683), - (14, 0.873199999332428), - (15, 0.8701000213623047), - (16, 0.8600000143051147), - (17, 0.8766999840736389), - (18, 0.8697999715805054), - (19, 0.8795999884605408), - (20, 0.8830999732017517), - ], - "fedfs-t14": [ - (0, 0.03759999945759773), - (1, 
0.7245000004768372), - (2, 0.7972000241279602), - (3, 0.8059999942779541), - (4, 0.8252999782562256), - (5, 0.8334000110626221), - (6, 0.8560000061988831), - (7, 0.8510000109672546), - (8, 0.8650000095367432), - (9, 0.8621000051498413), - (10, 0.866599977016449), - (11, 0.8615999817848206), - (12, 0.8636999726295471), - (13, 0.8740000128746033), - (14, 0.866100013256073), - (15, 0.867900013923645), - (16, 0.83160001039505), - (17, 0.8741999864578247), - (18, 0.8736000061035156), - (19, 0.8810999989509583), - (20, 0.8762000203132629), - ], - "fedfs-t16": [ - (0, 0.03759999945759773), - (1, 0.7476999759674072), - (2, 0.7982000112533569), - (3, 0.8276000022888184), - (4, 0.8256999850273132), - (5, 0.8312000036239624), - (6, 0.8536999821662903), - (7, 0.8483999967575073), - (8, 0.85589998960495), - (9, 0.8687000274658203), - (10, 0.8664000034332275), - (11, 0.8586999773979187), - (12, 0.8662999868392944), - (13, 0.8754000067710876), - (14, 0.878600001335144), - (15, 0.8763999938964844), - (16, 0.748199999332428), - (17, 0.8806999921798706), - (18, 0.8794000148773193), - (19, 0.8813999891281128), - (20, 0.8708000183105469), - ], -} - -RESULTS_WALL_CLOCK_TIME = { - "fedavg-14": 218.49, - "fedfs-14": 61.16, - "fedavg-16": 153.56, - "fedfs-16": 66.84, -} - - -def accuracy_t10() -> None: - """Generate plots.""" - lines = [ - ("FedAvg, t=10", RESULTS["fedavg-t10"]), - ("FedFS, t=10", RESULTS["fedfs-t10"]), - ] - plot(lines, "fmnist-progress-t10") - - -def accuracy_t12() -> None: - """Generate plots.""" - lines = [ - ("FedAvg, t=12", RESULTS["fedavg-t12"]), - ("FedFS, t=12", RESULTS["fedfs-t12"]), - ] - plot(lines, "fmnist-progress-t12") - - -def accuracy_t14() -> None: - """Generate plots.""" - lines = [ - ("FedAvg, t=14", RESULTS["fedavg-t14"]), - ("FedFS, t=14", RESULTS["fedfs-t14"]), - ] - plot(lines, "fmnist-progress-t14") - - -def accuracy_t16() -> None: - """Generate plots.""" - lines = [ - ("FedAvg, t=16", RESULTS["fedavg-t16"]), - ("FedFS, t=16", 
RESULTS["fedfs-t16"]), - ] - plot(lines, "fmnist-progress-t16") - - -def accuracy_fedavg_vs_fedfs() -> None: - """Comparision of FedAvg vs FedFS.""" - fedavg = [ - RESULTS["fedavg-t10"][-1][1], - RESULTS["fedavg-t12"][-1][1], - RESULTS["fedavg-t14"][-1][1], - RESULTS["fedavg-t16"][-1][1], - ] - fedfs = [ - RESULTS["fedfs-t10"][-1][1], - RESULTS["fedfs-t12"][-1][1], - RESULTS["fedfs-t14"][-1][1], - RESULTS["fedfs-t16"][-1][1], - ] - bar_chart( - y_values=[ - np.array([x * 100 for x in fedavg]), - np.array([x * 100 for x in fedfs]), - ], - bar_labels=["FedAvg", "FedFS"], - x_label="Timeout", - x_tick_labels=["T=10", "T=12", "T=14", "T=16"], - y_label="Accuracy", - filename="fmnist-accuracy_fedavg_vs_fedfs", - ) - - -def wall_clock_time_fedavg_vs_fedfs() -> None: - """Comparision of FedAvg vs FedFS.""" - - bar_chart( - y_values=[ - np.array( - [ - RESULTS_WALL_CLOCK_TIME["fedavg-14"], - RESULTS_WALL_CLOCK_TIME["fedavg-16"], - ] - ), - np.array( - [ - RESULTS_WALL_CLOCK_TIME["fedfs-t14"], - RESULTS_WALL_CLOCK_TIME["fedfs-16"], - ] - ), - ], - bar_labels=["FedAvg", "FedFS"], - x_label="Timeout", - x_tick_labels=["T=14", "T=16"], - y_label="Completion time", - filename="fmnist-time_fedavg_vs_fedfs", - ) - - -def plot(lines: List[Tuple[str, List[Tuple[int, float]]]], filename: str) -> None: - """Plot a single line chart.""" - values = [np.array([x * 100 for _, x in val]) for _, val in lines] - labels = [label for label, _ in lines] - line_chart( - values, - labels, - "Round", - "Accuracy", - filename=filename, - y_floor=0, - y_ceil=100, - ) - - -def main() -> None: - """Call all plot functions.""" - accuracy_t10() - accuracy_t12() - accuracy_t14() - accuracy_t16() - accuracy_fedavg_vs_fedfs() - wall_clock_time_fedavg_vs_fedfs() - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_experimental/baseline/tf_fashion_mnist/server.py b/src/py/flwr_experimental/baseline/tf_fashion_mnist/server.py deleted file mode 100644 index fbeb0683df28..000000000000 --- 
a/src/py/flwr_experimental/baseline/tf_fashion_mnist/server.py +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower server for Fashion-MNIST image classification.""" - - -import argparse -import math -from logging import ERROR, INFO -from typing import Callable, Dict, Optional - -import flwr as fl -from flwr.common.logger import configure, log -from flwr_experimental.baseline.common import get_evaluate_fn -from flwr_experimental.baseline.dataset import tf_fashion_mnist_partitioned -from flwr_experimental.baseline.model import orig_cnn -from flwr_experimental.baseline.tf_fashion_mnist.settings import SETTINGS, get_setting - -from . 
import DEFAULT_SERVER_ADDRESS, SEED - - -def parse_args() -> argparse.Namespace: - """Parse and return commandline arguments.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--log_host", - type=str, - help="HTTP log handler host (no default)", - ) - parser.add_argument( - "--setting", - type=str, - choices=SETTINGS.keys(), - help="Setting to run.", - ) - - return parser.parse_args() - - -def main() -> None: - """Start server and train a number of rounds.""" - args = parse_args() - - # Configure logger - configure(identifier="server", host=args.log_host) - - server_setting = get_setting(args.setting).server - log(INFO, "server_setting: %s", server_setting) - - # Load evaluation data - (_, _), (x_test, y_test) = tf_fashion_mnist_partitioned.load_data( - iid_fraction=0.0, num_partitions=1 - ) - if server_setting.dry_run: - x_test = x_test[0:50] - y_test = y_test[0:50] - - # Load model (for centralized evaluation) - model = orig_cnn(input_shape=(28, 28, 1), seed=SEED) - - # Strategy - evaluate_fn = get_evaluate_fn(model=model, num_classes=10, xy_test=(x_test, y_test)) - on_fit_config_fn = get_on_fit_config_fn( - lr_initial=server_setting.lr_initial, - timeout=server_setting.training_round_timeout, - partial_updates=server_setting.partial_updates, - ) - - if server_setting.strategy == "fedavg": - strategy = fl.server.strategy.FedAvg( - fraction_fit=server_setting.sample_fraction, - min_fit_clients=server_setting.min_sample_size, - min_available_clients=server_setting.min_num_clients, - evaluate_fn=evaluate_fn, - on_fit_config_fn=on_fit_config_fn, - ) - - if server_setting.strategy == "fast-and-slow": - if server_setting.training_round_timeout is None: - raise ValueError( - "No `training_round_timeout` set for `fast-and-slow` strategy" - ) - t_fast = ( - math.ceil(0.5 * server_setting.training_round_timeout) - if server_setting.training_round_timeout_short is None - else server_setting.training_round_timeout_short - ) - strategy = 
fl.server.strategy.FastAndSlow( - fraction_fit=server_setting.sample_fraction, - min_fit_clients=server_setting.min_sample_size, - min_available_clients=server_setting.min_num_clients, - evaluate_fn=evaluate_fn, - on_fit_config_fn=on_fit_config_fn, - importance_sampling=server_setting.importance_sampling, - dynamic_timeout=server_setting.dynamic_timeout, - dynamic_timeout_percentile=0.8, - alternating_timeout=server_setting.alternating_timeout, - r_fast=1, - r_slow=1, - t_fast=t_fast, - t_slow=server_setting.training_round_timeout, - ) - - if server_setting.strategy == "fedfs-v0": - if server_setting.training_round_timeout is None: - raise ValueError("No `training_round_timeout` set for `fedfs-v0` strategy") - t_fast = ( - math.ceil(0.5 * server_setting.training_round_timeout) - if server_setting.training_round_timeout_short is None - else server_setting.training_round_timeout_short - ) - strategy = fl.server.strategy.FedFSv0( - fraction_fit=server_setting.sample_fraction, - min_fit_clients=server_setting.min_sample_size, - min_available_clients=server_setting.min_num_clients, - evaluate_fn=evaluate_fn, - on_fit_config_fn=on_fit_config_fn, - r_fast=1, - r_slow=1, - t_fast=t_fast, - t_slow=server_setting.training_round_timeout, - ) - - if server_setting.strategy == "fedfs-v1": - if server_setting.training_round_timeout is None: - raise ValueError("No `training_round_timeout` set for `fedfs-v1` strategy") - strategy = fl.server.strategy.FedFSv1( - fraction_fit=server_setting.sample_fraction, - min_fit_clients=server_setting.min_sample_size, - min_available_clients=server_setting.min_num_clients, - evaluate_fn=evaluate_fn, - on_fit_config_fn=on_fit_config_fn, - dynamic_timeout_percentile=0.8, - r_fast=1, - r_slow=1, - t_max=server_setting.training_round_timeout, - use_past_contributions=True, - ) - - if server_setting.strategy == "qffedavg": - strategy = fl.server.strategy.QFedAvg( - q_param=0.2, - qffl_learning_rate=0.1, - fraction_fit=server_setting.sample_fraction, 
- min_fit_clients=server_setting.min_sample_size, - min_available_clients=server_setting.min_num_clients, - evaluate_fn=evaluate_fn, - on_fit_config_fn=on_fit_config_fn, - ) - - # Run server - log(INFO, "Instantiating server, strategy: %s", str(strategy)) - fl.server.start_server( - DEFAULT_SERVER_ADDRESS, - config={"num_rounds": server_setting.rounds}, - strategy=strategy, - ) - - -def get_on_fit_config_fn( - lr_initial: float, timeout: Optional[int], partial_updates: bool -) -> Callable[[int], Dict[str, fl.common.Scalar]]: - """Return a function which returns training configurations.""" - - def fit_config(server_round: int) -> Dict[str, fl.common.Scalar]: - """Return a configuration with static batch size and (local) epochs.""" - config: Dict[str, fl.common.Scalar] = { - "epoch_global": str(server_round), - "epochs": str(5), - "batch_size": str(10), - "lr_initial": str(lr_initial), - "lr_decay": str(0.99), - "partial_updates": "1" if partial_updates else "0", - } - if timeout is not None: - config["timeout"] = str(timeout) - - return config - - return fit_config - - -if __name__ == "__main__": - # pylint: disable=broad-except - try: - main() - except Exception as err: - log(ERROR, "Fatal error in main") - log(ERROR, err, exc_info=True, stack_info=True) - - # Raise the error again so the exit code is correct - raise err diff --git a/src/py/flwr_experimental/baseline/tf_fashion_mnist/settings.py b/src/py/flwr_experimental/baseline/tf_fashion_mnist/settings.py deleted file mode 100644 index 72adc1f0be04..000000000000 --- a/src/py/flwr_experimental/baseline/tf_fashion_mnist/settings.py +++ /dev/null @@ -1,998 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Provides a variaty of baseline settings for Fashion-MNIST.""" - - -from typing import List - -from flwr_experimental.baseline.config import ( - configure_client_instances, - sample_delay_factors, - sample_real_delay_factors, -) -from flwr_experimental.baseline.setting import Baseline, ClientSetting, ServerSetting -from flwr_experimental.ops.instance import Instance - -N20_ROUNDS = 50 -ROUNDS = 20 -MIN_NUM_CLIENTS = 90 -SAMPLE_FRACTION = 0.1 -MIN_SAMPLE_SIZE = 10 - -LR_INITIAL = 0.01 - -IID_FRACTION = 0.1 -MAX_DELAY_FACTOR = 4.0 # Equals a 5x slowdown - - -FN_ROUNDS = 40 -FN_MIN_NUM_CLIENTS = 90 -FN_LR_INITIAL = 0.001 -FN_IID_FRACTION = 0.1 -FN_MAX_DELAY_FACTOR = 4.0 - -FN_SAMPLE_FRACTION_50 = 0.5 -FN_MIN_SAMPLE_SIZE_50 = 50 - -FN_SAMPLE_FRACTION_10 = 0.1 -FN_MIN_SAMPLE_SIZE_10 = 10 - - -def get_setting(name: str) -> Baseline: - """Return appropriate setting.""" - if name not in SETTINGS: - raise Exception( - f"Baseline {name} does not exist. 
Valid settings are: {list(SETTINGS.keys())}" - ) - return SETTINGS[name] - - -def get_instance_name( - instance_names: List[str], num_clients: int, client_index: int -) -> str: - """Return instance_name.""" - idx = client_index // (num_clients // len(instance_names)) - idx = min([idx, len(instance_names) - 1]) - return instance_names[min(idx, len(instance_names))] - - -def configure_uniform_clients( - iid_fraction: float, - instance_names: List[str], - num_clients: int, - dry_run: bool, -) -> List[ClientSetting]: - """Configure `num_clients`, all using the same delay factor.""" - clients = [] - for i in range(num_clients): - client = ClientSetting( - # Set instance on which to run - instance_name=get_instance_name(instance_names, num_clients, i), - # Individual - cid=str(i), - partition=i, - delay_factor=0.0, - # Shared - iid_fraction=iid_fraction, - num_clients=num_clients, - dry_run=dry_run, - ) - clients.append(client) - - return clients - - -# pylint: disable=too-many-arguments -def configure_clients( - iid_fraction: float, - instance_names: List[str], - num_clients: int, - dry_run: bool, - delay_factor_fast: float, - delay_factor_slow: float, - sample_delays: bool = True, - real_delays: bool = False, -) -> List[ClientSetting]: - """Configure `num_clients` with different delay factors.""" - if sample_delays: - # Configure clients with sampled delay factors - if real_delays: - delay_factors = sample_real_delay_factors( - num_clients=num_clients, seed=2020 - ) - else: - delay_factors = sample_delay_factors( - num_clients=num_clients, max_delay=delay_factor_slow, seed=2020 - ) - return [ - ClientSetting( - # Set instance on which to run - instance_name=get_instance_name(instance_names, num_clients, i), - # Individual - cid=str(i), - partition=i, - delay_factor=delay_factors[i], - # Shared - iid_fraction=iid_fraction, - num_clients=num_clients, - dry_run=dry_run, - ) - for i in range(num_clients) - ] - # Configure clients with fixed delay factors - clients = [] - 
for i in range(num_clients): - client = ClientSetting( - # Set instance on which to run - instance_name=get_instance_name(instance_names, num_clients, i), - # Individual - cid=str(i), - partition=i, - # Indices 0 to 49 fast, 50 to 99 slow - delay_factor=( - delay_factor_fast if i < int(num_clients / 2) else delay_factor_slow - ), - # Shared - iid_fraction=iid_fraction, - num_clients=num_clients, - dry_run=dry_run, - ) - clients.append(client) - - return clients - - -client_instances_100, client_names_100 = configure_client_instances( - num_clients=100, num_cpu=2, num_ram=4 -) - -client_instances_10, client_names_10 = configure_client_instances( - num_clients=10, num_cpu=2, num_ram=4 -) - -SETTINGS = { - ### - ### FedFS vs FedAvg - ### - "fn-c50-r40-fedavg-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_50, - min_sample_size=FN_MIN_SAMPLE_SIZE_50, - training_round_timeout=16, - lr_initial=FN_LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c50-r40-fedfs-v0-16-08": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedfs-v0", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_50, - min_sample_size=FN_MIN_SAMPLE_SIZE_50, - training_round_timeout=16, - lr_initial=FN_LR_INITIAL, - partial_updates=True, - importance_sampling=False, - dynamic_timeout=False, - training_round_timeout_short=8, - ), - 
clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c50-r40-fedfs-v0-16-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedfs-v0", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_50, - min_sample_size=FN_MIN_SAMPLE_SIZE_50, - training_round_timeout=16, - lr_initial=FN_LR_INITIAL, - partial_updates=True, - importance_sampling=False, - dynamic_timeout=False, - training_round_timeout_short=16, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c50-r40-fedfs-v1-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedfs-v1", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_50, - min_sample_size=FN_MIN_SAMPLE_SIZE_50, - training_round_timeout=16, - lr_initial=FN_LR_INITIAL, - partial_updates=True, - importance_sampling=False, - dynamic_timeout=False, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c50-r40-qffedavg-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="qffedavg", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - 
sample_fraction=FN_SAMPLE_FRACTION_50, - min_sample_size=FN_MIN_SAMPLE_SIZE_50, - training_round_timeout=16, - lr_initial=FN_LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c10-r40-fedavg-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_10, - min_sample_size=FN_MIN_SAMPLE_SIZE_10, - training_round_timeout=16, - lr_initial=FN_LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c10-r40-fedfs-v0-16-08": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedfs-v0", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_10, - min_sample_size=FN_MIN_SAMPLE_SIZE_10, - training_round_timeout=16, - lr_initial=FN_LR_INITIAL, - partial_updates=True, - importance_sampling=False, - dynamic_timeout=False, - training_round_timeout_short=8, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c10-r40-fedfs-v0-16-16": Baseline( - 
instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedfs-v0", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_10, - min_sample_size=FN_MIN_SAMPLE_SIZE_10, - training_round_timeout=16, - lr_initial=FN_LR_INITIAL, - partial_updates=True, - importance_sampling=False, - dynamic_timeout=False, - training_round_timeout_short=16, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c10-r40-fedfs-v1-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedfs-v1", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_10, - min_sample_size=FN_MIN_SAMPLE_SIZE_10, - training_round_timeout=16, - lr_initial=FN_LR_INITIAL, - partial_updates=True, - importance_sampling=False, - dynamic_timeout=False, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c10-r40-qffedavg-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="qffedavg", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_10, - min_sample_size=FN_MIN_SAMPLE_SIZE_10, - training_round_timeout=16, - lr_initial=FN_LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - 
instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - ### - ### FedFS - ### - "n20-fedfs-v0-16-08": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedfs-v0", - rounds=N20_ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=16, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=False, - dynamic_timeout=False, - training_round_timeout_short=8, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "n20-fedfs-v0-16-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedfs-v0", - rounds=N20_ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=16, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=False, - dynamic_timeout=False, - training_round_timeout_short=16, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "n20-fedfs-v1-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedfs-v1", - rounds=N20_ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - 
training_round_timeout=16, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=False, - dynamic_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "n20-fedavg-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=N20_ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=16, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - ### - ### FastAndSlow - ### - "n2020-fedfs-10": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=10, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=True, - dynamic_timeout=True, - alternating_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "n2020-fedfs-12": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - 
strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=12, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=True, - dynamic_timeout=True, - alternating_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "n2020-fedfs-14": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=14, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=True, - dynamic_timeout=True, - alternating_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "n2020-fedfs-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=16, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=True, - dynamic_timeout=True, - alternating_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "n2020-fedavg-10": 
Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=10, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - alternating_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "n2020-fedavg-12": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=12, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - alternating_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "n2020-fedavg-14": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=14, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - alternating_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - 
num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "n2020-fedavg-16": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=16, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - alternating_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - ######################################## - ### PREVIOUS ### - ######################################## - "dry-run": Baseline( - instances=[ - Instance(name="server", group="server", num_cpu=2, num_ram=8), - Instance(name="client", group="clients", num_cpu=2, num_ram=4), - ], - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=1, - min_num_clients=1, - sample_fraction=1.0, - min_sample_size=1, - training_round_timeout=600, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=True, - ), - clients=configure_uniform_clients( - iid_fraction=IID_FRACTION, - instance_names=["client"], - num_clients=4, - dry_run=True, - ), - ), - "minimal": Baseline( - instances=[Instance(name="server", group="server", num_cpu=2, num_ram=8)] - + client_instances_10, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=3, - min_num_clients=8, - sample_fraction=0.5, - min_sample_size=5, - training_round_timeout=3600, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=True, - dynamic_timeout=True, - dry_run=False, - ), - 
clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_10, - num_clients=10, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), - "fedavg-sync": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=None, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), - "fedavg-async": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=20, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), - "fast-and-slow-only-partial-updates": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=20, - lr_initial=LR_INITIAL, - partial_updates=True, - 
importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), - "fast-and-slow-only-dynamic-timeouts": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=20, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=True, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), - "fast-and-slow-only-importance-sampling": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=20, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=True, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), - "fast-and-slow": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - 
min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=20, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=True, - dynamic_timeout=True, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), - "qffedavg": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_100, - server=ServerSetting( - instance_name="server", - strategy="qffedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=None, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_100, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - ), - ), -} diff --git a/src/py/flwr_experimental/baseline/tf_hotkey/__init__.py b/src/py/flwr_experimental/baseline/tf_hotkey/__init__.py deleted file mode 100644 index 7ebf8a732ab5..000000000000 --- a/src/py/flwr_experimental/baseline/tf_hotkey/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Flower baseline using TensorFlow for Spoken Keyword classification.""" - - -DEFAULT_SERVER_ADDRESS = "[::]:8080" - -SEED = 2020 diff --git a/src/py/flwr_experimental/baseline/tf_hotkey/client.py b/src/py/flwr_experimental/baseline/tf_hotkey/client.py deleted file mode 100644 index f59ad5f676ba..000000000000 --- a/src/py/flwr_experimental/baseline/tf_hotkey/client.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower client using TensorFlow for Spoken Keyword classification.""" - - -import argparse -from logging import ERROR - -import tensorflow as tf - -import flwr as fl -from flwr.common.logger import configure, log -from flwr_experimental.baseline.common import VisionClassificationClient -from flwr_experimental.baseline.dataset import tf_hotkey_partitioned -from flwr_experimental.baseline.model import keyword_cnn -from flwr_experimental.baseline.setting import ClientSetting -from flwr_experimental.baseline.tf_hotkey.settings import SETTINGS, get_setting - -from . 
import DEFAULT_SERVER_ADDRESS, SEED - -tf.get_logger().setLevel("ERROR") - - -class ClientSettingNotFound(Exception): - """Raise when client setting could not be found.""" - - -def parse_args() -> argparse.Namespace: - """Parse and return commandline arguments.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--server_address", - type=str, - default=DEFAULT_SERVER_ADDRESS, - help=f"Server address (IPv6, default: {DEFAULT_SERVER_ADDRESS})", - ) - parser.add_argument( - "--log_host", - type=str, - help="HTTP log handler host (no default)", - ) - parser.add_argument( - "--setting", - type=str, - choices=SETTINGS.keys(), - help="Setting to run.", - ) - parser.add_argument("--cid", type=str, required=True, help="Client cid.") - return parser.parse_args() - - -def get_client_setting(setting: str, cid: str) -> ClientSetting: - """Return client setting based on setting name and cid.""" - for client_setting in get_setting(setting).clients: - if client_setting.cid == cid: - return client_setting - - raise ClientSettingNotFound() - - -def main() -> None: - """Load data, create and start client.""" - args = parse_args() - - client_setting = get_client_setting(args.setting, args.cid) - - # Configure logger - configure(identifier=f"client:{client_setting.cid}", host=args.log_host) - - # Load model - model = keyword_cnn(input_shape=(80, 40, 1), seed=SEED) - - # Load local data partition - ( - (xy_train_partitions, xy_test_partitions), - _, - ) = tf_hotkey_partitioned.load_data( - iid_fraction=client_setting.iid_fraction, - num_partitions=client_setting.num_clients, - ) - (x_train, y_train) = xy_train_partitions[client_setting.partition] - (x_test, y_test) = xy_test_partitions[client_setting.partition] - if client_setting.dry_run: - x_train = x_train[0:100] - y_train = y_train[0:100] - x_test = x_test[0:50] - y_test = y_test[0:50] - - # Start client - client = VisionClassificationClient( - client_setting.cid, - model, - (x_train, y_train), - 
(x_test, y_test), - client_setting.delay_factor, - 10, - normalization_factor=100.0, - ) - fl.client.start_client(args.server_address, client) - - -if __name__ == "__main__": - # pylint: disable=broad-except - try: - main() - except Exception as err: - log(ERROR, "Fatal error in main") - log(ERROR, err, exc_info=True, stack_info=True) - - # Raise the error again so the exit code is correct - raise err diff --git a/src/py/flwr_experimental/baseline/tf_hotkey/download.py b/src/py/flwr_experimental/baseline/tf_hotkey/download.py deleted file mode 100644 index d9f3c53bdb39..000000000000 --- a/src/py/flwr_experimental/baseline/tf_hotkey/download.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Helper script to download Spoken Keyword dataset.""" - - -from logging import INFO - -from flwr.common.logger import log -from flwr_experimental.baseline.dataset import tf_hotkey_partitioned - - -def main() -> None: - """Download data.""" - log(INFO, "Download Keyword Detection") - tf_hotkey_partitioned.hotkey_load() - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_experimental/baseline/tf_hotkey/server.py b/src/py/flwr_experimental/baseline/tf_hotkey/server.py deleted file mode 100644 index beba81f58396..000000000000 --- a/src/py/flwr_experimental/baseline/tf_hotkey/server.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower server for Spoken Keyword classification.""" - - -import argparse -import math -from logging import ERROR, INFO -from typing import Callable, Dict, Optional - -import flwr as fl -from flwr.common.logger import configure, log -from flwr_experimental.baseline.common import get_evaluate_fn -from flwr_experimental.baseline.dataset import tf_hotkey_partitioned -from flwr_experimental.baseline.model import keyword_cnn -from flwr_experimental.baseline.tf_hotkey.settings import SETTINGS, get_setting - -from . 
import DEFAULT_SERVER_ADDRESS, SEED - - -def parse_args() -> argparse.Namespace: - """Parse and return commandline arguments.""" - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--log_host", - type=str, - help="HTTP log handler host (no default)", - ) - parser.add_argument( - "--setting", - type=str, - choices=SETTINGS.keys(), - help="Setting to run.", - ) - - return parser.parse_args() - - -def main() -> None: - """Start server and train a number of rounds.""" - args = parse_args() - - # Configure logger - configure(identifier="server", host=args.log_host) - - server_setting = get_setting(args.setting).server - log(INFO, "server_setting: %s", server_setting) - - # Load evaluation data - (_, _), (x_test, y_test) = tf_hotkey_partitioned.load_data( - iid_fraction=0.0, num_partitions=1 - ) - if server_setting.dry_run: - x_test = x_test[0:50] - y_test = y_test[0:50] - - # Load model (for centralized evaluation) - model = keyword_cnn(input_shape=(80, 40, 1), seed=SEED) - - # Strategy - evaluate_fn = get_evaluate_fn(model=model, num_classes=10, xy_test=(x_test, y_test)) - on_fit_config_fn = get_on_fit_config_fn( - lr_initial=server_setting.lr_initial, - timeout=server_setting.training_round_timeout, - partial_updates=server_setting.partial_updates, - ) - - if server_setting.strategy == "fedavg": - strategy = fl.server.strategy.FedAvg( - fraction_fit=server_setting.sample_fraction, - min_fit_clients=server_setting.min_sample_size, - min_available_clients=server_setting.min_num_clients, - evaluate_fn=evaluate_fn, - on_fit_config_fn=on_fit_config_fn, - ) - - if server_setting.strategy == "fast-and-slow": - if server_setting.training_round_timeout is None: - raise ValueError( - "No `training_round_timeout` set for `fast-and-slow` strategy" - ) - strategy = fl.server.strategy.FastAndSlow( - fraction_fit=server_setting.sample_fraction, - min_fit_clients=server_setting.min_sample_size, - min_available_clients=server_setting.min_num_clients, - 
evaluate_fn=evaluate_fn, - on_fit_config_fn=on_fit_config_fn, - importance_sampling=server_setting.importance_sampling, - dynamic_timeout=server_setting.dynamic_timeout, - dynamic_timeout_percentile=0.9, - alternating_timeout=server_setting.alternating_timeout, - r_fast=1, - r_slow=1, - t_fast=math.ceil(0.5 * server_setting.training_round_timeout), - t_slow=server_setting.training_round_timeout, - ) - - if server_setting.strategy == "fedfs-v0": - if server_setting.training_round_timeout is None: - raise ValueError("No `training_round_timeout` set for `fedfs-v0` strategy") - t_fast = ( - math.ceil(0.5 * server_setting.training_round_timeout) - if server_setting.training_round_timeout_short is None - else server_setting.training_round_timeout_short - ) - strategy = fl.server.strategy.FedFSv0( - fraction_fit=server_setting.sample_fraction, - min_fit_clients=server_setting.min_sample_size, - min_available_clients=server_setting.min_num_clients, - evaluate_fn=evaluate_fn, - on_fit_config_fn=on_fit_config_fn, - r_fast=1, - r_slow=1, - t_fast=t_fast, - t_slow=server_setting.training_round_timeout, - ) - - if server_setting.strategy == "qffedavg": - strategy = fl.server.strategy.QFedAvg( - q_param=0.2, - qffl_learning_rate=0.1, - fraction_fit=server_setting.sample_fraction, - min_fit_clients=server_setting.min_sample_size, - min_available_clients=server_setting.min_num_clients, - evaluate_fn=evaluate_fn, - on_fit_config_fn=on_fit_config_fn, - ) - - # Run server - fl.server.start_server( - DEFAULT_SERVER_ADDRESS, - config={"num_rounds": server_setting.rounds}, - strategy=strategy, - ) - - -def get_on_fit_config_fn( - lr_initial: float, timeout: Optional[int], partial_updates: bool -) -> Callable[[int], Dict[str, fl.common.Scalar]]: - """Return a function which returns training configurations.""" - - def fit_config(server_round: int) -> Dict[str, fl.common.Scalar]: - """Return a configuration with static batch size and (local) epochs.""" - config: Dict[str, fl.common.Scalar] 
= { - "epoch_global": str(server_round), - "epochs": str(5), - "batch_size": str(32), - "lr_initial": str(lr_initial), - "lr_decay": str(0.99), - "partial_updates": "1" if partial_updates else "0", - } - if timeout is not None: - config["timeout"] = str(timeout) - - return config - - return fit_config - - -if __name__ == "__main__": - # pylint: disable=broad-except - try: - main() - except Exception as err: - log(ERROR, "Fatal error in main") - log(ERROR, err, exc_info=True, stack_info=True) - - # Raise the error again so the exit code is correct - raise err diff --git a/src/py/flwr_experimental/baseline/tf_hotkey/settings.py b/src/py/flwr_experimental/baseline/tf_hotkey/settings.py deleted file mode 100644 index 5bfb7b1e42ad..000000000000 --- a/src/py/flwr_experimental/baseline/tf_hotkey/settings.py +++ /dev/null @@ -1,577 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Provides a variaty of baseline settings for Spoken Keyword -classification.""" - - -from typing import List - -from flwr_experimental.baseline.config import ( - configure_client_instances, - sample_delay_factors, - sample_real_delay_factors, -) -from flwr_experimental.baseline.setting import Baseline, ClientSetting, ServerSetting -from flwr_experimental.ops.instance import Instance - -ROUNDS = 50 -MIN_NUM_CLIENTS = 45 -SAMPLE_FRACTION = 0.2 -MIN_SAMPLE_SIZE = 10 - -LR_INITIAL = 0.01 - -IID_FRACTION = 0.1 -MAX_DELAY_FACTOR = 4.0 # Equals a 5x slowdown - - -FN_NUM_CLIENTS = 50 -FN_ROUNDS = 50 -FN_MIN_NUM_CLIENTS = 45 -FN_LR_INITIAL = 0.001 -FN_IID_FRACTION = 0.1 -FN_MAX_DELAY_FACTOR = 4.0 - -FN_SAMPLE_FRACTION_25 = 0.5 -FN_MIN_SAMPLE_SIZE_25 = 25 - -FN_SAMPLE_FRACTION_10 = 0.2 -FN_MIN_SAMPLE_SIZE_10 = 10 - -FN_TRAINING_ROUND_TIMEOUT = 230 - - -def get_setting(name: str) -> Baseline: - """Return appropriate setting.""" - if name not in SETTINGS: - raise Exception( - f"Baseline {name} does not exist. 
Valid settings are: {list(SETTINGS.keys())}" - ) - return SETTINGS[name] - - -def get_instance_name( - instance_names: List[str], num_clients: int, client_index: int -) -> str: - """Return instance_name.""" - idx = client_index // (num_clients // len(instance_names)) - idx = min([idx, len(instance_names) - 1]) - return instance_names[min(idx, len(instance_names))] - - -def configure_uniform_clients( - iid_fraction: float, - instance_names: List[str], - num_clients: int, - dry_run: bool, -) -> List[ClientSetting]: - """Configure `num_clients`, all using the same delay factor.""" - clients = [] - for i in range(num_clients): - client = ClientSetting( - # Set instance on which to run - instance_name=get_instance_name(instance_names, num_clients, i), - # Individual - cid=str(i), - partition=i, - delay_factor=0.0, - # Shared - iid_fraction=iid_fraction, - num_clients=num_clients, - dry_run=dry_run, - ) - clients.append(client) - - return clients - - -# pylint: disable=too-many-arguments -def configure_clients( - iid_fraction: float, - instance_names: List[str], - num_clients: int, - dry_run: bool, - delay_factor_fast: float, - delay_factor_slow: float, - sample_delays: bool = True, - real_delays: bool = False, -) -> List[ClientSetting]: - """Configure `num_clients` with different delay factors.""" - if sample_delays: - # Configure clients with sampled delay factors - if real_delays: - delay_factors = sample_real_delay_factors( - num_clients=num_clients, seed=2020 - ) - else: - delay_factors = sample_delay_factors( - num_clients=num_clients, max_delay=delay_factor_slow, seed=2020 - ) - return [ - ClientSetting( - # Set instance on which to run - instance_name=get_instance_name(instance_names, num_clients, i), - # Individual - cid=str(i), - partition=i, - delay_factor=delay_factors[i], - # Shared - iid_fraction=iid_fraction, - num_clients=num_clients, - dry_run=dry_run, - ) - for i in range(num_clients) - ] - # Configure clients with fixed delay factors - clients = [] - 
for i in range(num_clients): - client = ClientSetting( - # Set instance on which to run - instance_name=get_instance_name(instance_names, num_clients, i), - # Individual - cid=str(i), - partition=i, - # Indices 0 to 49 fast, 50 to 99 slow - delay_factor=( - delay_factor_fast if i < int(num_clients / 2) else delay_factor_slow - ), - # Shared - iid_fraction=iid_fraction, - num_clients=num_clients, - dry_run=dry_run, - ) - clients.append(client) - - return clients - - -client_instances_50, client_names_50 = configure_client_instances( - num_clients=50, num_cpu=2, num_ram=8 -) - -client_instances_10, client_names_10 = configure_client_instances( - num_clients=10, num_cpu=2, num_ram=8 -) - -SETTINGS = { - ### - ### FedFS vs FedAvg - ### - "fn-c25-r50-fedavg-230": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_25, - min_sample_size=FN_MIN_SAMPLE_SIZE_25, - training_round_timeout=FN_TRAINING_ROUND_TIMEOUT, - lr_initial=FN_LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_50, - num_clients=FN_NUM_CLIENTS, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c25-r50-fedfs-v0-230-230": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fedfs-v0", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_25, - min_sample_size=FN_MIN_SAMPLE_SIZE_25, - training_round_timeout=FN_TRAINING_ROUND_TIMEOUT, - lr_initial=FN_LR_INITIAL, - partial_updates=True, - importance_sampling=False, - 
dynamic_timeout=False, - training_round_timeout_short=FN_TRAINING_ROUND_TIMEOUT, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_50, - num_clients=FN_NUM_CLIENTS, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c10-r50-fedavg-230": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_10, - min_sample_size=FN_MIN_SAMPLE_SIZE_10, - training_round_timeout=FN_TRAINING_ROUND_TIMEOUT, - lr_initial=FN_LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_50, - num_clients=FN_NUM_CLIENTS, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fn-c10-r50-fedfs-v0-230-230": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fedfs-v0", - rounds=FN_ROUNDS, - min_num_clients=FN_MIN_NUM_CLIENTS, - sample_fraction=FN_SAMPLE_FRACTION_10, - min_sample_size=FN_MIN_SAMPLE_SIZE_10, - training_round_timeout=FN_TRAINING_ROUND_TIMEOUT, - lr_initial=FN_LR_INITIAL, - partial_updates=True, - importance_sampling=False, - dynamic_timeout=False, - training_round_timeout_short=FN_TRAINING_ROUND_TIMEOUT, - ), - clients=configure_clients( - iid_fraction=FN_IID_FRACTION, - instance_names=client_names_50, - num_clients=FN_NUM_CLIENTS, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=FN_MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - ### - ### - ### - "n2020-fedfs": Baseline( - instances=[Instance(name="server", group="server", 
num_cpu=4, num_ram=16)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=200, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=True, - dynamic_timeout=True, - alternating_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_50, - num_clients=50, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "n2020-fedavg": Baseline( - instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=200, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - alternating_timeout=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_50, - num_clients=50, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "dry-run": Baseline( - instances=[ - Instance(name="server", group="server", num_cpu=4, num_ram=16), - Instance(name="client", group="clients", num_cpu=4, num_ram=16), - ], - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=1, - min_num_clients=1, - sample_fraction=1.0, - min_sample_size=1, - training_round_timeout=600, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=True, - ), - clients=configure_uniform_clients( - iid_fraction=IID_FRACTION, - instance_names=["client"], - num_clients=4, - dry_run=True, - ), - ), - "minimal": Baseline( - 
instances=[Instance(name="server", group="server", num_cpu=4, num_ram=16)] - + client_instances_10, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=2, - min_num_clients=4, - sample_fraction=1.0, - min_sample_size=3, - training_round_timeout=3600, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_10, - num_clients=10, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fedavg-sync": Baseline( - instances=[Instance(name="server", group="server", num_cpu=8, num_ram=32)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=None, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_50, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fedavg-async": Baseline( - instances=[Instance(name="server", group="server", num_cpu=8, num_ram=32)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=20, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_50, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - 
real_delays=True, - ), - ), - "fast-and-slow-only-partial-updates": Baseline( - instances=[Instance(name="server", group="server", num_cpu=8, num_ram=32)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=20, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_50, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fast-and-slow-only-dynamic-timeouts": Baseline( - instances=[Instance(name="server", group="server", num_cpu=8, num_ram=32)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=20, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=True, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_50, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fast-and-slow-only-importance-sampling": Baseline( - instances=[Instance(name="server", group="server", num_cpu=8, num_ram=32)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=20, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=True, - dynamic_timeout=False, - dry_run=False, - ), - 
clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_50, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "fast-and-slow": Baseline( - instances=[Instance(name="server", group="server", num_cpu=8, num_ram=32)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="fast-and-slow", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=60, - lr_initial=LR_INITIAL, - partial_updates=True, - importance_sampling=True, - dynamic_timeout=True, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_50, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), - "qffedavg": Baseline( - instances=[Instance(name="server", group="server", num_cpu=8, num_ram=32)] - + client_instances_50, - server=ServerSetting( - instance_name="server", - strategy="qffedavg", - rounds=ROUNDS, - min_num_clients=MIN_NUM_CLIENTS, - sample_fraction=SAMPLE_FRACTION, - min_sample_size=MIN_SAMPLE_SIZE, - training_round_timeout=None, - lr_initial=LR_INITIAL, - partial_updates=False, - importance_sampling=False, - dynamic_timeout=False, - dry_run=False, - ), - clients=configure_clients( - iid_fraction=IID_FRACTION, - instance_names=client_names_50, - num_clients=100, - dry_run=False, - delay_factor_fast=0.0, - delay_factor_slow=MAX_DELAY_FACTOR, - real_delays=True, - ), - ), -} diff --git a/src/py/flwr_experimental/logserver/README.md b/src/py/flwr_experimental/logserver/README.md deleted file mode 100644 index f7784c56a20a..000000000000 --- a/src/py/flwr_experimental/logserver/README.md +++ /dev/null @@ -1,34 +0,0 @@ -# Flower LogServer - -A simple server which receives logs from the python standard library `logging.handlers.HTTPHandler` and 
prints them to the console. - -## Quickstart - -A minimal example showing how centralized logging works. - -Run these commands in 3 different terminals. -Start the log server. - -```bash -python -m flwr_experimental.logserver -``` - -Start the FL server and client. - -```bash -python -m flwr_experimental.baseline.tf_fashion_mnist.server --log_host=localhost:8081 -``` - -```bash -python -m flwr_experimental.baseline.tf_fashion_mnist.client \ - --cid=0 --partition=0 --clients=1 --server_address=localhost:8080 \ - --log_host=localhost:8081 -``` - -## Persist logs to S3 - -If you would like to upload your logs regularly to S3 you can pass the following command line arguments on start. - -```bash -python -m flwr_experimental.logserver --s3_bucket=MY_BUCKET --s3_key=MY_S3_KEY -``` diff --git a/src/py/flwr_experimental/logserver/__init__.py b/src/py/flwr_experimental/logserver/__init__.py deleted file mode 100644 index 352112d5e933..000000000000 --- a/src/py/flwr_experimental/logserver/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Provides simple server to receive logs from python http logger.""" diff --git a/src/py/flwr_experimental/logserver/__main__.py b/src/py/flwr_experimental/logserver/__main__.py deleted file mode 100644 index a3ac56e405f0..000000000000 --- a/src/py/flwr_experimental/logserver/__main__.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Start log server.""" - - -from flwr_experimental.logserver.server import main - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_experimental/logserver/server.py b/src/py/flwr_experimental/logserver/server.py deleted file mode 100644 index 683b12b6db6c..000000000000 --- a/src/py/flwr_experimental/logserver/server.py +++ /dev/null @@ -1,232 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Provides a logserver.""" - - -import argparse -import ast -import json -import logging -import re -import time -import urllib.parse -from http.server import BaseHTTPRequestHandler, HTTPServer -from pathlib import Path -from threading import Event, Thread -from typing import Dict, List, Optional, Tuple, Union - -import boto3 -import numpy as np - -from flwr_experimental.baseline.plot import plot - -LOGDIR = "flower_logs" -LOGFILE = "{logdir}/flower.log".format(logdir=LOGDIR) -LOGFILE_UPLOAD_INTERVAL = 60 -SERVER_TIMEOUT = 1200 - -CONFIG: Dict[str, Optional[str]] = {"s3_bucket": None, "s3_key": None} - -Accuracies = List[Tuple[int, float]] - - -def write_to_logfile(line: str) -> None: - """Write line to logfile.""" - with open(f"{LOGFILE}", "a+") as lfd: - lfd.write(line + "\n") - - -def is_credentials_available() -> bool: - """Return True is credentials are available in CONFIG.""" - return all([v is not None for v in CONFIG.values()]) - - -def upload_file(local_filepath: str, s3_key: Optional[str]) -> None: - """Upload logfile to S3.""" - if not is_credentials_available(): - logging.info( - "Skipping S3 logfile upload as s3_bucket or s3_key was not provided." 
- ) - elif not Path(LOGFILE).is_file(): - logging.info("No logfile found.") - elif s3_key is not None: - try: - logging.info("Uploading logfile to S3.") - boto3.resource("s3").meta.client.upload_file( - Filename=local_filepath, - Bucket=CONFIG["s3_bucket"], - Key=s3_key, - ExtraArgs={ - "ContentType": ( - "application/pdf" if s3_key.endswith(".pdf") else "text/plain" - ) - }, - ) - # pylint: disable=broad-except - except Exception as err: - logging.error(err) - - -def continuous_logfile_upload(stop_condition: Event, interval: int) -> None: - """Call upload_logfile function regularly until stop_condition Event is - set.""" - while True: - upload_file(LOGFILE, CONFIG["s3_key"]) - - if stop_condition.is_set(): - break - - time.sleep(interval) - - -def on_record(record: Dict[str, str]) -> None: - """Call on each new line.""" - - # Print record as JSON and write it to a logfile - line = str(json.dumps(record)) - print(line) - write_to_logfile(line) - - # Analyze record and if possible extract a plot_type and data from it - plot_type, data = parse_plot_message(record["message"]) - - if plot_type == "accuracies" and data is not None: - plot_accuracies(data) - - -def parse_plot_message( - message: str, -) -> Tuple[Optional[str], Optional[Union[Accuracies]]]: - """Parse message and return its type and the data if possible. - - If the message does not contain plotable data return None. 
- """ - accuracies_str = "app_fit: accuracies_centralized " - - if accuracies_str in message: - values_str = re.sub(accuracies_str, "", message) - values: Accuracies = ast.literal_eval(values_str) - return "accuracies", values - - return None, None - - -def plot_accuracies(values: Accuracies) -> str: - """Plot accuracies.""" - filename = f'{CONFIG["s3_key"]}.accuracies' - - line = [val * 100 for _, val in values] - - local_path = plot.line_chart( - lines=[np.array(line)], - labels=["Train"], - x_label="Rounds", - y_label="Accuracy", - filename=filename, - ) - upload_file(local_path, filename + ".pdf") - return local_path - - -class RequestHandler(BaseHTTPRequestHandler): - """Provide custom POST handler.""" - - def _set_response(self) -> None: - self.send_response(200) - self.send_header("Content-type", "text/html") - self.end_headers() - - def do_POST(self) -> None: # pylint: disable=invalid-name - """Handle POST request.""" - content_length = int(self.headers["Content-Length"]) - post_qs = self.rfile.read(content_length).decode("utf-8") - record: Dict[str, str] = { - "client_address": f"{self.client_address[0]}:{self.client_address[1]}" - } - - for key, val in urllib.parse.parse_qs(post_qs).items(): - record[key] = str(val[0]) if len(val) == 1 else str(val) - - self._set_response() - self.wfile.write("POST request for {}".format(self.path).encode("utf-8")) - - thread = Thread(target=on_record, args=(record,)) - thread.start() - - -class LogServer(HTTPServer): - """Log server with timeout.""" - - timeout = SERVER_TIMEOUT - - def handle_timeout(self) -> None: - """Cleanup and upload logfile to S3.""" - self.server_close() - raise TimeoutError() - - -def main() -> None: - """Start log server.""" - # Create a flower_logs directory to store the logfiles. 
- Path(LOGDIR).mkdir(exist_ok=True) - Path(LOGFILE).touch() - - logging.basicConfig(level=logging.INFO) - - parser = argparse.ArgumentParser(description="Flower LogServer") - parser.add_argument( - "--s3_bucket", - type=str, - help="S3 bucket where the logfile should be uploaded to.", - ) - parser.add_argument( - "--s3_key", - type=str, - help="S3 key under which the logfile should be uploaded.", - ) - args = parser.parse_args() - - CONFIG["s3_bucket"] = args.s3_bucket - CONFIG["s3_key"] = args.s3_key - - server = LogServer(("", 8081), RequestHandler) - logging.info("Starting logging server...\n") - - # Start file upload loop - sync_loop_stop_condition = Event() - sync_loop = Thread( - target=continuous_logfile_upload, - args=(sync_loop_stop_condition, LOGFILE_UPLOAD_INTERVAL), - ) - sync_loop.start() - - try: - while True: - server.handle_request() - except TimeoutError: - print( - f"TimeoutError raised as no request was received for {SERVER_TIMEOUT} seconds." - ) - sync_loop_stop_condition.set() - sync_loop.join() - - # Final upload - upload_file(LOGFILE, CONFIG["s3_key"]) - - logging.info("Stopping logging server...\n") - - -if __name__ == "__main__": - main() diff --git a/src/py/flwr_experimental/logserver/server_test.py b/src/py/flwr_experimental/logserver/server_test.py deleted file mode 100644 index becead625a5d..000000000000 --- a/src/py/flwr_experimental/logserver/server_test.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Test for server.""" - - -import os.path -import tempfile -import unittest - -from flwr_experimental.logserver.server import ( - CONFIG, - parse_plot_message, - plot_accuracies, -) - - -# pylint: disable=no-self-use -class LogserverTest(unittest.TestCase): - """Tests for functions in the server module.""" - - def setUp(self) -> None: - # Create a temporary directory - self.test_dir = tempfile.TemporaryDirectory() - - def tearDown(self) -> None: - # Cleanup the directory after the test - # self.test_dir.cleanup() - pass - - def test_parse_plot_message(self) -> None: - """Test parse_plot_message function.""" - # Prepare - message = "app_fit: accuracies_centralized [(0, 0.019), (1, 0.460)]" - expected_plot_type = "accuracies" - expected_values = [(0, 0.019), (1, 0.460)] - - # Execute - plot_type, values = parse_plot_message(message) - - # Assert - assert plot_type == expected_plot_type - assert values == expected_values - - def test_plot_accuracies(self) -> None: - """Test plot accuracies function.""" - # Prepare - values = [(0, 0.019), (1, 0.460), (2, 0.665), (3, 0.845)] - CONFIG["s3_key"] = os.path.join(self.test_dir.name, "foo.log") - - expected_filepath = os.path.join( - self.test_dir.name, f'{CONFIG["s3_key"]}.accuracies.pdf' - ) - - # Execute - plot_accuracies(values) - - # Assert - assert os.path.isfile(expected_filepath) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/py/flwr_experimental/ops/README.md b/src/py/flwr_experimental/ops/README.md deleted file mode 100644 index 855f86821c5f..000000000000 --- a/src/py/flwr_experimental/ops/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# Flower Ops -## Compute -### EC2 Adapter -For permission management an IAM instance profile named `FlowerInstanceProfile` is expected. 
-The instances will use that profile for all nessecary permissions. In case of logfile upload -the profile must include the permission to upload the logfile from the machine to the respective -S3 bucket. - -An example policy attached to the profile for the logfiles might look like: -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "FlowerInstanceProfileS3Policy", - "Effect": "Allow", - "Action": [ - "s3:PutObject", - "s3:PutObjectRetention", - "s3:PutObjectVersionAcl", - "s3:PutObjectAcl" - ], - "Resource": "arn:aws:s3:::mylogfilebucket/*" - } - ] -} -``` diff --git a/src/py/flwr_experimental/ops/__init__.py b/src/py/flwr_experimental/ops/__init__.py deleted file mode 100644 index bad31028e68c..000000000000 --- a/src/py/flwr_experimental/ops/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Flower ops provides an opinionated way to provision necessary compute -infrastructure for running Flower runs.""" diff --git a/src/py/flwr_experimental/ops/cluster.py b/src/py/flwr_experimental/ops/cluster.py deleted file mode 100644 index 53a4e9617427..000000000000 --- a/src/py/flwr_experimental/ops/cluster.py +++ /dev/null @@ -1,309 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Implements compute classes for EC2.""" - - -import concurrent.futures -from contextlib import contextmanager -from itertools import groupby -from logging import DEBUG, ERROR -from typing import Dict, Iterator, List, Optional, Tuple, Type, Union, cast - -from paramiko.client import MissingHostKeyPolicy, SSHClient -from paramiko.sftp_attr import SFTPAttributes - -from flwr.common.logger import log - -from .compute.adapter import Adapter -from .instance import Instance - -ExecInfo = Tuple[List[str], List[str]] - - -class StartFailed(Exception): - """Raised when cluster could not start.""" - - -class InstanceIdNotFound(Exception): - """Raised when there was no instance with given id.""" - - -class InstanceMismatch(Exception): - """Raised when instances passed to create_instances do not have the same - values for RAM or CPU.""" - - -class IgnoreHostKeyPolicy: - """Policy for accepting any unknown host key. - - This is used by `paramiko.client.SSHClient`. - """ - - # pylint: disable=no-self-use, unused-argument - def missing_host_key(self, client: SSHClient, hostname: str, key: str) -> None: - """Simply return to ignore the host key. - - As we create and destroy machines quite regularly and don't - reuse them we will not store the host key in the local system to - avoid pollution the local known_hosts file. 
- """ - return None - - -SSHCredentials = Tuple[str, str] # username, key_filename - - -@contextmanager -def ssh_connection( - instance: Instance, ssh_credentials: SSHCredentials -) -> Iterator[SSHClient]: - """Connect to server and yield SSH client.""" - username, key_filename = ssh_credentials - - instance_ssh_port: int = cast(int, instance.ssh_port) - ignore_host_key_policy: Union[Type[MissingHostKeyPolicy], MissingHostKeyPolicy] = ( - cast( - Union[Type[MissingHostKeyPolicy], MissingHostKeyPolicy], IgnoreHostKeyPolicy - ) - ) - - client = SSHClient() - client.set_missing_host_key_policy(ignore_host_key_policy) - client.connect( - hostname=str(instance.public_ip), - port=instance_ssh_port, - username=username, - key_filename=key_filename, - ) - - yield client - - client.close() - - -def create_instances(adapter: Adapter, instances: List[Instance], timeout: int) -> None: - """Start instances and set props of each instance. - - Fails if CPU and RAM of instances are not all the same. - """ - if not all( - [ - ins.num_cpu == instances[0].num_cpu and ins.num_ram == instances[0].num_ram - for ins in instances - ] - ): - raise InstanceMismatch( - "Values of num_cpu and num_ram have to be equal for all instances." 
- ) - - # As checked before that each instance has the same num_cpu and num_ram - # we can just take the values from the first => instances[0] - adapter_instances = adapter.create_instances( - num_cpu=instances[0].num_cpu, - num_ram=instances[0].num_ram, - num_instance=len(instances), - gpu=instances[0].gpu, - timeout=timeout, - ) - - for i, adp_ins in enumerate(adapter_instances): - instance_id, private_ip, public_ip, ssh_port, state = adp_ins - - instances[i].instance_id = instance_id - instances[i].private_ip = private_ip - instances[i].public_ip = public_ip - instances[i].ssh_port = ssh_port - instances[i].state = state - - -def group_instances_by_specs(instances: List[Instance]) -> List[List[Instance]]: - """Group instances by num_cpu and num_ram.""" - groups: List[List[Instance]] = [] - keyfunc = lambda ins: f"{ins.num_cpu}-{ins.num_ram}" - instances = sorted(instances, key=keyfunc) - for _, group in groupby(instances, keyfunc): - groups.append(list(group)) - return groups - - -class Cluster: - """Compute environment independend compute cluster.""" - - def __init__( - self, - adapter: Adapter, - ssh_credentials: SSHCredentials, - instances: List[Instance], - timeout: int, - ): - """Create cluster. - - Args: - timeout (int): Minutes after which the machine will shutdown and terminate. - This is a safety mechanism to avoid run aways cost. The user should still - make sure to monitor the progress in case this mechanism fails. - - Example: - To start two groups of instances where the first one has one instance and the - second one has two instances you might define the following list of instances: - - instances = [ - Instance(name='server', group='server', num_cpu=2, num_ram=1.0), - Instance(name='client_0', group='clients', num_cpu=4, num_ram=16.0), - Instance(name='client_1', group='clients', num_cpu=4, num_ram=16.0), - ] - - Depending on the adapter used not every combination of vCPU and RAM might be available. 
- """ - instance_names = {ins.name for ins in instances} - assert len(instance_names) == len(instances), "Instance names must be unique." - - self.adapter = adapter - self.ssh_credentials = ssh_credentials - self.instances = instances - self.timeout = timeout - - def get_instance(self, instance_name: str) -> Instance: - """Return instance by instance_name.""" - for ins in self.instances: - if ins.name == instance_name: - return ins - - raise InstanceIdNotFound() - - def get_instance_names(self, groups: Optional[List[str]] = None) -> List[str]: - """Return a list of all instance names.""" - return [ - ins.name for ins in self.instances if groups is None or ins.group in groups - ] - - def start(self) -> None: - """Start the instance.""" - instance_groups = group_instances_by_specs(self.instances) - - with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor: - futures = [ - executor.submit( - create_instances, self.adapter, instance_group, self.timeout - ) - for instance_group in instance_groups - ] - concurrent.futures.wait(futures) - - try: - for future in futures: - future.result() - # pylint: disable=broad-except - except Exception as exc: - log( - ERROR, - "Failed to start the cluster completely. 
Shutting down...", - ) - log(ERROR, exc) - - for future in futures: - future.cancel() - - self.terminate() - raise StartFailed() from exc - - for ins in self.instances: - log(DEBUG, ins) - - def terminate(self) -> None: - """Terminate all instances and shutdown cluster.""" - self.adapter.terminate_all_instances() - - def upload( - self, instance_name: str, local_path: str, remote_path: str - ) -> SFTPAttributes: - """Upload local file to remote instance.""" - instance = self.get_instance(instance_name) - - with ssh_connection(instance, self.ssh_credentials) as client: - sftp = client.open_sftp() - - if sftp is not None: - sftp_file_attributes = sftp.put(local_path, remote_path) - - return sftp_file_attributes - - def upload_all( - self, local_path: str, remote_path: str - ) -> Dict[str, SFTPAttributes]: - """Upload file to all instances.""" - results: Dict[str, SFTPAttributes] = {} - - with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor: - # Start the load operations and mark each future with its URL - future_to_result = { - executor.submit( - self.upload, instance_name, local_path, remote_path - ): instance_name - for instance_name in self.get_instance_names() - } - - for future in concurrent.futures.as_completed(future_to_result): - instance_name = future_to_result[future] - try: - results[instance_name] = future.result() - # pylint: disable=broad-except - except Exception as exc: - log(ERROR, (instance_name, exc)) - - return results - - def exec(self, instance_name: str, command: str) -> ExecInfo: - """Run command on instance and return stdout.""" - log(DEBUG, "Exec on %s: %s", instance_name, command) - - instance = self.get_instance(instance_name) - - with ssh_connection(instance, self.ssh_credentials) as client: - _, stdout, stderr = client.exec_command(command) - lines_stdout = stdout.readlines() - lines_stderr = stderr.readlines() - - print(lines_stdout, lines_stderr) - - return lines_stdout, lines_stderr - - def exec_all( - self, command: 
str, groups: Optional[List[str]] = None - ) -> Dict[str, ExecInfo]: - """Run command on all instances. - - If provided filter by group. - """ - instance_names = self.get_instance_names(groups) - - results: Dict[str, ExecInfo] = {} - - with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor: - # Start the load operations and mark each future with its URL - future_to_result = { - executor.submit(self.exec, instance_name, command): instance_name - for instance_name in instance_names - } - - for future in concurrent.futures.as_completed(future_to_result): - instance_name = future_to_result[future] - try: - results[instance_name] = future.result() - # pylint: disable=broad-except - except Exception as exc: - log(ERROR, (instance_name, exc)) - - return results diff --git a/src/py/flwr_experimental/ops/cluster_test.py b/src/py/flwr_experimental/ops/cluster_test.py deleted file mode 100644 index 6b00182c4d2f..000000000000 --- a/src/py/flwr_experimental/ops/cluster_test.py +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Implments compute classes for EC2.""" - - -import os -import unittest -import warnings -from typing import Sized, cast -from unittest.mock import MagicMock - -from .cluster import ( - Cluster, - InstanceMismatch, - create_instances, - group_instances_by_specs, -) -from .compute.ec2_adapter import EC2Adapter -from .instance import Instance - -IMAGE_ID = "ami-0370b0294d7241341" -KEY_NAME = "flower" -SSH_CREDENTIALS = ("ubuntu", "/Users/tanto/.ssh/flower.pem") -SUBNET_ID = "subnet-23da286f" -SECURITY_GROUP_IDS = ["sg-0dd0f0080bcf86400"] - - -class CreateInstancesTestCase(unittest.TestCase): - """Test cases for create_instances.""" - - def setUp(self) -> None: - """Prepare tests.""" - self.mock_adapter = MagicMock() - self.mock_adapter.create_instances.return_value = [ - (1, "1.1.1.1", "2.2.2.1", 22, "running"), - (2, "1.1.1.2", "2.2.2.2", 22, "running"), - ] - self.timeout = 10 - - def test_create_instances(self) -> None: - """Test if create_instances works correctly.""" - # Prepare - instances = [ - Instance(name="client_0", group="clients", num_cpu=2, num_ram=8), - Instance(name="client_1", group="clients", num_cpu=2, num_ram=8), - ] - - # Execute - create_instances( - adapter=self.mock_adapter, instances=instances, timeout=self.timeout - ) - - # Assert - self.mock_adapter.create_instances.assert_called_once_with( - num_cpu=instances[0].num_cpu, - num_ram=instances[0].num_ram, - num_instance=len(instances), - timeout=10, - gpu=False, - ) - for ins in instances: - assert ins.instance_id is not None - assert ins.private_ip is not None - assert ins.public_ip is not None - assert ins.ssh_port is not None - assert ins.state is not None - - def test_create_instances_fail(self) -> None: - """Test if create_instances fails when instances list is invalid.""" - # Prepare - instances = [ - Instance(name="client_0", group="clients", num_cpu=2, num_ram=8), - Instance(name="client_1", 
group="clients", num_cpu=1, num_ram=4), - ] - - # Execute - with self.assertRaises(InstanceMismatch): - create_instances( - adapter=self.mock_adapter, instances=instances, timeout=self.timeout - ) - - -def test_group_instances_by_specs() -> None: - """Test that function works correctly.""" - # Prepare - instances = [ - Instance(name="server", group="server", num_cpu=2, num_ram=4), - Instance(name="client_0", group="clients", num_cpu=2, num_ram=8), - Instance(name="logserver", group="logserver", num_cpu=2, num_ram=4), - Instance(name="client_1", group="clients", num_cpu=2, num_ram=8), - ] - expected_groups = [[instances[0], instances[2]], [instances[1], instances[3]]] - - # Execute - groups = group_instances_by_specs(instances) - - # Assert - assert len(groups) == 2 - assert groups == expected_groups - - -if os.getenv("FLOWER_INTEGRATION"): - - class ClusterIntegrationTestCase(unittest.TestCase): - """Integration tests class Cluster. - - This TestCase will not mock anythin and use a live EC2Adapter - which will be used to provision a single machine and execute a - single command on it. Afterwards the machines will be shut down. 
- """ - - # pylint: disable=too-many-instance-attributes - def setUp(self) -> None: - """Create an instance.""" - # Filter false positive warning - warnings.filterwarnings( - "ignore", - category=ResourceWarning, - message="unclosed.*", - ) - - adapter = EC2Adapter( - image_id=IMAGE_ID, - key_name=KEY_NAME, - subnet_id=SUBNET_ID, - security_group_ids=SECURITY_GROUP_IDS, - tags=[ - ("Purpose", "integration_test"), - ("Test Name", "ClusterIntegrationTestCase"), - ], - ) - self.cluster = Cluster( - adapter=adapter, - ssh_credentials=SSH_CREDENTIALS, - instances=[ - Instance(name="server", group="server", num_cpu=2, num_ram=2) - ], - # In case the tearDown fails for some reason the machines - # should automatically terminate after 10 minutes - timeout=10, - ) - self.cluster.start() - - def tearDown(self) -> None: - self.cluster.terminate() - - def test_exec(self) -> None: - """Execute on all clients.""" - # Prepare - command = "nproc" - expected_result = "2\n" - - # Execute - stdout, stderr = self.cluster.exec("server", command) - - casted_stderr: Sized = cast(Sized, stderr) - casted_stdout: Sized = cast(Sized, stdout) - - # Assert - assert len(casted_stderr) == 0 - assert len(casted_stdout) == 1 - assert "".join(stdout) == expected_result - - -if __name__ == "__main__": - unittest.main(verbosity=2) diff --git a/src/py/flwr_experimental/ops/compute/__init__.py b/src/py/flwr_experimental/ops/compute/__init__.py deleted file mode 100644 index f6ad468f484d..000000000000 --- a/src/py/flwr_experimental/ops/compute/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Provides various adapters with standard interface to make compute resources -available.""" diff --git a/src/py/flwr_experimental/ops/compute/adapter.py b/src/py/flwr_experimental/ops/compute/adapter.py deleted file mode 100644 index 51c67a226a6e..000000000000 --- a/src/py/flwr_experimental/ops/compute/adapter.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Provides a standardised interface for provisioning compute resources.""" - - -from abc import ABC, abstractmethod -from typing import List, Optional, Tuple - -AdapterInstance = Tuple[ - str, str, Optional[str], int, str -] # (InstanceId, PrivateIpAddress, PublicIpAddress, SSHPort, State) - - -class Adapter(ABC): - """Base class for different Adapter implementations, for example, AWS - EC2.""" - - # pylint: disable=too-many-arguments - @abstractmethod - def create_instances( - self, - num_cpu: int, - num_ram: float, - timeout: int, - num_instance: int = 1, - gpu: bool = False, - ) -> List[AdapterInstance]: - """Create one or more instance(s) of the same type. - - Args: - num_cpu (int): Number of instance CPU - num_ram (int): RAM in GB - num_instance (int): Number of instances to start if currently available - timeout (int): Timeout in minutes - commands (:obj:`str`, optional): List of bash commands which will be joined into a - single string with newline as a seperator - gpu (bool): If true will only consider instances with GPU - """ - - @abstractmethod - def list_instances( - self, instance_ids: Optional[List[str]] = None - ) -> List[AdapterInstance]: - """List all instances with tags belonging to this adapter. - - Args: - instance_ids (:obj:`list` of :obj:`str`, optional): If provided, filter by instance_ids - """ - - @abstractmethod - def terminate_instances(self, instance_ids: List[str]) -> None: - """Terminate instances. - - Should raise an error if something goes wrong. - """ - - @abstractmethod - def terminate_all_instances(self) -> None: - """Terminate all instances. - - Will raise an error if something goes wrong. 
- """ diff --git a/src/py/flwr_experimental/ops/compute/docker_adapter.py b/src/py/flwr_experimental/ops/compute/docker_adapter.py deleted file mode 100644 index acb7c0c5a4e0..000000000000 --- a/src/py/flwr_experimental/ops/compute/docker_adapter.py +++ /dev/null @@ -1,166 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Provides an Adapter implementation for Docker.""" - - -import socket -import time -from contextlib import closing -from typing import List, Optional -from uuid import uuid4 - -import docker - -from .adapter import Adapter, AdapterInstance - - -class NoPublicFacingPortFound(Exception): - """Raise if public-facing port of container was not bound to private port - of host.""" - - -def get_free_port() -> int: - """Returns a free port.""" - with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as socks: - socks.bind(("", 0)) - socks.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - return int(socks.getsockname()[1]) - - -def _get_container_port(container_id: str) -> int: - """Return container port on host machine.""" - client = docker.APIClient(base_url="unix://var/run/docker.sock") - result = client.port(container_id, 22) - client.close() - if len(result) == 0: - raise NoPublicFacingPortFound - return int(result[0]["HostPort"]) - - -class DockerAdapter(Adapter): - """Adapter for Docker.""" - - 
def __init__(self, name: str = "flower", network: str = "flower"): - self.name = name - self.network = network - self._create_network() - - def _create_network(self) -> None: - """Create Docker network if it does not exist.""" - client = docker.from_env() - try: - client.networks.get(self.network) - except docker.errors.NotFound: - client.networks.create(self.network, driver="bridge") - client.close() - - # pylint: disable=too-many-arguments - def create_instances( - self, - num_cpu: int, - num_ram: float, - timeout: int, - num_instance: int = 1, - gpu: bool = False, - ) -> List[AdapterInstance]: - """Create one or more docker container instance(s) of the same type. - - Args: - num_cpu (int): Number of instance CPU cores (currently ignored) - num_ram (int): RAM in GB (currently ignored) - timeout (int): Timeout in minutes - num_instance (int): Number of instances to start - """ - instances: List[AdapterInstance] = [] - - client = docker.from_env() - for _ in range(num_instance): - port = get_free_port() - container = client.containers.run( - "flower-sshd:latest", - auto_remove=True, - detach=True, - ports={"22/tcp": port}, - network=self.network, - labels={"adapter_name": self.name}, - # We have to assign a name as the default random name will not work - # as hostname so the containers can reach each other - name=str(uuid4().hex[:8]), - ) - - # Docker needs a little time to start the container - time.sleep(1) - - port = _get_container_port(container.short_id) - instances.append( - (container.short_id, container.name, "127.0.0.1", port, "started") - ) - - client.close() - - return instances - - def list_instances( - self, instance_ids: Optional[List[str]] = None - ) -> List[AdapterInstance]: - """List all container instances with tags belonging to this adapter. 
- - Args: - instance_ids ([str[]]): If provided, filter by instance_ids - """ - instances: List[AdapterInstance] = [] - - client = docker.from_env() - containers = client.containers.list( - filters={"label": f"adapter_name={self.name}"} - ) - for container in containers: - port = _get_container_port(container.short_id) - instances.append( - ( - container.short_id, - container.name, - "127.0.0.1", - port, - container.status, - ) - ) - client.close() - - return instances - - def terminate_instances(self, instance_ids: List[str]) -> None: - """Terminate container instance(s). - - Will raise an error if something goes wrong. - """ - client = docker.from_env() - for instance_id in instance_ids: - container = client.containers.get(instance_id) - container.remove(force=True) - client.close() - - def terminate_all_instances(self) -> None: - """Terminate all instances. - - Will raise an error if something goes wrong. - """ - client = docker.from_env() - containers = client.containers.list( - filters={"label": f"adapter_name={self.name}"} - ) - for container in containers: - container.remove(force=True) - client.close() diff --git a/src/py/flwr_experimental/ops/compute/docker_adapter_test.py b/src/py/flwr_experimental/ops/compute/docker_adapter_test.py deleted file mode 100644 index c6d5759a8246..000000000000 --- a/src/py/flwr_experimental/ops/compute/docker_adapter_test.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests DockerAdapter.""" - - -import os -import time -import unittest - -import docker - -from .docker_adapter import DockerAdapter, get_free_port - -if os.getenv("FLOWER_INTEGRATION"): - - class DockerAdapterIntegrationTestCase(unittest.TestCase): - """Test suite for class DockerAdapter. - - Required docker to be available on the host machine. - """ - - def setUp(self) -> None: - """Prepare tests.""" - self.name = "flower_test" - self.client = docker.from_env() - self.adapter = DockerAdapter(name=self.name) - - def tearDown(self) -> None: - """Cleanup tests.""" - containers = self.client.containers.list( - filters={"label": f"adapter_name={self.name}"} - ) - for container in containers: - container.remove(force=True) - self.client.close() - - def test_create_instances(self) -> None: - """Create and start an instance.""" - # Execute - instances = self.adapter.create_instances( - num_cpu=2, num_ram=2, timeout=1, num_instance=2, gpu=False - ) - - # Assert - assert len(instances) == 2 - - containers = self.client.containers.list( - filters={"label": f"adapter_name={self.name}"} - ) - assert len(containers) == 2 - - def test_list_instances(self) -> None: - """List all instances.""" - # Prepare - for _ in range(2): - port = get_free_port() - self.client.containers.run( - "flower-sshd:latest", - auto_remove=True, - detach=True, - ports={"22/tcp": port}, - labels={"adapter_name": self.name}, - ) - - # Execute - instances = self.adapter.list_instances() - - # Assert - assert len(instances) == 2, "Expected to find two instances." - ports = {i[3] for i in instances} - assert len(ports) == 2, "Each instance should have a distinct port." 
- - def test_terminate_instance(self) -> None: - """Destroy all instances.""" - # Prepare - port = get_free_port() - container = self.client.containers.run( - "flower-sshd:latest", - name=f"{self.name}_{int(time.time() * 1000)}", - auto_remove=True, - detach=True, - ports={"22/tcp": port}, - labels={"adapter_name": self.name}, - ) - - # Execute - self.adapter.terminate_instances([container.short_id]) - - # Assert - containers = self.client.containers.list( - filters={"label": f"adapter_name={self.name}"} - ) - assert len(containers) == 0 - - def test_terminate_all_instances(self) -> None: - """Destroy all instances.""" - # Prepare - for _ in range(2): - port = get_free_port() - self.client.containers.run( - "flower-sshd:latest", - name=f"{self.name}_{int(time.time() * 1000)}", - auto_remove=True, - detach=True, - ports={"22/tcp": port}, - ) - - # Execute - self.adapter.terminate_all_instances() - - # Assert - containers = self.client.containers.list( - filters={"label": f"adapter_name={self.name}"} - ) - assert len(containers) == 0 - - -if __name__ == "__main__": - unittest.main(verbosity=2) diff --git a/src/py/flwr_experimental/ops/compute/ec2_adapter.py b/src/py/flwr_experimental/ops/compute/ec2_adapter.py deleted file mode 100644 index 43fb66c2d944..000000000000 --- a/src/py/flwr_experimental/ops/compute/ec2_adapter.py +++ /dev/null @@ -1,311 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Provides an Adapter implementation for AWS EC2.""" - - -import time -from logging import INFO -from typing import Dict, List, Optional, Tuple, Union - -import boto3 -from boto3_type_annotations import ec2 - -from flwr.common.logger import log - -from .adapter import Adapter, AdapterInstance - -EC2RunInstancesResult = Dict[str, List[ec2.Instance]] -EC2DescribeInstancesResult = Dict[str, List[Dict[str, List[ec2.Instance]]]] - - -class NoMatchingInstanceType(Exception): - """No matching instance type exists.""" - - -class EC2TerminationFailure(Exception): - """Something went wrong while terminating EC2 instances. - - EC2 should be manually checked to check what went wrong and the - instances might need manual shutdown and terminatation. - """ - - -class EC2CreateInstanceFailure(Exception): - """Instance provisioning failed.""" - - -class EC2StatusTimeout(Exception): - """Indicates that the status check timed out.""" - - -# List of AWS instance types with -# (instance_type, vCPU, Mem) -INSTANCE_TYPES_CPU = [ - ("t3.small", 2, 2, 0.0209), # Beware CPU credit limited - ("c5.large", 2, 4, 0.097), - ("m5a.large", 2, 8, 0.104), - ("m5a.xlarge", 4, 16, 0.208), - ("m5a.2xlarge", 8, 32, 0.416), - ("m5a.4xlarge", 16, 64, 0.832), - ("m5a.12xlarge", 48, 192, 2.496), - ("m5a.24xlarge", 96, 384, 4.992), - ("r5.24xlarge", 96, 768, 7.296), -] - -INSTANCE_TYPES_GPU = [ - ("p3.2xlarge", 8, 61, 3.823), - ("p2.xlarge", 4, 61, 0.900), -] - - -def find_instance_type( - num_cpu: int, num_ram: float, instance_types: List[Tuple[str, int, int, float]] -) -> Tuple[str, float]: - """Return the first matching instance type if one exists, raise - otherwise.""" - for instance_type in instance_types: - if instance_type[1] == num_cpu and instance_type[2] == num_ram: - return instance_type[0], instance_type[3] - - raise NoMatchingInstanceType - - -def flatten_reservations( - reservations: EC2DescribeInstancesResult, 
-) -> List[ec2.Instance]: - """Extract instances from reservations returned by a call to - describe_instances.""" - instances: List[ec2.Instance] = [] - - # Flatten list of lists - for ins in [res["Instances"] for res in reservations["Reservations"]]: - instances += ins - - return instances - - -def are_all_instances_running(instances: List[ec2.Instance]) -> bool: - """Return True if all instances are running.""" - for ins in instances: - if ins["State"]["Name"] != "running": - return False - - return True - - -def are_all_status_ok(instance_status: List[Tuple[str, str]]) -> bool: - """Return True if all instances are ok.""" - for status in instance_status: - if status[1] != "ok": - return False - - return True - - -def tags_to_filter( - tags: List[Tuple[str, str]] -) -> List[Dict[str, Union[str, List[str]]]]: - """Turn list of tuples with tag name and value in to AWS format.""" - return [{"Name": f"tag:{tag[0]}", "Values": [tag[1]]} for tag in tags] - - -class EC2Adapter(Adapter): - """Adapter for AWS EC2.""" - - # pylint: disable=too-many-arguments - def __init__( - self, - image_id: str, - key_name: str, - subnet_id: str, - security_group_ids: List[str], - tags: Optional[List[Tuple[str, str]]] = None, - boto_ec2_client: Optional[boto3.session.Session] = None, - ): - self.image_id = image_id - self.key_name = key_name - self.subnet_id = subnet_id - self.security_group_ids = security_group_ids - self.tags = [("Flower EC2 Adapter ID", f"{int(time.time())}")] - - if tags is not None: - self.tags += tags - - self.tag_specifications = [ - { - "ResourceType": "instance", - "Tags": [{"Key": tag[0], "Value": tag[1]} for tag in self.tags], - } - ] - - self.ec2 = boto3.client("ec2") if boto_ec2_client is None else boto_ec2_client - - def _wait_until_instances_are_reachable(self, instance_ids: List[str]) -> None: - """Block until all instances are reachable. Raises TimeoutException - after 300s. - - Returns: - bool: True if all are reachable otherwise False. 
- """ - - for _ in range(30): - result = self.ec2.describe_instance_status( - InstanceIds=instance_ids, - # Also include instances which don't have state "running" yet - IncludeAllInstances=True, - ) - - instance_status = [ - (ins["InstanceId"], ins["InstanceStatus"]["Status"]) - for ins in result["InstanceStatuses"] - ] - - print(instance_status) - - if are_all_status_ok(instance_status): - return - - time.sleep(10) - - raise EC2StatusTimeout() - - # pylint: disable=too-many-arguments - def create_instances( - self, - num_cpu: int, - num_ram: float, - timeout: int, - num_instance: int = 1, - gpu: bool = False, - ) -> List[AdapterInstance]: - """Create one or more EC2 instance(s) of the same type. - - Args: - num_cpu (int): Number of instance vCPU (values in - ec2_adapter.INSTANCE_TYPES_CPU or INSTANCE_TYPES_GPU) - num_ram (int): RAM in GB (values in ec2_adapter.INSTANCE_TYPES_CPU - or INSTANCE_TYPES_GPU) - timeout (int): Timeout in minutes - num_instance (int): Number of instances to start if currently available in EC2 - """ - # The instance will be set to terminate after shutdown - # This is a fail safe in case something happens and the instances - # are not correctly shutdown - user_data = ["#!/bin/bash", f"sudo shutdown -P {timeout}"] - user_data_str = "\n".join(user_data) - - instance_type, hourly_price = find_instance_type( - num_cpu, num_ram, INSTANCE_TYPES_GPU if gpu else INSTANCE_TYPES_CPU - ) - - hourly_price_total = round(num_instance * hourly_price, 2) - - log( - INFO, - "Starting %s instances of type %s which in total will roughly cost $%s an hour.", - num_instance, - instance_type, - hourly_price_total, - ) - - result: EC2RunInstancesResult = self.ec2.run_instances( - BlockDeviceMappings=[ - {"DeviceName": "/dev/sda1", "Ebs": {"DeleteOnTermination": True}} - ], - ImageId=self.image_id, - # We always want an exact number of instances - MinCount=num_instance, - MaxCount=num_instance, - InstanceType=instance_type, - KeyName=self.key_name, - 
IamInstanceProfile={"Name": "FlowerInstanceProfile"}, - SubnetId=self.subnet_id, - SecurityGroupIds=self.security_group_ids, - TagSpecifications=self.tag_specifications, - InstanceInitiatedShutdownBehavior="terminate", - UserData=user_data_str, - ) - - instance_ids = [ins["InstanceId"] for ins in result["Instances"]] - - # As soon as all instances status is "running" we have to check the InstanceStatus which - # reports impaired functionality that stems from issues internal to the instance, such as - # impaired reachability - try: - self._wait_until_instances_are_reachable(instance_ids=instance_ids) - except EC2StatusTimeout as ec2_status_timeout: - self.terminate_instances(instance_ids) - raise EC2CreateInstanceFailure() from ec2_status_timeout - - return self.list_instances(instance_ids=instance_ids) - - def list_instances( - self, instance_ids: Optional[List[str]] = None - ) -> List[AdapterInstance]: - """List all instances with tags belonging to this adapter. - - Args: - instance_ids ([str[]]): If provided, filter by instance_ids - """ - if instance_ids is None: - instance_ids = [] - - result: EC2DescribeInstancesResult = self.ec2.describe_instances( - InstanceIds=instance_ids, - Filters=tags_to_filter(self.tags), - ) - - instances = flatten_reservations(result) - - instances = [ - ( - ins["InstanceId"], - ins["PrivateIpAddress"], - ins["PublicIpAddress"], - 22, - ins["State"]["Name"], - ) - for ins in instances - ] - - return instances - - def terminate_instances(self, instance_ids: List[str]) -> None: - """Terminate instances. - - Will raise an error if something goes wrong. - """ - res = self.ec2.terminate_instances(InstanceIds=instance_ids) - - for tin in res["TerminatingInstances"]: - if tin["CurrentState"]["Name"] != "shutting-down": - raise EC2TerminationFailure - - def terminate_all_instances(self) -> None: - """Terminate all instances. - - Will raise an error if something goes wrong. 
- """ - result: EC2DescribeInstancesResult = self.ec2.describe_instances( - Filters=tags_to_filter(self.tags), - ) - - instances = flatten_reservations(result) - instance_ids = [ins["InstanceId"] for ins in instances] - - if not instance_ids: - return - - self.terminate_instances(instance_ids) diff --git a/src/py/flwr_experimental/ops/compute/ec2_adapter_test.py b/src/py/flwr_experimental/ops/compute/ec2_adapter_test.py deleted file mode 100644 index 14827745bf3d..000000000000 --- a/src/py/flwr_experimental/ops/compute/ec2_adapter_test.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests EC2Adapter.""" - - -import os -import unittest -import warnings -from unittest.mock import MagicMock - -from .ec2_adapter import EC2Adapter - -IMAGE_ID = "ami-0370b0294d7241341" -KEY_NAME = "flower" -SUBNET_ID = "subnet-23da286f" -SECURITY_GROUP_IDS = ["sg-0dd0f0080bcf86400"] - - -class EC2AdapterTestCase(unittest.TestCase): - """Test suite for class EC2Adapter.""" - - def setUp(self) -> None: - """Create an instance.""" - self.ec2_mock = MagicMock() - - self.ec2_mock.run_instances.return_value = { - "Instances": [ - { - "InstanceId": "1", - "PrivateIpAddress": "1.1.1.1", - "PublicIpAddress": "2.1.1.1", - "State": {"Name": "pending"}, - } - ] - } - - self.ec2_mock.describe_instances.return_value = { - "Reservations": [ - { - "Instances": [ - { - "InstanceId": "1", - "PrivateIpAddress": "1.1.1.1", - "PublicIpAddress": "2.1.1.1", - "State": {"Name": "running"}, - } - ] - } - ] - } - - self.adapter = EC2Adapter( - image_id="ami-0370b0294d7241341", - key_name="flower", - subnet_id="subnet-23da286f", - security_group_ids=["sg-0dd0f0080bcf86400"], - tags=[("Purpose", "integration_test"), ("Test Name", "EC2AdapterTestCase")], - boto_ec2_client=self.ec2_mock, - ) - - def test_create_instances(self) -> None: - """Create and start an instance.""" - # Prepare - reservations = self.ec2_mock.describe_instances.return_value["Reservations"] - ec2_instance = reservations[0]["Instances"][0] - - expected_return_value = ( - ec2_instance["InstanceId"], - ec2_instance["PrivateIpAddress"], - ec2_instance["PublicIpAddress"], - 22, - ec2_instance["State"]["Name"], - ) - - # Execute - instances = self.adapter.create_instances(num_cpu=2, num_ram=2, timeout=1) - - # Assert - assert len(instances) == 1 - assert isinstance(instances[0], tuple) - assert instances[0] == expected_return_value - - def test_list_instances(self) -> None: - """List all instances.""" - # Prepare - reservations = 
self.ec2_mock.describe_instances.return_value["Reservations"] - ec2_instance = reservations[0]["Instances"][0] - - expected_return_value = ( - ec2_instance["InstanceId"], - ec2_instance["PrivateIpAddress"], - ec2_instance["PublicIpAddress"], - 22, - ec2_instance["State"]["Name"], - ) - - # Execute - instances = self.adapter.list_instances() - - # Assert - assert len(instances) == 1 - assert instances[0] == expected_return_value - - def test_terminate_instances(self) -> None: - """Destroy all instances.""" - # Prepare - instance_id = "1" - result = {"TerminatingInstances": [{"CurrentState": {"Name": "shutting-down"}}]} - self.ec2_mock.terminate_instances.return_value = result - - # Execute - self.adapter.terminate_instances([instance_id]) - - -if os.getenv("FLOWER_INTEGRATION"): - - class EC2AdapterIntegrationTestCase(unittest.TestCase): - """Test suite for class EC2Adapter.""" - - def setUp(self) -> None: - """Prepare tests.""" - # Filter false positive warning - warnings.filterwarnings( - "ignore", - category=ResourceWarning, - message="unclosed.*", - ) - - self.adapter = EC2Adapter( - image_id="ami-0370b0294d7241341", - key_name="flower", - subnet_id="subnet-23da286f", - security_group_ids=["sg-0dd0f0080bcf86400"], - ) - - def test_workflow(self) -> None: - """Create, list and terminate an instance.""" - # Execute & Assert - instances = self.adapter.create_instances( - num_cpu=2, num_ram=2, num_instance=1, timeout=10 - ) - instances = self.adapter.list_instances() - - assert len(instances) == 1 - - self.adapter.terminate_instances([instances[0][0]]) - - -if __name__ == "__main__": - unittest.main(verbosity=2) diff --git a/src/py/flwr_experimental/ops/instance.py b/src/py/flwr_experimental/ops/instance.py deleted file mode 100644 index 20be40552727..000000000000 --- a/src/py/flwr_experimental/ops/instance.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Provides dataclass Instance.""" - - -from dataclasses import dataclass -from typing import Optional - - -# pylint: disable=too-many-instance-attributes -@dataclass -class Instance: - """Represents an instance.""" - - # Specs - name: str - group: str - num_cpu: int - num_ram: float - gpu: bool = False - - # Runtime information - instance_id: Optional[str] = None - private_ip: Optional[str] = None - public_ip: Optional[str] = None - ssh_port: Optional[int] = None - state: Optional[str] = None diff --git a/src/swift/flwr/Package.swift b/src/swift/flwr/Package.swift index 9ebef2d89870..8adf85d67117 100644 --- a/src/swift/flwr/Package.swift +++ b/src/swift/flwr/Package.swift @@ -1,4 +1,4 @@ -// swift-tools-version: 5.6 +// swift-tools-version: 5.9 // The swift-tools-version declares the minimum version of Swift required to build this package. import PackageDescription @@ -6,7 +6,7 @@ import PackageDescription let package = Package( name: "flwr", platforms: [ - .iOS(.v13), + .iOS(.v16), ], products: [ // Products define the executables and libraries a package produces, and make them visible to other packages. 
@@ -19,8 +19,8 @@ let package = Package( // .package(url: /* package url */, from: "1.0.0"), .package(url: "https://github.com/pvieito/PythonKit.git", branch: "master"), .package(url: "https://github.com/kewlbear/NumPy-iOS.git", branch: "main"), - .package(url: "https://github.com/grpc/grpc-swift.git", from: "1.0.0"), - .package(url: "https://github.com/apple/swift-docc-plugin", from: "1.1.0"), + .package(url: "https://github.com/grpc/grpc-swift.git", from: "1.22.0"), + .package(url: "https://github.com/apple/swift-protobuf.git", from: "1.26.0"), ], targets: [ // Targets are the basic building blocks of a package. A target can define a module or a test suite. @@ -30,7 +30,8 @@ let package = Package( dependencies: [ .product(name: "GRPC", package: "grpc-swift"), .product(name: "NumPy-iOS", package: "NumPy-iOS"), - .product(name: "PythonKit", package: "PythonKit")], + .product(name: "PythonKit", package: "PythonKit"), + .product(name: "SwiftProtobuf", package: "swift-protobuf")], path: "Sources/Flower"), .testTarget( name: "FlowerTests",