diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 61c8d14039db..03849995735d 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -31,7 +31,7 @@ RUN apt-get install -y curl wget gnupg python3 python-is-python3 python3-pip git RUN python -m pip install \ pip==23.3.1 \ setuptools==68.2.2 \ - poetry==1.5.1 + poetry==1.7.1 USER $USERNAME ENV PATH="/home/$USERNAME/.local/bin:${PATH}" diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 71a8aea59859..8dac63a20598 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -3,6 +3,9 @@ # Default code owners * @danieljanes @tanertopal +# README.md +README.md @jafermarq @tanertopal @danieljanes + # Flower Baselines /baselines @jafermarq @tanertopal @danieljanes diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 479f88c1bbd5..b9d4a0a23e23 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -44,7 +44,7 @@ Example: The variable `rnd` was renamed to `server_round` to improve readability `fraction_evaluate`" -msgstr "`fraction_eval` --> `fraction_evaluate`" - -#: ../../source/ref-changelog.md:380 -msgid "`min_eval_clients` --> `min_evaluate_clients`" -msgstr "`min_eval_clients` --> `min_evaluate_clients`" -#: ../../source/ref-changelog.md:381 -msgid "`eval_fn` --> `evaluate_fn`" -msgstr "`eval_fn` --> `evaluate_fn`" +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedadam.FedAdam:1 of +msgid "FedAdam - Adaptive Federated Optimization using Adam." 
+msgstr "" -#: ../../source/ref-changelog.md:383 +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 msgid "" -"**Update default arguments of built-in strategies** " -"([#1278](https://github.com/adap/flower/pull/1278))" +":py:obj:`FedAvg `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -"**Mettre à jour les arguments par défaut des stratégies intégrées** " -"([#1278](https://github.com/adap/flower/pull/1278))" -#: ../../source/ref-changelog.md:385 +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +#, fuzzy +msgid "Federated Averaging strategy." +msgstr "Stratégie de moyenne fédérée." + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 msgid "" -"All built-in strategies now use `fraction_fit=1.0` and " -"`fraction_evaluate=1.0`, which means they select *all* currently " -"available clients for training and evaluation. Projects that relied on " -"the previous default values can get the previous behaviour by " -"initializing the strategy in the following way:" +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -"Toutes les stratégies intégrées utilisent désormais `fraction_fit=1.0` et" -" `fraction_evaluate=1.0`, ce qui signifie qu'elles sélectionnent *tous* " -"les clients actuellement disponibles pour l'entraînement et l'évaluation." -" Les projets qui s'appuyaient sur les valeurs par défaut précédentes " -"peuvent retrouver le comportement antérieur en initialisant la stratégie " -"de la manière suivante :" -#: ../../source/ref-changelog.md:387 -msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" -msgstr "`stratégie = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of +msgid "Configurable FedXgbNnAvg strategy implementation." 
+msgstr "" -#: ../../source/ref-changelog.md:389 +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 msgid "" -"**Add** `server_round` **to** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" msgstr "" -"**Ajouter** `server_round` **à** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/ref-changelog.md:391 -msgid "" -"The `Strategy` method `evaluate` now receives the current round of " -"federated learning/evaluation as the first parameter." +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of +msgid "Configurable FedXgbBagging strategy implementation." msgstr "" -"La méthode `Stratégie` `évaluer` reçoit maintenant le cycle actuel " -"d'apprentissage/évaluation fédéré comme premier paramètre." -#: ../../source/ref-changelog.md:393 +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 msgid "" -"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " -"([#1334](https://github.com/adap/flower/pull/1334))" +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" msgstr "" -"**Ajouter** `server_round` **et** `config` **paramètres à** `evaluate_fn`" -" ([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/ref-changelog.md:395 -msgid "" -"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " -"three parameters: (1) The current round of federated learning/evaluation " -"(`server_round`), (2) the model parameters to evaluate (`parameters`), " -"and (3) a config dictionary (`config`)." +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of +msgid "Configurable FedXgbCyclic strategy implementation." 
msgstr "" -"Le `evaluate_fn` passé aux stratégies intégrées comme `FedAvg` prend " -"maintenant trois paramètres : (1) le cycle actuel " -"d'apprentissage/évaluation fédéré (`server_round`), (2) les paramètres du" -" modèle à évaluer (`parameters`), et (3) un dictionnaire de configuration" -" (`config`)." -#: ../../source/ref-changelog.md:397 +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 msgid "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +":py:obj:`FedAvgAndroid `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-changelog.md:399 +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 msgid "" -"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " -"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " -"current round of federated learning/evaluation as their first parameter. " -"To improve reaability and avoid confusion with *random*, this parameter " -"has been renamed from `rnd` to `server_round`." +":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -"Plusieurs méthodes et fonctions de Flower (`evaluate_fn`, " -"`configure_fit`, `aggregate_fit`, `configure_evaluate`, " -"`aggregate_evaluate`) reçoivent le cycle actuel " -"d'apprentissage/évaluation fédéré comme premier paramètre. Pour améliorer" -" la fiabilité et éviter la confusion avec *random*, ce paramètre a été " -"renommé de `rnd` à `server_round`." -#: ../../source/ref-changelog.md:401 +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 of +#, fuzzy +msgid "Federated Averaging with Momentum strategy." +msgstr "Stratégie de moyenne fédérée." 
+ +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 msgid "" -"**Move** `flwr.dataset` **to** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" +":py:obj:`FedOpt `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -"**Déplacer** `flwr.dataset` **vers** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" -#: ../../source/ref-changelog.md:403 -msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." -msgstr "Le paquet expérimental `flwr.dataset` a été migré vers Flower Baselines." +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedopt.FedOpt:1 of +#, fuzzy +msgid "Federated Optim strategy." +msgstr "Stratégie de moyenne fédérée." -#: ../../source/ref-changelog.md:405 +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 msgid "" -"**Remove experimental strategies** " -"([#1280](https://github.com/adap/flower/pull/1280))" +":py:obj:`FedProx `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -"**Supprimer les stratégies expérimentales** " -"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/ref-changelog.md:407 +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedprox.FedProx:1 of +#, fuzzy +msgid "Federated Optimization strategy." +msgstr "Stratégie de moyenne fédérée." + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 msgid "" -"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " -"`FedFSv1`)." +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -"Supprimer les stratégies expérimentales non maintenues (`FastAndSlow`, " -"`FedFSv0`, `FedFSv1`)." 
-#: ../../source/ref-changelog.md:409 -msgid "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "FedYogi [Reddi et al., 2020] strategy." msgstr "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:411 +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 msgid "" -"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " -"capture what this type is all about." +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" msgstr "" -"`flwr.common.Weights` a été renommé en `flwr.common.NDArys` pour mieux " -"rendre compte de la nature de ce type." -#: ../../source/ref-changelog.md:413 -msgid "" -"**Remove antiquated** `force_final_distributed_eval` **from** " -"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Configurable QFedAvg strategy implementation." msgstr "" -"**Supprimez l'ancien** `force_final_distributed_eval` **de** " -"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:415 +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 msgid "" -"The `start_server` parameter `force_final_distributed_eval` has long been" -" a historic artefact, in this release it is finally gone for good." 
+":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -"Le paramètre `start_server` `force_final_distributed_eval` a longtemps " -"été un artefact historique, dans cette version il a finalement disparu " -"pour de bon." -#: ../../source/ref-changelog.md:417 +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedmedian.FedMedian:1 of +#, fuzzy +msgid "Configurable FedMedian strategy implementation." +msgstr "Configuration de l'évaluation fédérée" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 msgid "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" -#: ../../source/ref-changelog.md:419 -msgid "" -"The `get_parameters` method now accepts a configuration dictionary, just " -"like `get_properties`, `fit`, and `evaluate`." +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of +msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." msgstr "" -"La méthode `get_parameters` accepte maintenant un dictionnaire de " -"configuration, tout comme `get_properties`, `fit`, et `evaluate`." 
-#: ../../source/ref-changelog.md:421 +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 msgid "" -"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " -"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -"**Remplace** `num_rounds` **dans** `start_simulation` **avec le nouveau**" -" `config` **paramètre** " -"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:423 -msgid "" -"The `start_simulation` function now accepts a configuration dictionary " -"`config` instead of the `num_rounds` integer. This improves the " -"consistency between `start_simulation` and `start_server` and makes " -"transitioning between the two easier." +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.krum.Krum:1 of +msgid "Krum [Blanchard et al., 2017] strategy." msgstr "" -"La fonction `start_simulation` accepte maintenant un dictionnaire de " -"configuration `config` au lieu de l'entier `num_rounds`. Cela améliore la" -" cohérence entre `start_simulation` et `start_server` et facilite la " -"transition entre les deux." -#: ../../source/ref-changelog.md:427 +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 msgid "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" +":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" msgstr "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/ref-changelog.md:429 +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.bulyan.Bulyan:1 of +#, fuzzy +msgid "Bulyan strategy." +msgstr "Stratégies intégrées" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 msgid "" -"The previous Flower release introduced experimental support for Python " -"3.10, this release declares Python 3.10 support as stable." 
+":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" msgstr "" -"La version précédente de Flower a introduit la prise en charge " -"expérimentale de Python 3.10, cette version déclare la prise en charge de" -" Python 3.10 comme stable." -#: ../../source/ref-changelog.md:431 -msgid "" -"**Make all** `Client` **and** `NumPyClient` **methods optional** " -"([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." msgstr "" -"**Rendre toutes les **méthodes `Client` **et** `NumPyClient` " -"**facultatives** ([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" -#: ../../source/ref-changelog.md:433 +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 msgid "" -"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " -"`fit`, and `evaluate` are all optional. This enables writing clients that" -" implement, for example, only `fit`, but no other method. No need to " -"implement `evaluate` when using centralized evaluation!" +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" msgstr "" -"Les méthodes `Client`/`NumPyClient` `get_properties`, `get_parameters`, " -"`fit`, et `evaluate` sont toutes optionnelles. Cela permet d'écrire des " -"clients qui n'implémentent, par exemple, que `fit`, mais aucune autre " -"méthode. Pas besoin d'implémenter `evaluate` quand on utilise " -"l'évaluation centralisée !" -#: ../../source/ref-changelog.md:435 +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of +msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#, fuzzy +msgid ":py:obj:`Strategy `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.strategy.Strategy:1 of +msgid "Abstract base class for server strategy implementations." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 +msgid "Bulyan" +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 +#: flwr.server.strategy.fedmedian.FedMedian:1 +#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 +#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:3 of +msgid "Implementation based on https://arxiv.org/abs/1802.07927." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:5 +#: flwr.server.strategy.fedadagrad.FedAdagrad:5 +#: flwr.server.strategy.fedadam.FedAdam:5 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 +#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 +#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of +msgid "Fraction of clients used during training. Defaults to 1.0." 
+msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:7 +#: flwr.server.strategy.fedadagrad.FedAdagrad:7 +#: flwr.server.strategy.fedadam.FedAdam:7 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 +#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 +#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of +msgid "Fraction of clients used during validation. Defaults to 1.0." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:9 +#: flwr.server.strategy.fedadagrad.FedAdagrad:9 +#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 +#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 +#: flwr.server.strategy.fedprox.FedProx:45 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 +#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of +msgid "Minimum number of clients used during training. Defaults to 2." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:11 +#: flwr.server.strategy.fedadagrad.FedAdagrad:11 +#: flwr.server.strategy.fedadam.FedAdam:11 +#: flwr.server.strategy.fedavg.FedAvg:15 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 +#: flwr.server.strategy.fedavgm.FedAvgM:11 +#: flwr.server.strategy.fedopt.FedOpt:11 +#: flwr.server.strategy.fedprox.FedProx:47 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 +#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of +msgid "Minimum number of clients used during validation. Defaults to 2." 
+msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:13 +#: flwr.server.strategy.fedadagrad.FedAdagrad:13 +#: flwr.server.strategy.fedadam.FedAdam:13 +#: flwr.server.strategy.fedavg.FedAvg:17 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 +#: flwr.server.strategy.fedavgm.FedAvgM:13 +#: flwr.server.strategy.fedopt.FedOpt:13 +#: flwr.server.strategy.fedprox.FedProx:49 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 +#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of +msgid "Minimum number of total clients in the system. Defaults to 2." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of +msgid "Number of malicious clients in the system. Defaults to 0." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:17 +#: flwr.server.strategy.fedadagrad.FedAdagrad:15 +#: flwr.server.strategy.fedadam.FedAdam:15 +#: flwr.server.strategy.fedavg.FedAvg:19 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 +#: flwr.server.strategy.fedavgm.FedAvgM:15 +#: flwr.server.strategy.fedopt.FedOpt:15 +#: flwr.server.strategy.fedprox.FedProx:51 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 +#: flwr.server.strategy.fedyogi.FedYogi:17 +#: flwr.server.strategy.fedyogi.FedYogi:18 +#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of +msgid "Optional function used for validation. Defaults to None." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:19 +#: flwr.server.strategy.fedadagrad.FedAdagrad:17 +#: flwr.server.strategy.fedadam.FedAdam:17 +#: flwr.server.strategy.fedavg.FedAvg:21 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 +#: flwr.server.strategy.fedavgm.FedAvgM:17 +#: flwr.server.strategy.fedopt.FedOpt:17 +#: flwr.server.strategy.fedprox.FedProx:53 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 +#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of +msgid "Function used to configure training. Defaults to None." 
+msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:19 +#: flwr.server.strategy.fedadam.FedAdam:19 +#: flwr.server.strategy.fedavg.FedAvg:23 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 +#: flwr.server.strategy.fedavgm.FedAvgM:19 +#: flwr.server.strategy.fedopt.FedOpt:19 +#: flwr.server.strategy.fedprox.FedProx:55 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 +#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of +msgid "Function used to configure validation. Defaults to None." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:23 +#: flwr.server.strategy.fedadagrad.FedAdagrad:25 +#: flwr.server.strategy.fedadam.FedAdam:21 +#: flwr.server.strategy.fedavg.FedAvg:25 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 +#: flwr.server.strategy.fedavgm.FedAvgM:21 +#: flwr.server.strategy.fedopt.FedOpt:21 +#: flwr.server.strategy.fedprox.FedProx:57 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 +#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of +msgid "Whether or not accept rounds containing failures. Defaults to True." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:25 +#: flwr.server.strategy.fedadagrad.FedAdagrad:27 +#: flwr.server.strategy.fedadam.FedAdam:23 +#: flwr.server.strategy.fedavg.FedAvg:27 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 +#: flwr.server.strategy.fedavgm.FedAvgM:23 +#: flwr.server.strategy.fedopt.FedOpt:23 +#: flwr.server.strategy.fedprox.FedProx:59 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 +#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of +#, fuzzy +msgid "Initial global model parameters." 
+msgstr "Initialise le modèle global" + +#: flwr.server.strategy.bulyan.Bulyan:27 of msgid "" -"**Enable passing a** `Server` **instance to** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" +"Byzantine resilient aggregation rule that is used as the first step of " +"the Bulyan (e.g., Krum)" msgstr "" -"**Autoriser le passage d'une **instance `Server` à** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:437 +#: flwr.server.strategy.bulyan.Bulyan:29 of +msgid "arguments to the first_aggregation rule" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Similar to `start_server`, `start_simulation` now accepts a full `Server`" -" instance. This enables users to heavily customize the execution of " -"eperiments and opens the door to running, for example, async FL using the" -" Virtual Client Engine." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -"Comme pour `start_server`, `start_simulation` accepte maintenant une " -"instance complète de `Server`. Cela permet aux utilisateurs de " -"personnaliser fortement l'exécution des expériences et ouvre la porte à " -"l'exécution, par exemple, de FL asynchrones à l'aide du moteur de client " -"virtuel." 
-#: ../../source/ref-changelog.md:439 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy +msgid "Aggregate evaluation losses using weighted average." +msgstr "Résultats globaux de l'évaluation." + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Update code examples** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Mettre à jour les exemples de code** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:441 +#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy +msgid "Aggregate fit results using Bulyan." +msgstr "Résultats globaux de l'évaluation." 
+ +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of +#, fuzzy +msgid "Configure the next round of evaluation." 
+msgstr "Configuration de l'évaluation côté serveur" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 +#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of +msgid "Configure the next round of training." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Evaluate model parameters using an evaluation function." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 +#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy +msgid "Initialize global model parameters." 
+msgstr "Initialise le modèle global" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Many code examples received small or even large maintenance updates, " -"among them are" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"De nombreux exemples de code ont reçu de petites ou même de grandes mises" -" à jour de maintenance" -#: ../../source/ref-changelog.md:443 -msgid "`scikit-learn`" -msgstr "`scikit-learn`" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of +msgid "Use a fraction of available clients for evaluation." 
+msgstr "" -#: ../../source/ref-changelog.md:444 -msgid "`simulation_pytorch`" -msgstr "`simulation_pytorch`" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" +msgstr "" -#: ../../source/ref-changelog.md:445 -msgid "`quickstart_pytorch`" -msgstr "`quickstart_pytorch` (démarrage rapide)" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of +msgid "Return the sample size and the required number of available clients." +msgstr "" -#: ../../source/ref-changelog.md:446 -msgid "`quickstart_simulation`" -msgstr "`quickstart_simulation`" +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" +msgstr "" -#: ../../source/ref-changelog.md:447 -msgid "`quickstart_tensorflow`" -msgstr "`quickstart_tensorflow`" +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of +msgid "This class is deprecated and will be removed in a future release." 
+msgstr "" -#: ../../source/ref-changelog.md:448 -msgid "`advanced_tensorflow`" -msgstr "`advanced_tensorflow` (en anglais)" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +#, fuzzy +msgid "Aggregate evaluation losses using the given strategy." +msgstr "Résultats globaux de l'évaluation." -#: ../../source/ref-changelog.md:450 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"**Remove the obsolete simulation example** " -"([#1328](https://github.com/adap/flower/pull/1328))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Supprime l'exemple de simulation obsolète** " -"([#1328](https://github.com/adap/flower/pull/1328))" -#: ../../source/ref-changelog.md:452 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." 
+msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Removes the obsolete `simulation` example and renames " -"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " -"naming of `simulation_pytorch`" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Supprime l'exemple obsolète `simulation` et renomme " -"`quickstart_simulation` en `simulation_tensorflow` pour qu'il corresponde" -" au nom de `simulation_pytorch`" -#: ../../source/ref-changelog.md:454 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of +msgid "Configure the next round of evaluation using the specified strategy." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"**Update documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Mise à jour de la documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " 
-"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/ref-changelog.md:456 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"One substantial documentation update fixes multiple smaller rendering " -"issues, makes titles more succinct to improve navigation, removes a " -"deprecated library, updates documentation dependencies, includes the " -"`flwr.common` module in the API reference, includes support for markdown-" -"based documentation, migrates the changelog from `.rst` to `.md`, and " -"fixes a number of smaller details!" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"Une mise à jour substantielle de la documentation corrige plusieurs " -"petits problèmes de rendu, rend les titres plus succincts pour améliorer " -"la navigation, supprime une bibliothèque obsolète, met à jour les " -"dépendances de la documentation, inclut le module `flwr.common` dans la " -"référence de l'API, inclut le support de la documentation basée sur le " -"markdown, migre le changelog de `.rst` vers `.md`, et corrige un certain " -"nombre de détails plus petits !" -#: ../../source/ref-changelog.md:458 ../../source/ref-changelog.md:513 -#: ../../source/ref-changelog.md:582 ../../source/ref-changelog.md:621 -msgid "**Minor updates**" -msgstr "**Mises à jour mineures**" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of +msgid "Evaluate model parameters using an evaluation function from the strategy." 
+msgstr "" -#: ../../source/ref-changelog.md:460 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Add round number to fit and evaluate log messages " -"([#1266](https://github.com/adap/flower/pull/1266))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Ajoute un chiffre rond pour ajuster et évaluer les messages du journal " -"([#1266](https://github.com/adap/flower/pull/1266))" -#: ../../source/ref-changelog.md:461 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of +msgid "Initialize global model parameters using given strategy." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.configure_fit:3 +#: flwr.server.strategy.strategy.Strategy.evaluate:6 of +#, fuzzy +msgid "The current round of federated learning." +msgstr "Qu'est-ce que l'apprentissage fédéré ?" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 +#: flwr.server.strategy.strategy.Strategy.configure_fit:7 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of +msgid "The client manager which holds all currently connected clients." 
+msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of msgid "" -"Add secure gRPC connection to the `advanced_tensorflow` code example " -"([#847](https://github.com/adap/flower/pull/847))" +"**evaluate_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `EvaluateIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated evaluation." msgstr "" -"Ajouter une connexion gRPC sécurisée à l'exemple de code " -"`advanced_tensorflow` ([#847](https://github.com/adap/flower/pull/847))" -#: ../../source/ref-changelog.md:462 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Update developer tooling " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"Mettre à jour les outils de développement " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:463 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Rename ProtoBuf messages to improve consistency " -"([#1214](https://github.com/adap/flower/pull/1214), " 
-"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"Renomme les messages ProtoBuf pour améliorer la cohérence " -"([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:465 -msgid "v0.19.0 (2022-05-18)" -msgstr "v0.19.0 (2022-05-18)" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of +msgid "Aggregate training results using unweighted aggregation." +msgstr "" -#: ../../source/ref-changelog.md:469 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Flower Baselines (preview) : FedOpt, FedBN, FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" -#: ../../source/ref-changelog.md:471 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"The first preview release of Flower Baselines has arrived! We're " -"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " -"to use [Flower Baselines](https://flower.dev/docs/using-baselines.html). 
" -"With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.dev/docs/contributing-" -"baselines.html)." +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"La première version préliminaire de Flower Baselines est arrivée ! Nous " -"démarrons Flower Baselines avec des implémentations de FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, et FedAvgM. Consultez la documentation sur " -"l'utilisation de [Flower Baselines](https://flower.dev/docs/using-" -"baselines.html). Avec cette première version préliminaire, nous invitons " -"également la communauté à [contribuer à leurs propres lignes de " -"base](https://flower.dev/docs/contributing-baselines.html)." -#: ../../source/ref-changelog.md:473 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of msgid "" -"**C++ client SDK (preview) and code example** " -"([#1111](https://github.com/adap/flower/pull/1111))" +"Configure the next round of training incorporating Differential Privacy " +"(DP)." msgstr "" -"**SDK client C++ (aperçu) et exemple de code** " -"([#1111](https://github.com/adap/flower/pull/1111))" -#: ../../source/ref-changelog.md:475 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Preview support for Flower clients written in C++. The C++ preview " -"includes a Flower client SDK and a quickstart code example that " -"demonstrates a simple C++ client using the SDK." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"L'aperçu C++ comprend un SDK pour les clients Flower et un exemple de " -"code de démarrage rapide qui démontre un client C++ simple utilisant le " -"SDK." 
-#: ../../source/ref-changelog.md:477 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"**Add experimental support for Python 3.10 and Python 3.11** " -"([#1135](https://github.com/adap/flower/pull/1135))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**Ajouter la prise en charge expérimentale de Python 3.10 et Python " -"3.11** ([#1135](https://github.com/adap/flower/pull/1135))" -#: ../../source/ref-changelog.md:479 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of msgid "" -"Python 3.10 is the latest stable release of Python and Python 3.11 is due" -" to be released in October. This Flower release adds experimental support" -" for both Python versions." +"Configuration of the next training round includes information related to " +"DP, such as clip norm and noise stddev." msgstr "" -"Python 3.10 est la dernière version stable de Python et Python 3.11 " -"devrait sortir en octobre. Cette version de Flower ajoute une prise en " -"charge expérimentale pour les deux versions de Python." -#: ../../source/ref-changelog.md:481 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 +#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of msgid "" -"**Aggregate custom metrics through user-provided functions** " -"([#1144](https://github.com/adap/flower/pull/1144))" +"**fit_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `FitIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated learning." 
msgstr "" -"**Agréger des mesures personnalisées grâce à des fonctions fournies par " -"l'utilisateur** ([#1144](https://github.com/adap/flower/pull/1144))" -#: ../../source/ref-changelog.md:483 +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 +#, fuzzy +msgid "FaultTolerantFedAvg" +msgstr "server.strategy.FaultTolerantFedAvg" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" -" customize the strategy. Built-in strategies support two new arguments, " -"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " -"allow passing custom metric aggregation functions." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"Les stratégies intégrées prennent en charge deux nouveaux arguments, " -"`fit_metrics_aggregation_fn` et `evaluate_metrics_aggregation_fn`, qui " -"permettent de passer des fonctions d'agrégation de métriques " -"personnalisées." -#: ../../source/ref-changelog.md:485 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"**User-configurable round timeout** " -"([#1162](https://github.com/adap/flower/pull/1162))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Temps d'attente configurable par l'utilisateur** " -"([#1162](https://github.com/adap/flower/pull/1162))" -#: ../../source/ref-changelog.md:487 -msgid "" -"A new configuration value allows the round timeout to be set for " -"`start_server` and `start_simulation`. If the `config` dictionary " -"contains a `round_timeout` key (with a `float` value in seconds), the " -"server will wait *at least* `round_timeout` seconds before it closes the " -"connection." 
+#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 +#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 +#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 +#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using weighted average." msgstr "" -"Si le dictionnaire `config` contient une clé `round_timeout` (avec une " -"valeur `float` en secondes), le serveur attendra *au moins* " -"`round_timeout` secondes avant de fermer la connexion." 
-#: ../../source/ref-changelog.md:489 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"**Enable both federated evaluation and centralized evaluation to be used " -"at the same time in all built-in strategies** " -"([#1091](https://github.com/adap/flower/pull/1091))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Permettre l'utilisation simultanée de l'évaluation fédérée et de " -"l'évaluation centralisée dans toutes les stratégies intégrées** " -"([#1091](https://github.com/adap/flower/pull/1091))" -#: ../../source/ref-changelog.md:491 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"Built-in strategies can now perform both federated evaluation (i.e., " -"client-side) and centralized evaluation (i.e., server-side) in the same " -"round. Federated evaluation can be disabled by setting `fraction_eval` to" -" `0.0`." +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Les stratégies intégrées peuvent maintenant effectuer une évaluation " -"fédérée (c'est-à-dire côté client) et une évaluation centralisée " -"(c'est-à-dire côté serveur) dans le même tour. L'évaluation fédérée peut " -"être désactivée en réglant `fraction_eval` sur `0.0`." 
-#: ../../source/ref-changelog.md:493 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"**Two new Jupyter Notebook tutorials** " -"([#1141](https://github.com/adap/flower/pull/1141))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Deux nouveaux tutoriels Jupyter Notebook** " -"([#1141](https://github.com/adap/flower/pull/1141))" -#: ../../source/ref-changelog.md:495 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " -"basic and intermediate Flower features:" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Deux tutoriels Jupyter Notebook (compatibles avec Google Colab) " -"expliquent les fonctionnalités de base et intermédiaires de Flower :" -#: ../../source/ref-changelog.md:497 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"*An Introduction to Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"*Introduction à l'apprentissage fédéré* : [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" -#: ../../source/ref-changelog.md:499 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"*Using Strategies in Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"*Utiliser des stratégies dans l'apprentissage fédéré* : [Open in " 
-"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" -#: ../../source/ref-changelog.md:501 -msgid "" -"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " -"([#1076](https://github.com/adap/flower/pull/1076))" +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 +#: ../../source/ref-changelog.md:839 +msgid "FedAdagrad" +msgstr "FedAdagrad" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 +#: flwr.server.strategy.fedadam.FedAdam:1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" msgstr "" -"**Nouvelle stratégie FedAvgM (Federated Averaging with Server Momentum)**" -" ([#1076](https://github.com/adap/flower/pull/1076))" -#: ../../source/ref-changelog.md:503 +#: flwr.server.strategy.fedadagrad.FedAdagrad:3 +#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 +#: flwr.server.strategy.fedyogi.FedYogi:3 of #, fuzzy -msgid "" -"The new `FedAvgM` strategy implements Federated Averaging with Server " -"Momentum \\[Hsu et al., 2019\\]." +msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" msgstr "" -"La nouvelle stratégie `FedAvgM` met en œuvre la moyenne fédérée avec le " -"momentum du serveur [Hsu et al., 2019]." +"FedYogi - Stratégie d'apprentissage fédéré utilisant Yogi côté serveur. 
" +"Mise en oeuvre basée sur https://arxiv.org/abs/2003.00295" -#: ../../source/ref-changelog.md:505 -msgid "" -"**New advanced PyTorch code example** " -"([#1007](https://github.com/adap/flower/pull/1007))" +#: flwr.server.strategy.fedadagrad.FedAdagrad:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:23 +#: flwr.server.strategy.fedadam.FedAdam:25 +#: flwr.server.strategy.fedadam.FedAdam:27 +#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 +#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 +#: flwr.server.strategy.fedprox.FedProx:61 +#: flwr.server.strategy.fedprox.FedProx:63 +#: flwr.server.strategy.fedyogi.FedYogi:28 +#: flwr.server.strategy.fedyogi.FedYogi:30 of +msgid "Metrics aggregation function, optional." msgstr "" -"**Nouvel exemple de code PyTorch avancé** " -"([#1007](https://github.com/adap/flower/pull/1007))" -#: ../../source/ref-changelog.md:507 +#: flwr.server.strategy.fedadagrad.FedAdagrad:29 +#: flwr.server.strategy.fedadam.FedAdam:29 +#: flwr.server.strategy.fedopt.FedOpt:29 of +msgid "Server-side learning rate. Defaults to 1e-1." +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:31 +#: flwr.server.strategy.fedadam.FedAdam:31 +#: flwr.server.strategy.fedopt.FedOpt:31 of +msgid "Client-side learning rate. Defaults to 1e-1." +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:33 +#: flwr.server.strategy.fedadam.FedAdam:37 +#: flwr.server.strategy.fedopt.FedOpt:37 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A new code example (`advanced_pytorch`) demonstrates advanced Flower " -"concepts with PyTorch." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"Un nouvel exemple de code (`advanced_pytorch`) démontre des concepts de " -"fleur avancés avec PyTorch." 
-#: ../../source/ref-changelog.md:509 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**New JAX code example** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" +":py:obj:`aggregate_fit `\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Nouvel exemple de code JAX** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/ref-changelog.md:511 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A new code example (`jax_from_centralized_to_federated`) shows federated " -"learning with JAX and Flower." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Un nouvel exemple de code (`jax_from_centralized_to_federated`) montre " -"l'apprentissage fédéré avec JAX et Flower." -#: ../../source/ref-changelog.md:515 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"New option to keep Ray running if Ray was already initialized in " -"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" +":py:obj:`configure_fit `\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Nouvelle option pour continuer à faire fonctionner Ray si Ray a déjà été " -"initialisé dans `start_simulation` " -"([#1177](https://github.com/adap/flower/pull/1177))" -#: ../../source/ref-changelog.md:516 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Add support for custom `ClientManager` as a `start_simulation` parameter " -"([#1171](https://github.com/adap/flower/pull/1171))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"Ajout de la prise en charge d'un `ClientManager` personnalisé comme " -"paramètre de `start_simulation` " -"([#1171](https://github.com/adap/flower/pull/1171))" -#: ../../source/ref-changelog.md:517 +#: 
flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"New documentation for [implementing strategies](https://flower.dev/docs" -"/how-to-implement-strategies.html) " -"([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Nouvelle documentation pour [mettre en œuvre des " -"stratégies](https://flower.dev/docs/framework/how-to-implement-strategies.html) " -"([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175))" -#: ../../source/ref-changelog.md:518 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"New mobile-friendly documentation theme " -"([#1174](https://github.com/adap/flower/pull/1174))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Nouveau thème de documentation adapté aux mobiles " -"([#1174](https://github.com/adap/flower/pull/1174))" -#: ../../source/ref-changelog.md:519 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Limit version range for (optional) `ray` dependency to include only " -"compatible releases (`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Limite la plage de versions pour la dépendance (optionnelle) `ray` pour " -"n'inclure que les versions compatibles (`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" -#: ../../source/ref-changelog.md:523 +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 +#, fuzzy +msgid "FedAdam" +msgstr "FedAdagrad" + +#: flwr.server.strategy.fedadam.FedAdam:33 +#: flwr.server.strategy.fedyogi.FedYogi:36 of +msgid "Momentum parameter. Defaults to 0.9." 
+msgstr "" + +#: flwr.server.strategy.fedadam.FedAdam:35 +#: flwr.server.strategy.fedyogi.FedYogi:38 of +msgid "Second moment parameter. Defaults to 0.99." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Remove deprecated support for Python 3.6** " -"([#871](https://github.com/adap/flower/pull/871))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -"**Supprime la prise en charge obsolète de Python 3.6** " -"([#871](https://github.com/adap/flower/pull/871))" -#: ../../source/ref-changelog.md:524 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Remove deprecated KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Supprimez KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:525 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Remove deprecated no-op extra installs** " -"([#973](https://github.com/adap/flower/pull/973))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"**Supprimer les installations supplémentaires no-op dépréciées** " -"([#973](https://github.com/adap/flower/pull/973))" -#: ../../source/ref-changelog.md:526 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " -"([#869](https://github.com/adap/flower/pull/869))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Supprimez les champs proto obsolètes de** `FitRes` **et** `EvaluateRes`" -" ([#869](https://github.com/adap/flower/pull/869))" -#: ../../source/ref-changelog.md:527 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " 
-"([#1107](https://github.com/adap/flower/pull/1107))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Supprime la stratégie QffedAvg (remplacée par QFedAvg)** " -"([#1107](https://github.com/adap/flower/pull/1107))" -#: ../../source/ref-changelog.md:528 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Remove deprecated DefaultStrategy strategy** " -"([#1142](https://github.com/adap/flower/pull/1142))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**Supprime la stratégie DefaultStrategy qui est obsolète** " -"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/ref-changelog.md:529 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Remove deprecated support for eval_fn accuracy return value** " -"([#1142](https://github.com/adap/flower/pull/1142))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Supprimer la prise en charge obsolète de la valeur de retour de la " -"précision eval_fn** ([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/ref-changelog.md:530 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Remove deprecated support for passing initial parameters as NumPy " -"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Supprime la prise en charge obsolète du passage des paramètres initiaux" -" en tant que ndarrays NumPy** " -"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/ref-changelog.md:532 -msgid "v0.18.0 (2022-02-28)" -msgstr "v0.18.0 (2022-02-28)" +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 +#, fuzzy +msgid "FedAvg" +msgstr "DP-FedAvg" -#: ../../source/ref-changelog.md:536 +#: flwr.server.strategy.fedavg.FedAvg:3 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of +msgid "Implementation 
based on https://arxiv.org/abs/1602.05629" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 +#: of msgid "" -"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " -"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" +"Fraction of clients used during training. In case `min_fit_clients` is " +"larger than `fraction_fit * available_clients`, `min_fit_clients` will " +"still be sampled. Defaults to 1.0." msgstr "" -"**Amélioration de la compatibilité du moteur de client virtuel avec " -"Jupyter Notebook / Google Colab** " -"([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" -#: ../../source/ref-changelog.md:538 +#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 +#: of msgid "" -"Simulations (using the Virtual Client Engine through `start_simulation`) " -"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " -"installing Flower with the `simulation` extra (`pip install " -"flwr[simulation]`)." +"Fraction of clients used during validation. In case " +"`min_evaluate_clients` is larger than `fraction_evaluate * " +"available_clients`, `min_evaluate_clients` will still be sampled. " +"Defaults to 1.0." msgstr "" -"Les simulations (utilisant le moteur de client virtuel via " -"`start_simulation`) fonctionnent maintenant plus facilement sur les " -"Notebooks Jupyter (y compris Google Colab) après avoir installé Flower " -"avec l'option `simulation` (`pip install flwr[simulation]`)." 
-#: ../../source/ref-changelog.md:540 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**New Jupyter Notebook code example** " -"([#833](https://github.com/adap/flower/pull/833))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -"**Nouvel exemple de code Jupyter Notebook** " -"([#833](https://github.com/adap/flower/pull/833))" -#: ../../source/ref-changelog.md:542 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A new code example (`quickstart_simulation`) demonstrates Flower " -"simulations using the Virtual Client Engine through Jupyter Notebook " -"(incl. Google Colab)." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"Un nouvel exemple de code (`quickstart_simulation`) démontre des " -"simulations de Flower en utilisant le moteur de client virtuel via " -"Jupyter Notebook (y compris Google Colab)." -#: ../../source/ref-changelog.md:544 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Client properties (feature preview)** " -"([#795](https://github.com/adap/flower/pull/795))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -"**Propriétés du client (aperçu des fonctionnalités)** " -"([#795](https://github.com/adap/flower/pull/795))" -#: ../../source/ref-changelog.md:546 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Clients can implement a new method `get_properties` to enable server-side" -" strategies to query client properties." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Les clients peuvent implémenter une nouvelle méthode `get_properties` " -"pour permettre aux stratégies côté serveur d'interroger les propriétés du" -" client." 
-#: ../../source/ref-changelog.md:548 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Experimental Android support with TFLite** " -"([#865](https://github.com/adap/flower/pull/865))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Support expérimental d'Android avec TFLite** " -"([#865](https://github.com/adap/flower/pull/865))" -#: ../../source/ref-changelog.md:550 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Android support has finally arrived in `main`! Flower is both client-" -"agnostic and framework-agnostic by design. One can integrate arbitrary " -"client platforms and with this release, using Flower on Android has " -"become a lot easier." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"La prise en charge d'Android est enfin arrivée dans `main` ! Flower est à" -" la fois agnostique au niveau du client et du cadre de travail. On peut " -"intégrer des plates-formes client arbitraires et avec cette version, " -"l'utilisation de Flower sur Android est devenue beaucoup plus facile." -#: ../../source/ref-changelog.md:552 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The example uses TFLite on the client side, along with a new " -"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " -"still experimental, but they are a first step towards a fully-fledged " -"Android SDK and a unified `FedAvg` implementation that integrated the new" -" functionality from `FedAvgAndroid`." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"L'exemple utilise TFLite du côté client, ainsi qu'une nouvelle stratégie " -"`FedAvgAndroid`. 
Le client Android et `FedAvgAndroid` sont encore " -"expérimentaux, mais ils constituent un premier pas vers un SDK Android à " -"part entière et une implémentation unifiée de `FedAvg` intégrant la " -"nouvelle fonctionnalité de `FedAvgAndroid`." -#: ../../source/ref-changelog.md:554 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Make gRPC keepalive time user-configurable and decrease default " -"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -"**Rendre le temps de garde gRPC configurable par l'utilisateur et " -"diminuer le temps de garde par défaut** " -"([#1069](https://github.com/adap/flower/pull/1069))" -#: ../../source/ref-changelog.md:556 +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 +#, fuzzy +msgid "FedAvgAndroid" +msgstr "DPFedAvgAdaptive" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"The default gRPC keepalive time has been reduced to increase the " -"compatibility of Flower with more cloud environments (for example, " -"Microsoft Azure). Users can configure the keepalive time to customize the" -" gRPC stack based on specific requirements." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"Le temps de keepalive gRPC par défaut a été réduit pour augmenter la " -"compatibilité de Flower avec davantage d'environnements cloud (par " -"exemple, Microsoft Azure). Les utilisateurs peuvent configurer le temps " -"de keepalive pour personnaliser la pile gRPC en fonction d'exigences " -"spécifiques." 
-#: ../../source/ref-changelog.md:558 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**New differential privacy example using Opacus and PyTorch** " -"([#805](https://github.com/adap/flower/pull/805))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Nouvel exemple de confidentialité différentielle utilisant Opacus et " -"PyTorch** ([#805](https://github.com/adap/flower/pull/805))" -#: ../../source/ref-changelog.md:560 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"A new code example (`opacus`) demonstrates differentially-private " -"federated learning with Opacus, PyTorch, and Flower." +":py:obj:`bytes_to_ndarray " +"`\\ \\(tensor\\)" msgstr "" -"Un nouvel exemple de code (`opacus`) démontre l'apprentissage fédéré " -"différentiellement privé avec Opacus, PyTorch et Flower." -#: ../../source/ref-changelog.md:562 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of +#, fuzzy +msgid "Deserialize NumPy array from bytes." +msgstr "Désérialise le tableau numérique NumPy à partir d'octets." + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**New Hugging Face Transformers code example** " -"([#863](https://github.com/adap/flower/pull/863))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Nouvel exemple de code pour les Transformers à visage embrassant** " -"([#863](https://github.com/adap/flower/pull/863))" -#: ../../source/ref-changelog.md:564 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"A new code example (`quickstart_huggingface`) demonstrates usage of " -"Hugging Face Transformers with Flower." 
+":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Un nouvel exemple de code (`quickstart_huggingface`) démontre " -"l'utilisation des transformateurs Hugging Face avec Flower." -#: ../../source/ref-changelog.md:566 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**New MLCube code example** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Nouvel exemple de code MLCube** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:568 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " -"with Flower." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Un nouvel exemple de code (`quickstart_mlcube`) démontre l'utilisation de" -" MLCube avec Flower." 
-#: ../../source/ref-changelog.md:570 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**SSL-enabled server and client** " -"([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +":py:obj:`ndarray_to_bytes " +"`\\ \\(ndarray\\)" msgstr "" -"**([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:572 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of +#, fuzzy +msgid "Serialize NumPy array to bytes." +msgstr "Sérialise le tableau numérique NumPy en octets." + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"SSL enables secure encrypted connections between clients and servers. " -"This release open-sources the Flower secure gRPC implementation to make " -"encrypted communication channels accessible to all Flower users." +":py:obj:`ndarrays_to_parameters " +"`\\ " +"\\(ndarrays\\)" msgstr "" -"SSL permet d'établir des connexions cryptées et sécurisées entre les " -"clients et les serveurs. Cette version met en open-source " -"l'implémentation gRPC sécurisée de Flower afin de rendre les canaux de " -"communication cryptés accessibles à tous les utilisateurs de Flower." 
-#: ../../source/ref-changelog.md:574 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Updated** `FedAdam` **and** `FedYogi` **strategies** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Mise à jour** `FedAdam` **et** `FedYogi` **stratégies** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" -#: ../../source/ref-changelog.md:576 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"`FedAdam` and `FedAdam` match the latest version of the Adaptive " -"Federated Optimization paper." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"`FedAdam` et `FedAdam` correspondent à la dernière version de l'article " -"sur l'optimisation fédérée adaptative." -#: ../../source/ref-changelog.md:578 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Initialize** `start_simulation` **with a list of client IDs** " -"([#860](https://github.com/adap/flower/pull/860))" +":py:obj:`parameters_to_ndarrays " +"`\\ " +"\\(parameters\\)" msgstr "" -"**Initialise** `start_simulation` **avec une liste d'ID de clients** " -"([#860](https://github.com/adap/flower/pull/860))" -#: ../../source/ref-changelog.md:580 -msgid "" -"`start_simulation` can now be called with a list of client IDs " -"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " -"`client_fn` whenever a client needs to be initialized, which can make it " -"easier to load data partitions that are not accessible through `int` " -"identifiers." 
+#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 +#: of +#, fuzzy +msgid "Convert parameters object to NumPy weights." +msgstr "Convertit l'objet des paramètres en ndarrays NumPy." + +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 +#, fuzzy +msgid "FedAvgM" +msgstr "DP-FedAvg" + +#: flwr.server.strategy.fedavgm.FedAvgM:3 of +msgid "Implementation based on https://arxiv.org/abs/1909.06335" msgstr "" -"`start_simulation` peut maintenant être appelé avec une liste " -"d'identifiants de clients (`clients_ids`, type : `List[str]`). Ces " -"identifiants seront passés à `client_fn` chaque fois qu'un client doit " -"être initialisé, ce qui peut faciliter le chargement de partitions de " -"données qui ne sont pas accessibles par des identifiants `int`." -#: ../../source/ref-changelog.md:584 +#: flwr.server.strategy.fedavgm.FedAvgM:25 of msgid "" -"Update `num_examples` calculation in PyTorch code examples in " -"([#909](https://github.com/adap/flower/pull/909))" +"Server-side learning rate used in server-side optimization. Defaults to " +"1.0." msgstr "" -"Mettre à jour le calcul de `num_examples` dans les exemples de code " -"PyTorch dans ([#909](https://github.com/adap/flower/pull/909))" -#: ../../source/ref-changelog.md:585 +#: flwr.server.strategy.fedavgm.FedAvgM:28 of +msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Expose Flower version through `flwr.__version__` " -"([#952](https://github.com/adap/flower/pull/952))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -"Exposer la version de Flower à travers `flwr.__version__` " -"([#952](https://github.com/adap/flower/pull/952))" -#: ../../source/ref-changelog.md:586 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"`start_server` in `app.py` now returns a `History` object containing " -"metrics from training ([#974](https://github.com/adap/flower/pull/974))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"`start_server` dans `app.py` renvoie maintenant un objet `History` " -"contenant les métriques de l'entraînement " -"([#974](https://github.com/adap/flower/pull/974))" -#: ../../source/ref-changelog.md:587 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " -"([#978](https://github.com/adap/flower/pull/978))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"Rendre `max_workers` (utilisé par `ThreadPoolExecutor`) configurable " -"([#978](https://github.com/adap/flower/pull/978))" -#: ../../source/ref-changelog.md:588 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Increase sleep time after server start to three seconds in all code " -"examples ([#1086](https://github.com/adap/flower/pull/1086))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Augmente le temps de sommeil après le démarrage du serveur à trois " -"secondes dans tous les exemples de code " -"([#1086](https://github.com/adap/flower/pull/1086))" -#: ../../source/ref-changelog.md:589 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Added 
a new FAQ section to the documentation " -"([#948](https://github.com/adap/flower/pull/948))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"Ajout d'une nouvelle section FAQ à la documentation " -"([#948](https://github.com/adap/flower/pull/948))" -#: ../../source/ref-changelog.md:590 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"And many more under-the-hood changes, library updates, documentation " -"changes, and tooling improvements!" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Et bien d'autres changements sous le capot, des mises à jour de la " -"bibliothèque, des modifications de la documentation et des améliorations " -"de l'outillage !" -#: ../../source/ref-changelog.md:594 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Removed** `flwr_example` **and** `flwr_experimental` **from release " -"build** ([#869](https://github.com/adap/flower/pull/869))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Supprimé** `flwr_example` **et** `flwr_experimental` **de la version " -"release build** ([#869](https://github.com/adap/flower/pull/869))" -#: ../../source/ref-changelog.md:596 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The packages `flwr_example` and `flwr_experimental` have been deprecated " -"since Flower 0.12.0 and they are not longer included in Flower release " -"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " -"an upcoming release." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Les paquets `flwr_example` et `flwr_experimental` ont été dépréciés " -"depuis Flower 0.12.0 et ils ne sont plus inclus dans les builds de " -"Flower. 
Les extras associés (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) sont maintenant no-op et seront " -"supprimés dans une prochaine version." -#: ../../source/ref-changelog.md:598 -msgid "v0.17.0 (2021-09-24)" -msgstr "v0.17.0 (2021-09-24)" +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 +msgid "FedMedian" +msgstr "" -#: ../../source/ref-changelog.md:602 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Experimental virtual client engine** " -"([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Moteur expérimental de client virtuel** " -"([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" -#: ../../source/ref-changelog.md:604 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"One of Flower's goals is to enable research at scale. This release " -"enables a first (experimental) peek at a major new feature, codenamed the" -" virtual client engine. Virtual clients enable simulations that scale to " -"a (very) large number of clients on a single machine or compute cluster. " -"The easiest way to test the new functionality is to look at the two new " -"code examples called `quickstart_simulation` and `simulation_pytorch`." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"L'un des objectifs de Flower est de permettre la recherche à grande " -"échelle. Cette version donne un premier aperçu (expérimental) d'une " -"nouvelle fonctionnalité majeure, connue sous le nom de code de moteur de " -"client virtuel. 
Les clients virtuels permettent des simulations qui " -"s'étendent à un (très) grand nombre de clients sur une seule machine ou " -"une grappe de calcul. La façon la plus simple de tester la nouvelle " -"fonctionnalité est de regarder les deux nouveaux exemples de code appelés" -" `quickstart_simulation` et `simulation_pytorch`." -#: ../../source/ref-changelog.md:606 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of +#, fuzzy +msgid "Aggregate fit results using median." +msgstr "Résultats globaux de l'évaluation." + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The feature is still experimental, so there's no stability guarantee for " -"the API. It's also not quite ready for prime time and comes with a few " -"known caveats. However, those who are curious are encouraged to try it " -"out and share their thoughts." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"La fonction est encore expérimentale, il n'y a donc aucune garantie de " -"stabilité pour l'API. Elle n'est pas non plus tout à fait prête pour le " -"prime time et s'accompagne de quelques mises en garde connues. Cependant," -" les personnes curieuses sont encouragées à l'essayer et à faire part de " -"leurs réflexions." 
-#: ../../source/ref-changelog.md:608 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**New built-in strategies** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:610 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"FedYogi - Federated learning strategy using Yogi on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"FedYogi - Stratégie d'apprentissage fédéré utilisant Yogi côté serveur. " -"Mise en oeuvre basée sur https://arxiv.org/abs/2003.00295" -#: ../../source/ref-changelog.md:611 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"FedAdam - Federated learning strategy using Adam on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"FedAdam - Stratégie d'apprentissage fédéré utilisant Adam côté serveur. 
" -"Mise en œuvre basée sur https://arxiv.org/abs/2003.00295" -#: ../../source/ref-changelog.md:613 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**New PyTorch Lightning code example** " -"([#617](https://github.com/adap/flower/pull/617))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Nouvel exemple de code PyTorch Lightning** " -"([#617](https://github.com/adap/flower/pull/617))" -#: ../../source/ref-changelog.md:615 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**New Variational Auto-Encoder code example** " -"([#752](https://github.com/adap/flower/pull/752))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 +msgid "FedOpt" +msgstr "" + +#: flwr.server.strategy.fedopt.FedOpt:33 of +msgid "Momentum parameter. Defaults to 0.0." +msgstr "" + +#: flwr.server.strategy.fedopt.FedOpt:35 of +msgid "Second moment parameter. Defaults to 0.0." 
msgstr "" -"**Nouvel exemple de code d'autocodage variationnel** " -"([#752](https://github.com/adap/flower/pull/752))" -#: ../../source/ref-changelog.md:617 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**New scikit-learn code example** " -"([#748](https://github.com/adap/flower/pull/748))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -"**Nouvel exemple de code scikit-learn** " -"([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/ref-changelog.md:619 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**New experimental TensorBoard strategy** " -"([#789](https://github.com/adap/flower/pull/789))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Nouvelle stratégie expérimentale TensorBoard** " -"([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/ref-changelog.md:623 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Improved advanced TensorFlow code example " -"([#769](https://github.com/adap/flower/pull/769))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -"Amélioration de l'exemple de code TensorFlow avancé " -"([#769](https://github.com/adap/flower/pull/769))" -#: ../../source/ref-changelog.md:624 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Warning when `min_available_clients` is misconfigured " -"([#830](https://github.com/adap/flower/pull/830))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Avertissement lorsque `min_available_clients` est mal configuré " -"([#830](https://github.com/adap/flower/pull/830))" -#: ../../source/ref-changelog.md:625 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Improved gRPC server docs " -"([#841](https://github.com/adap/flower/pull/841))" +":py:obj:`evaluate `\\ " 
+"\\(server\\_round\\, parameters\\)" msgstr "" -"Amélioration de la documentation sur le serveur gRPC " -"([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/ref-changelog.md:626 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Improved error message in `NumPyClient` " -"([#851](https://github.com/adap/flower/pull/851))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Amélioration du message d'erreur dans `NumPyClient` " -"([#851](https://github.com/adap/flower/pull/851))" -#: ../../source/ref-changelog.md:627 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Improved PyTorch quickstart code example " -"([#852](https://github.com/adap/flower/pull/852))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Exemple de code de démarrage rapide PyTorch amélioré " -"([#852](https://github.com/adap/flower/pull/852))" -#: ../../source/ref-changelog.md:631 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Disabled final distributed evaluation** " -"([#800](https://github.com/adap/flower/pull/800))" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -"**Désactivé l'évaluation finale distribuée** " -"([#800](https://github.com/adap/flower/pull/800))" -#: ../../source/ref-changelog.md:633 +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 +msgid "FedProx" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:3 of +msgid "Implementation based on https://arxiv.org/abs/1812.06127" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:5 of msgid "" -"Prior behaviour was to perform a final round of distributed evaluation on" -" all connected clients, which is often not required (e.g., when using " -"server-side evaluation). The prior behaviour can be enabled by passing " -"`force_final_distributed_eval=True` to `start_server`." 
+"The strategy in itself will not be different than FedAvg, the client " +"needs to be adjusted. A proximal term needs to be added to the loss " +"function during the training:" msgstr "" -"Le comportement précédent consistait à effectuer un dernier tour " -"d'évaluation distribuée sur tous les clients connectés, ce qui n'est " -"souvent pas nécessaire (par exemple, lors de l'utilisation de " -"l'évaluation côté serveur). Le comportement précédent peut être activé en" -" passant `force_final_distributed_eval=True` à `start_server`." -#: ../../source/ref-changelog.md:635 +#: flwr.server.strategy.fedprox.FedProx:9 of msgid "" -"**Renamed q-FedAvg strategy** " -"([#802](https://github.com/adap/flower/pull/802))" +"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +"\n" msgstr "" -"**Renommé stratégie q-FedAvg** " -"([#802](https://github.com/adap/flower/pull/802))" -#: ../../source/ref-changelog.md:637 +#: flwr.server.strategy.fedprox.FedProx:12 of msgid "" -"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " -"the notation given in the original paper (q-FFL is the optimization " -"objective, q-FedAvg is the proposed solver). Note the the original (now " -"deprecated) `QffedAvg` class is still available for compatibility reasons" -" (it will be removed in a future release)." +"Where $w^t$ are the global parameters and $w$ are the local weights the " +"function will be optimized with." msgstr "" -"La stratégie nommée `QffedAvg` a été renommée en `QFedAvg` pour mieux " -"refléter la notation donnée dans l'article original (q-FFL est l'objectif" -" d'optimisation, q-FedAvg est le solveur proposé). Notez que la classe " -"`QffedAvg` originale (maintenant obsolète) est toujours disponible pour " -"des raisons de compatibilité (elle sera supprimée dans une prochaine " -"version)." 
-#: ../../source/ref-changelog.md:639 +#: flwr.server.strategy.fedprox.FedProx:15 of +msgid "In PyTorch, for example, the loss would go from:" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:21 of +msgid "To:" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:30 of msgid "" -"**Deprecated and renamed code example** `simulation_pytorch` **to** " -"`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"With `global_params` being a copy of the parameters before the training " +"takes place." msgstr "" -"**Exemple de code déprécié et renommé** `simulation_pytorch` **en** " -"`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" -#: ../../source/ref-changelog.md:641 +#: flwr.server.strategy.fedprox.FedProx:65 of msgid "" -"This example has been replaced by a new example. The new example is based" -" on the experimental virtual client engine, which will become the new " -"default way of doing most types of large-scale simulations in Flower. The" -" existing example was kept for reference purposes, but it might be " -"removed in the future." +"The weight of the proximal term used in the optimization. 0.0 makes this " +"strategy equivalent to FedAvg, and the higher the coefficient, the more " +"regularization will be used (that is, the client parameters will need to " +"be closer to the server parameters during training)." msgstr "" -"Cet exemple a été remplacé par un nouvel exemple. Le nouvel exemple est " -"basé sur le moteur expérimental du client virtuel, qui deviendra la " -"nouvelle méthode par défaut pour effectuer la plupart des types de " -"simulations à grande échelle dans Flower. L'exemple existant a été " -"conservé à des fins de référence, mais il pourrait être supprimé à " -"l'avenir." 
-#: ../../source/ref-changelog.md:643 -msgid "v0.16.0 (2021-05-11)" -msgstr "v0.16.0 (2021-05-11)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" -#: ../../source/ref-changelog.md:647 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**New built-in strategies** " -"([#549](https://github.com/adap/flower/pull/549))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Nouvelles stratégies intégrées** " -"([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/ref-changelog.md:649 -msgid "(abstract) FedOpt" -msgstr "(résumé) FedOpt" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" -#: ../../source/ref-changelog.md:650 -msgid "FedAdagrad" -msgstr "FedAdagrad" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: ../../source/ref-changelog.md:652 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Custom metrics for server and strategies** " -"([#717](https://github.com/adap/flower/pull/717))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Métriques personnalisées pour le serveur et les stratégies** " -"([#717](https://github.com/adap/flower/pull/717))" -#: ../../source/ref-changelog.md:654 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The Flower server is now fully task-agnostic, all remaining instances of " -"task-specific metrics (such as `accuracy`) have been replaced by custom " -"metrics dictionaries. Flower 0.15 introduced the capability to pass a " -"dictionary containing custom metrics from client to server. 
As of this " -"release, custom metrics replace task-specific metrics on the server." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Le serveur Flower est maintenant totalement agnostique, toutes les " -"instances restantes de métriques spécifiques à une tâche (telles que " -"`accuracy`) ont été remplacées par des dictionnaires de métriques " -"personnalisées. Flower 0.15 a introduit la possibilité de passer un " -"dictionnaire contenant des métriques personnalisées du client au serveur." -" À partir de cette version, les métriques personnalisées remplacent les " -"métriques spécifiques à une tâche sur le serveur." -#: ../../source/ref-changelog.md:656 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Custom metric dictionaries are now used in two user-facing APIs: they are" -" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " -"they enable evaluation functions passed to build-in strategies (via " -"`eval_fn`) to return more than two evaluation metrics. Strategies can " -"even return *aggregated* metrics dictionaries for the server to keep " -"track of." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Les dictionnaires de métriques personnalisés sont maintenant utilisés " -"dans deux API orientées vers l'utilisateur : ils sont renvoyés par les " -"méthodes de stratégie `aggregate_fit`/`aggregate_evaluate` et ils " -"permettent aux fonctions d'évaluation passées aux stratégies intégrées " -"(via `eval_fn`) de renvoyer plus de deux métriques d'évaluation. Les " -"stratégies peuvent même renvoyer des dictionnaires de métriques " -"*agrégées* pour que le serveur puisse en garder la trace." 
-#: ../../source/ref-changelog.md:658 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Stratey implementations should migrate their `aggregate_fit` and " -"`aggregate_evaluate` methods to the new return type (e.g., by simply " -"returning an empty `{}`), server-side evaluation functions should migrate" -" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Les implémentations de Stratey doivent migrer leurs méthodes " -"`aggregate_fit` et `aggregate_evaluate` vers le nouveau type de retour " -"(par exemple, en renvoyant simplement un `{}` vide), les fonctions " -"d'évaluation côté serveur doivent migrer de `return loss, accuracy` à " -"`return loss, {\"accuracy\" : accuracy}`." -#: ../../source/ref-changelog.md:660 +#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of +msgid "Sends the proximal factor mu to the clients" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 +msgid "FedTrimmedAvg" +msgstr "" + +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of +msgid "Implemented based on: https://arxiv.org/abs/1803.01498" +msgstr "" + +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of +msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Flower 0.15-style return types are deprecated (but still supported), " -"compatibility will be removed in a future release." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"Les types de retour du style Flower 0.15 sont dépréciés (mais toujours " -"pris en charge), la compatibilité sera supprimée dans une prochaine " -"version." 
-#: ../../source/ref-changelog.md:662 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Migration warnings for deprecated functionality** " -"([#690](https://github.com/adap/flower/pull/690))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Avertissements de migration pour les fonctionnalités obsolètes** " -"([#690](https://github.com/adap/flower/pull/690))" -#: ../../source/ref-changelog.md:664 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using trimmed average." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Earlier versions of Flower were often migrated to new APIs, while " -"maintaining compatibility with legacy APIs. This release introduces " -"detailed warning messages if usage of deprecated APIs is detected. The " -"new warning messages often provide details on how to migrate to more " -"recent APIs, thus easing the transition from one release to another." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Les versions antérieures de Flower ont souvent été migrées vers de " -"nouvelles API, tout en maintenant la compatibilité avec les anciennes " -"API. Cette version introduit des messages d'avertissement détaillés si " -"l'utilisation d'API obsolètes est détectée. Les nouveaux messages " -"d'avertissement fournissent souvent des détails sur la façon de migrer " -"vers des API plus récentes, facilitant ainsi la transition d'une version " -"à l'autre." 
-#: ../../source/ref-changelog.md:666 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Improved docs and docstrings " -"([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Amélioration des docs et des docstrings " -"([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" -#: ../../source/ref-changelog.md:668 -msgid "MXNet example and documentation" -msgstr "Exemple et documentation MXNet" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" -#: ../../source/ref-changelog.md:670 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"FedBN implementation in example PyTorch: From Centralized To Federated " -"([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Mise en œuvre de FedBN dans l'exemple PyTorch : De la centralisation à la" -" fédération ([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" -#: ../../source/ref-changelog.md:674 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Serialization-agnostic server** " -"([#721](https://github.com/adap/flower/pull/721))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Serveur agnostique de sérialisation** " -"([#721](https://github.com/adap/flower/pull/721))" -#: ../../source/ref-changelog.md:676 +#: 
flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The Flower server is now fully serialization-agnostic. Prior usage of " -"class `Weights` (which represents parameters as deserialized NumPy " -"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " -"`Parameters` objects are fully serialization-agnostic and represents " -"parameters as byte arrays, the `tensor_type` attributes indicates how " -"these byte arrays should be interpreted (e.g., for " -"serialization/deserialization)." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Le serveur Flower est désormais totalement agnostique en matière de " -"sérialisation. L'utilisation antérieure de la classe `Weights` (qui " -"représente les paramètres sous forme de tableaux NumPy désérialisés) a " -"été remplacée par la classe `Parameters` (par exemple, dans `Strategy`). " -"Les objets `Parameters` sont totalement agnostiques en matière de " -"sérialisation et représentent les paramètres sous forme de tableaux " -"d'octets, les attributs `tensor_type` indiquent comment ces tableaux " -"d'octets doivent être interprétés (par exemple, pour la " -"sérialisation/désérialisation)." -#: ../../source/ref-changelog.md:678 +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 +msgid "FedXgbBagging" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"Built-in strategies implement this approach by handling serialization and" -" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " -"implementations should update to the slighly changed Strategy method " -"definitions. Strategy authors can consult PR " -"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" -" easily migrate to the new format." 
+":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"Les stratégies intégrées mettent en œuvre cette approche en gérant en " -"interne la sérialisation et la désérialisation de `Weights`. Les " -"implémentations de stratégies personnalisées ou tierces doivent être " -"mises à jour avec les définitions de méthodes de stratégie légèrement " -"modifiées. Les auteurs de stratégies peuvent consulter le PR " -"[#721](https://github.com/adap/flower/pull/721) pour voir comment les " -"stratégies peuvent facilement migrer vers le nouveau format." -#: ../../source/ref-changelog.md:680 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +#, fuzzy +msgid "Aggregate evaluation metrics using average." +msgstr "Résultats globaux de l'évaluation." + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"Deprecated `flwr.server.Server.evaluate`, use " -"`flwr.server.Server.evaluate_round` instead " -"([#717](https://github.com/adap/flower/pull/717))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"Déclassé `flwr.server.Server.evaluate`, utiliser " -"`flwr.server.Server.evaluate_round` à la place " -"([#717](https://github.com/adap/flower/pull/717))" -#: ../../source/ref-changelog.md:682 -msgid "v0.15.0 (2021-03-12)" -msgstr "v0.15.0 (2021-03-12)" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of +msgid "Aggregate fit results using bagging." 
+msgstr "" -#: ../../source/ref-changelog.md:686 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"**Server-side parameter initialization** " -"([#658](https://github.com/adap/flower/pull/658))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Initialisation des paramètres côté serveur** " -"([#658](https://github.com/adap/flower/pull/658))" -#: ../../source/ref-changelog.md:688 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"Model parameters can now be initialized on the server-side. Server-side " -"parameter initialization works via a new `Strategy` method called " -"`initialize_parameters`." +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Les paramètres du modèle peuvent maintenant être initialisés côté " -"serveur. L'initialisation des paramètres côté serveur fonctionne via une " -"nouvelle méthode `Strategy` appelée `initialize_parameters`." -#: ../../source/ref-changelog.md:690 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"Built-in strategies support a new constructor argument called " -"`initial_parameters` to set the initial parameters. Built-in strategies " -"will provide these initial parameters to the server on startup and then " -"delete them to free the memory afterwards." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"Les stratégies intégrées prennent en charge un nouvel argument du " -"constructeur appelé `initial_parameters` pour définir les paramètres " -"initiaux. Les stratégies intégrées fourniront ces paramètres initiaux au " -"serveur au démarrage et les supprimeront ensuite pour libérer la mémoire." 
-#: ../../source/ref-changelog.md:709 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"If no initial parameters are provided to the strategy, the server will " -"continue to use the current behaviour (namely, it will ask one of the " -"connected clients for its parameters and use these as the initial global " -"parameters)." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Si aucun paramètre initial n'est fourni à la stratégie, le serveur " -"continuera à utiliser le comportement actuel (à savoir qu'il demandera à " -"l'un des clients connectés ses paramètres et les utilisera comme " -"paramètres globaux initiaux)." -#: ../../source/ref-changelog.md:711 -msgid "Deprecations" -msgstr "Dépréciations" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: ../../source/ref-changelog.md:713 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " -"`flwr.server.strategy.FedAvg`, which is equivalent)" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Déclasser `flwr.server.strategy.DefaultStrategy` (migrer vers " -"`flwr.server.strategy.FedAvg`, qui est équivalent)" -#: ../../source/ref-changelog.md:715 -msgid "v0.14.0 (2021-02-18)" -msgstr "v0.14.0 (2021-02-18)" +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 +msgid "FedXgbCyclic" +msgstr "" -#: ../../source/ref-changelog.md:719 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " -"([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" 
+":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Généralisé** `Client.fit` **et** `Client.evaluate` **valeurs de " -"retour** ([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" -#: ../../source/ref-changelog.md:721 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"Clients can now return an additional dictionary mapping `str` keys to " -"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " -"This means one can return almost arbitrary values from `fit`/`evaluate` " -"and make use of them on the server side!" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\," +" results\\, failures\\)" msgstr "" -"Les clients peuvent maintenant renvoyer un dictionnaire supplémentaire " -"associant les clés `str` aux valeurs des types suivants : `bool`, " -"`bytes`, `float`, `int`, `str`. Cela signifie que l'on peut renvoyer des " -"valeurs presque arbitraires de `fit`/`evaluate` et les utiliser du côté " -"du serveur !" -#: ../../source/ref-changelog.md:723 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"This improvement also allowed for more consistent return types between " -"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " -"dict)` representing the loss, number of examples, and a dictionary " -"holding arbitrary problem-specific values like accuracy." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Cette amélioration a également permis de rendre plus cohérents les types " -"de retour entre `fit` et `evaluate` : `evaluate` devrait maintenant " -"retourner un tuple `(float, int, dict)` représentant la perte, le nombre " -"d'exemples, et un dictionnaire contenant des valeurs arbitraires " -"spécifiques au problème comme la précision." 
-#: ../../source/ref-changelog.md:725 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"In case you wondered: this feature is compatible with existing projects, " -"the additional dictionary return value is optional. New code should " -"however migrate to the new return types to be compatible with upcoming " -"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " -"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " -"details." +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"Au cas où tu te poserais la question : cette fonctionnalité est " -"compatible avec les projets existants, la valeur de retour supplémentaire" -" du dictionnaire est facultative. Le nouveau code doit cependant migrer " -"vers les nouveaux types de retour pour être compatible avec les " -"prochaines versions de Flower (`fit` : `List[np.ndarray], int, Dict[str, " -"Scalar]`, `evaluate` : `float, int, Dict[str, Scalar]`). Voir l'exemple " -"ci-dessous pour plus de détails." 
-#: ../../source/ref-changelog.md:727 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"*Code example:* note the additional dictionary return values in both " -"`FlwrClient.fit` and `FlwrClient.evaluate`:" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"*Exemple de code:* note les valeurs de retour du dictionnaire " -"supplémentaires dans `FlwrClient.fit` et `FlwrClient.evaluate` :" -#: ../../source/ref-changelog.md:742 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Generalized** `config` **argument in** `Client.fit` **and** " -"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**Généralisé** `config` **argument dans** `Client.fit` **et** " -"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" -#: ../../source/ref-changelog.md:744 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"The `config` argument used to be of type `Dict[str, str]`, which means " -"that dictionary values were expected to be strings. The new release " -"generalizes this to enable values of the following types: `bool`, " -"`bytes`, `float`, `int`, `str`." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"L'argument `config` était auparavant de type `Dict[str, str]`, ce qui " -"signifie que les valeurs du dictionnaire devaient être des chaînes. La " -"nouvelle version généralise cela pour permettre les valeurs des types " -"suivants : `bool`, `bytes`, `float`, `int`, `str`." -#: ../../source/ref-changelog.md:746 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"This means one can now pass almost arbitrary values to `fit`/`evaluate` " -"using the `config` dictionary. 
Yay, no more `str(epochs)` on the server-" -"side and `int(config[\"epochs\"])` on the client side!" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Cela signifie que l'on peut maintenant passer des valeurs presque " -"arbitraires à `fit`/`evaluate` en utilisant le dictionnaire `config`. " -"Yay, plus de `str(epochs)` du côté serveur et `int(config[\"epochs\"])` " -"du côté client !" -#: ../../source/ref-changelog.md:748 +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 +#, fuzzy +msgid "FedXgbNnAvg" +msgstr "DP-FedAvg" + +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of msgid "" -"*Code example:* note that the `config` dictionary now contains non-`str` " -"values in both `Client.fit` and `Client.evaluate`:" +"This strategy is deprecated, but a copy of it is available in Flower " +"Baselines: " +"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." msgstr "" -"*Exemple de code:* Notez que le dictionnaire `config` contient maintenant" -" des valeurs autres que `str` dans `Client.fit` et `Client.evaluate` :" - -#: ../../source/ref-changelog.md:765 -msgid "v0.13.0 (2021-01-08)" -msgstr "v0.13.0 (2021-01-08)" -#: ../../source/ref-changelog.md:769 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"New example: PyTorch From Centralized To Federated " -"([#549](https://github.com/adap/flower/pull/549))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"Nouvel exemple : PyTorch de centralisé à fédéré " -"([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/ref-changelog.md:770 -msgid "Improved documentation" -msgstr "Amélioration de la documentation" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\, " +"results\\, failures\\)" +msgstr "" -#: ../../source/ref-changelog.md:771 -msgid "New documentation theme 
([#551](https://github.com/adap/flower/pull/551))" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Nouveau thème de documentation " -"([#551](https://github.com/adap/flower/pull/551))" -#: ../../source/ref-changelog.md:772 -msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" +msgstr "" -#: ../../source/ref-changelog.md:773 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Updated examples documentation " -"([#549](https://github.com/adap/flower/pull/549))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"Mise à jour de la documentation des exemples " -"([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/ref-changelog.md:774 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Removed obsolete documentation " -"([#548](https://github.com/adap/flower/pull/548))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Suppression de la documentation obsolète " -"([#548](https://github.com/adap/flower/pull/548))" -#: ../../source/ref-changelog.md:776 -msgid "Bugfix:" -msgstr "Correction de bogues :" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: ../../source/ref-changelog.md:778 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"`Server.fit` does not disconnect clients when finished, disconnecting the" -" clients is now handled in `flwr.server.start_server` " -"([#553](https://github.com/adap/flower/pull/553) " 
-"[#540](https://github.com/adap/flower/issues/540))." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"`Server.fit` ne déconnecte pas les clients lorsqu'il est terminé, la " -"déconnexion des clients est maintenant gérée dans " -"`flwr.server.start_server` " -"([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))." -#: ../../source/ref-changelog.md:780 -msgid "v0.12.0 (2020-12-07)" -msgstr "v0.12.0 (2020-12-07)" +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 +msgid "FedYogi" +msgstr "" -#: ../../source/ref-changelog.md:782 ../../source/ref-changelog.md:798 -msgid "Important changes:" -msgstr "Changements importants :" +#: flwr.server.strategy.fedyogi.FedYogi:32 of +msgid "Server-side learning rate. Defaults to 1e-2." +msgstr "" + +#: flwr.server.strategy.fedyogi.FedYogi:34 of +msgid "Client-side learning rate. Defaults to 0.0316." +msgstr "" + +#: flwr.server.strategy.fedyogi.FedYogi:40 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." 
+msgstr "" -#: ../../source/ref-changelog.md:784 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Added an example for embedded devices " -"([#507](https://github.com/adap/flower/pull/507))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -"Ajout d'un exemple pour les périphériques embarqués " -"([#507](https://github.com/adap/flower/pull/507))" -#: ../../source/ref-changelog.md:785 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Added a new NumPyClient (in addition to the existing KerasClient) " -"([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"Ajout d'un nouveau NumPyClient (en plus du KerasClient existant) " -"([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508))" -#: ../../source/ref-changelog.md:786 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Deprecated `flwr_example` package and started to migrate examples into " -"the top-level `examples` directory " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"Déclassement du paquet `flwr_example` et migration des exemples dans le " -"répertoire de premier niveau `examples` " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" -#: ../../source/ref-changelog.md:788 -msgid "v0.11.0 (2020-11-30)" -msgstr "v0.11.0 (2020-11-30)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: ../../source/ref-changelog.md:790 -msgid "Incompatible changes:" -msgstr "Changements incompatibles :" +#: 
flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" -#: ../../source/ref-changelog.md:792 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Renamed strategy methods " -"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " -"Flower's public APIs. Other public methods/functions (e.g., every method " -"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " -"which is why we're removing it from the four methods in Strategy. To " -"migrate rename the following `Strategy` methods accordingly:" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Renommé les méthodes de stratégie " -"([#486](https://github.com/adap/flower/pull/486)) pour unifier le nommage" -" des API publiques de Flower. D'autres méthodes/fonctions publiques (par " -"exemple, toutes les méthodes de `Client`, mais aussi `Strategy.evaluate`)" -" n'utilisent pas le préfixe `on_`, c'est pourquoi nous le supprimons des " -"quatre méthodes de Stratégie. 
Pour migrer, renommez les méthodes de " -"`Strategy` suivantes en conséquence :" -#: ../../source/ref-changelog.md:793 -msgid "`on_configure_evaluate` => `configure_evaluate`" -msgstr "`on_configure_evaluate` => `configure_evaluate`" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: ../../source/ref-changelog.md:794 -msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" -msgstr "`on_aggregate_evaluate` => `aggregate_evaluate`" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: ../../source/ref-changelog.md:795 -msgid "`on_configure_fit` => `configure_fit`" -msgstr "`on_configure_fit` => `configure_fit`" +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 +msgid "Krum" +msgstr "" -#: ../../source/ref-changelog.md:796 -msgid "`on_aggregate_fit` => `aggregate_fit`" -msgstr "`on_aggregate_fit` => `aggregate_fit`" +#: flwr.server.strategy.krum.Krum:3 of +msgid "Implementation based on https://arxiv.org/abs/1703.02757" +msgstr "" -#: ../../source/ref-changelog.md:800 +#: flwr.server.strategy.krum.Krum:17 of msgid "" -"Deprecated `DefaultStrategy` " -"([#479](https://github.com/adap/flower/pull/479)). To migrate use " -"`FedAvg` instead." +"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" +" that case classical Krum is applied." msgstr "" -"Déclassé `DefaultStrategy` " -"([#479](https://github.com/adap/flower/pull/479)). Pour migrer, utilisez " -"`FedAvg` à la place." -#: ../../source/ref-changelog.md:801 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Simplified examples and baselines " -"([#484](https://github.com/adap/flower/pull/484))." 
+":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -"Exemples simplifiés et lignes de base " -"([#484](https://github.com/adap/flower/pull/484))." -#: ../../source/ref-changelog.md:802 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Removed presently unused `on_conclude_round` from strategy interface " -"([#483](https://github.com/adap/flower/pull/483))." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"Suppression de `on_conclude_round` actuellement inutilisé de l'interface " -"de stratégie ([#483](https://github.com/adap/flower/pull/483))." -#: ../../source/ref-changelog.md:803 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of +#, fuzzy +msgid "Aggregate fit results using Krum." +msgstr "Résultats globaux de l'évaluation." + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Set minimal Python version to 3.6.1 instead of 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -"Fixe la version minimale de Python à 3.6.1 au lieu de 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." -#: ../../source/ref-changelog.md:804 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Improved `Strategy` docstrings " -"([#470](https://github.com/adap/flower/pull/470))." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Amélioration des docstrings `Stratégie` " -"([#470](https://github.com/adap/flower/pull/470))." 
-#: ../../source/ref-example-projects.rst:2 -#, fuzzy -msgid "Example projects" -msgstr "Exemples de PyTorch" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" -#: ../../source/ref-example-projects.rst:4 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Flower comes with a number of usage examples. The examples demonstrate " -"how Flower can be used to federate different kinds of existing machine " -"learning pipelines, usually leveraging popular machine learning " -"frameworks such as `PyTorch `_ or `TensorFlow " -"`_." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Flower est livré avec un certain nombre d'exemples d'utilisation, qui " -"montrent comment Flower peut être utilisé pour fédérer différents types " -"de pipelines d'apprentissage automatique existants, qui s'appuient " -"généralement sur des frameworks d'apprentissage automatique populaires " -"tels que `PyTorch `_ ou `TensorFlow " -"`_." -#: ../../source/ref-example-projects.rst:11 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Flower usage examples used to be bundled with Flower in a package called " -"``flwr_example``. We are migrating those examples to standalone projects " -"to make them easier to use. All new examples are based in the directory " -"`examples `_." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Les exemples d'utilisation de Flower étaient auparavant regroupés avec " -"Flower dans un paquet appelé ``flwr_example``. Nous migrons ces exemples " -"vers des projets autonomes pour les rendre plus faciles à utiliser. Tous " -"les nouveaux exemples sont basés dans le répertoire ``examples " -"`_." -#: ../../source/ref-example-projects.rst:16 -msgid "The following examples are available as standalone projects." 
-msgstr "Les exemples suivants sont disponibles sous forme de projets autonomes." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: ../../source/ref-example-projects.rst:20 -msgid "Quickstart TensorFlow/Keras" -msgstr "Démarrage rapide de TensorFlow/Keras" +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 +#, fuzzy +msgid "QFedAvg" +msgstr "DP-FedAvg" -#: ../../source/ref-example-projects.rst:22 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"The TensorFlow/Keras quickstart example shows CIFAR-10 image " -"classification with MobileNetV2:" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -"L'exemple de démarrage rapide TensorFlow/Keras montre la classification " -"d'images CIFAR-10 avec MobileNetV2 :" -#: ../../source/ref-example-projects.rst:25 -#, fuzzy +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"`Quickstart TensorFlow (Code) " -"`_" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"`Quickstart TensorFlow (Code) " -"`_" -#: ../../source/ref-example-projects.rst:26 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"`Quickstart TensorFlow (Tutorial) `_" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"`Quickstart TensorFlow (Tutorial) `_" -#: ../../source/ref-example-projects.rst:27 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"`Quickstart TensorFlow (Blog Post) `_" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"`Quickstart TensorFlow (Blog Post) `_" -#: ../../source/ref-example-projects.rst:31 -#: ../../source/tutorial-quickstart-pytorch.rst:5 -msgid "Quickstart PyTorch" -msgstr "Démarrage rapide de PyTorch" - -#: ../../source/ref-example-projects.rst:33 
+#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"The PyTorch quickstart example shows CIFAR-10 image classification with a" -" simple Convolutional Neural Network:" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"L'exemple de démarrage rapide PyTorch montre la classification d'images " -"CIFAR-10 avec un simple réseau neuronal convolutif :" -#: ../../source/ref-example-projects.rst:36 -#, fuzzy +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"`Quickstart PyTorch (Code) " -"`_" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"`Quickstart PyTorch (Code) " -"`_" -#: ../../source/ref-example-projects.rst:37 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"`Quickstart PyTorch (Tutorial) `_" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"`Quickstart PyTorch (Tutorial) `_" - -#: ../../source/ref-example-projects.rst:41 -msgid "PyTorch: From Centralized To Federated" -msgstr "PyTorch : De la centralisation à la fédération" -#: ../../source/ref-example-projects.rst:43 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"This example shows how a regular PyTorch project can be federated using " -"Flower:" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Cet exemple montre comment un projet PyTorch ordinaire peut être fédéré à" -" l'aide de Flower :" -#: ../../source/ref-example-projects.rst:45 +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 #, fuzzy -msgid "" -"`PyTorch: From Centralized To Federated (Code) " -"`_" -msgstr "" -"`PyTorch : De la centralisation à la fédération (Code) " -"`_" +msgid "Strategy" +msgstr "stratégie.du.serveur" -#: ../../source/ref-example-projects.rst:46 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"`PyTorch: From Centralized To Federated 
(Tutorial) " -"`_" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"`PyTorch : De la centralisation à la fédération (Tutoriel) " -"`_" -#: ../../source/ref-example-projects.rst:50 -msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" -msgstr "Apprentissage fédéré sur Raspberry Pi et Nvidia Jetson" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +#, fuzzy +msgid "Aggregate evaluation results." +msgstr "Résultats globaux de l'évaluation." -#: ../../source/ref-example-projects.rst:52 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"This example shows how Flower can be used to build a federated learning " -"system that run across Raspberry Pi and Nvidia Jetson:" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"Cet exemple montre comment Flower peut être utilisé pour construire un " -"système d'apprentissage fédéré qui fonctionne sur Raspberry Pi et Nvidia " -"Jetson :" -#: ../../source/ref-example-projects.rst:54 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of #, fuzzy +msgid "Aggregate training results." +msgstr "Résultats globaux de l'évaluation." 
+ +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " -"`_" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Code) " -"`_" -#: ../../source/ref-example-projects.rst:55 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " -"`_" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Blog Post) " -"`_" -#: ../../source/ref-example-projects.rst:60 -msgid "Legacy Examples (`flwr_example`)" -msgstr "Exemples hérités (`flwr_example`)" - -#: ../../source/ref-example-projects.rst:63 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"The useage examples in `flwr_example` are deprecated and will be removed " -"in the future. New examples are provided as standalone projects in " -"`examples `_." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"Les exemples d'utilisation dans `flwr_example` sont obsolètes et seront " -"supprimés à l'avenir. De nouveaux exemples sont fournis en tant que " -"projets autonomes dans `examples " -"`_." -#: ../../source/ref-example-projects.rst:69 -msgid "Extra Dependencies" -msgstr "Dépendances supplémentaires" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.evaluate:1 of +#, fuzzy +msgid "Evaluate the current model parameters." +msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" -#: ../../source/ref-example-projects.rst:71 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"The core Flower framework keeps a minimal set of dependencies. 
The " -"examples demonstrate Flower in the context of different machine learning " -"frameworks, so additional dependencies need to be installed before an " -"example can be run." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Le noyau du framework Flower conserve un ensemble minimal de dépendances." -" Les exemples démontrent Flower dans le contexte de différents frameworks" -" d'apprentissage automatique, de sorte que des dépendances " -"supplémentaires doivent être installées avant qu'un exemple puisse être " -"exécuté." - -#: ../../source/ref-example-projects.rst:75 -msgid "For PyTorch examples::" -msgstr "Pour les exemples de PyTorch: :" - -#: ../../source/ref-example-projects.rst:79 -msgid "For TensorFlow examples::" -msgstr "Pour les exemples de TensorFlow : :" -#: ../../source/ref-example-projects.rst:83 -msgid "For both PyTorch and TensorFlow examples::" -msgstr "Pour les exemples PyTorch et TensorFlow: :" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of +#, fuzzy +msgid "Initialize the (global) model parameters." +msgstr "Initialise le modèle global" -#: ../../source/ref-example-projects.rst:87 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of msgid "" -"Please consult :code:`pyproject.toml` for a full list of possible extras " -"(section :code:`[tool.poetry.extras]`)." +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " +"one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." 
msgstr "" -"Tu peux consulter :code:`pyproject.toml` pour une liste complète des " -"extras possibles (section :code:`[tool.poetry.extras]`)." -#: ../../source/ref-example-projects.rst:92 -msgid "PyTorch Examples" -msgstr "Exemples de PyTorch" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of +msgid "Exceptions that occurred while the server was waiting for client updates." +msgstr "" -#: ../../source/ref-example-projects.rst:94 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of msgid "" -"Our PyTorch examples are based on PyTorch 1.7. They should work with " -"other releases as well. So far, we provide the following examples." +"**aggregation_result** -- The aggregated evaluation result. Aggregation " +"typically uses some variant of a weighted average." msgstr "" -"Nos exemples PyTorch sont basés sur PyTorch 1.7. Ils devraient " -"fonctionner avec d'autres versions également. Jusqu'à présent, nous " -"fournissons les exemples suivants." - -#: ../../source/ref-example-projects.rst:98 -msgid "CIFAR-10 Image Classification" -msgstr "Classification d'images CIFAR-10" -#: ../../source/ref-example-projects.rst:100 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of msgid "" -"`CIFAR-10 and CIFAR-100 `_ " -"are popular RGB image datasets. The Flower CIFAR-10 example uses PyTorch " -"to train a simple CNN classifier in a federated learning setup with two " -"clients." +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" +" one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." 
msgstr "" -"`CIFAR-10 et CIFAR-100 `_ " -"sont des ensembles de données d'images RVB populaires. L'exemple Flower " -"CIFAR-10 utilise PyTorch pour former un classificateur CNN simple dans " -"une configuration d'apprentissage fédéré avec deux clients." - -#: ../../source/ref-example-projects.rst:104 -#: ../../source/ref-example-projects.rst:121 -#: ../../source/ref-example-projects.rst:146 -msgid "First, start a Flower server:" -msgstr "Tout d'abord, démarre un serveur Flower :" -#: ../../source/ref-example-projects.rst:106 -msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" -msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of +msgid "" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the new global model parameters (i.e., it will replace the " +"previous parameters with the ones returned from this method). If `None` " +"is returned (e.g., because there were only failures and no viable " +"results) then the server will no update the previous model parameters, " +"the updates received in this round are discarded, and the global model " +"parameters remain the same." +msgstr "" -#: ../../source/ref-example-projects.rst:108 -#: ../../source/ref-example-projects.rst:125 -#: ../../source/ref-example-projects.rst:150 -msgid "Then, start the two clients in a new terminal window:" -msgstr "Ensuite, démarre les deux clients dans une nouvelle fenêtre de terminal :" +#: flwr.server.strategy.strategy.Strategy.evaluate:3 of +msgid "" +"This function can be used to perform centralized (i.e., server-side) " +"evaluation of model parameters." 
+msgstr "" -#: ../../source/ref-example-projects.rst:110 -msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" -msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#: flwr.server.strategy.strategy.Strategy.evaluate:11 of +msgid "" +"**evaluation_result** -- The evaluation result, usually a Tuple " +"containing loss and a dictionary containing task-specific metrics (e.g., " +"accuracy)." +msgstr "" -#: ../../source/ref-example-projects.rst:112 -msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." -msgstr "Pour plus de détails, voir :code:`src/py/flwr_example/pytorch_cifar`." +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of +msgid "" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the initial global model parameters." +msgstr "" -#: ../../source/ref-example-projects.rst:115 -msgid "ImageNet-2012 Image Classification" -msgstr "ImageNet-2012 Classification des images" +#: ../../source/ref-api/flwr.simulation.rst:2 +#, fuzzy +msgid "simulation" +msgstr "Simulation de moniteur" -#: ../../source/ref-example-projects.rst:117 +#: ../../source/ref-api/flwr.simulation.rst:17::1 msgid "" -"`ImageNet-2012 `_ is one of the major computer" -" vision datasets. The Flower ImageNet example uses PyTorch to train a " -"ResNet-18 classifier in a federated learning setup with ten clients." +":py:obj:`start_simulation `\\ \\(\\*\\," +" client\\_fn\\[\\, ...\\]\\)" msgstr "" -"`ImageNet-2012 `_ est l'un des principaux " -"ensembles de données de vision par ordinateur. L'exemple Flower ImageNet " -"utilise PyTorch pour entraîner un classificateur ResNet-18 dans une " -"configuration d'apprentissage fédéré avec dix clients." 
-#: ../../source/ref-example-projects.rst:123 -msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" -msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#: ../../source/ref-api/flwr.simulation.rst:17::1 +#: flwr.simulation.app.start_simulation:1 of +#, fuzzy +msgid "Start a Ray-based Flower simulation server." +msgstr "Simulation de moniteur" -#: ../../source/ref-example-projects.rst:127 -msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" -msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +#, fuzzy +msgid "start\\_simulation" +msgstr "démarrer_simulation" -#: ../../source/ref-example-projects.rst:129 -msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." -msgstr "Pour plus de détails, voir :code:`src/py/flwr_example/pytorch_imagenet`." +#: flwr.simulation.app.start_simulation:3 of +msgid "" +"A function creating client instances. The function must take a single " +"`str` argument called `cid`. It should return a single client instance of" +" type Client. Note that the created client instances are ephemeral and " +"will often be destroyed after a single method invocation. Since client " +"instances are not long-lived, they should not attempt to carry state over" +" method invocations. Any state required by the instance (model, dataset, " +"hyperparameters, ...) should be (re-)created in either the call to " +"`client_fn` or the call to any of the client methods (e.g., load " +"evaluation data in the `evaluate` method itself)." +msgstr "" -#: ../../source/ref-example-projects.rst:133 -msgid "TensorFlow Examples" -msgstr "Exemples de TensorFlow" +#: flwr.simulation.app.start_simulation:13 of +msgid "" +"The total number of clients in this simulation. This must be set if " +"`clients_ids` is not set and vice-versa." 
+msgstr "" -#: ../../source/ref-example-projects.rst:135 +#: flwr.simulation.app.start_simulation:16 of msgid "" -"Our TensorFlow examples are based on TensorFlow 2.0 or newer. So far, we " -"provide the following examples." +"List `client_id`s for each client. This is only required if `num_clients`" +" is not set. Setting both `num_clients` and `clients_ids` with " +"`len(clients_ids)` not equal to `num_clients` generates an error." msgstr "" -"Nos exemples TensorFlow sont basés sur TensorFlow 2.0 ou une version plus" -" récente. Jusqu'à présent, nous te proposons les exemples suivants." -#: ../../source/ref-example-projects.rst:139 -msgid "Fashion-MNIST Image Classification" -msgstr "Classification d'images Fashion-MNIST" +#: flwr.simulation.app.start_simulation:20 of +msgid "" +"CPU and GPU resources for a single client. Supported keys are `num_cpus` " +"and `num_gpus`. To understand the GPU utilization caused by `num_gpus`, " +"as well as using custom resources, please consult the Ray documentation." +msgstr "" -#: ../../source/ref-example-projects.rst:141 +#: flwr.simulation.app.start_simulation:25 of msgid "" -"`Fashion-MNIST `_ is " -"often used as the \"Hello, world!\" of machine learning. We follow this " -"tradition and provide an example which samples random local datasets from" -" Fashion-MNIST and trains a simple image classification model over those " -"partitions." +"An implementation of the abstract base class `flwr.server.Server`. If no " +"instance is provided, then `start_server` will create one." msgstr "" -"nous suivons cette tradition et fournissons un exemple qui échantillonne " -"des ensembles de données locales aléatoires de Fashion-MNIST et entraîne " -"un modèle simple de classification d'images sur ces partitions." 
-#: ../../source/ref-example-projects.rst:148 -msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" -msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#: flwr.simulation.app.start_simulation:31 of +msgid "" +"An implementation of the abstract base class `flwr.server.Strategy`. If " +"no strategy is provided, then `start_server` will use " +"`flwr.server.strategy.FedAvg`." +msgstr "" -#: ../../source/ref-example-projects.rst:152 -msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" -msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#: flwr.simulation.app.start_simulation:35 of +msgid "" +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_simulation` will use " +"`flwr.server.client_manager.SimpleClientManager`." +msgstr "" -#: ../../source/ref-example-projects.rst:154 +#: flwr.simulation.app.start_simulation:39 of msgid "" -"For more details, see " -":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +"Optional dictionary containing arguments for the call to `ray.init`. If " +"ray_init_args is None (the default), Ray will be initialized with the " +"following default args: { \"ignore_reinit_error\": True, " +"\"include_dashboard\": False } An empty dictionary can be used " +"(ray_init_args={}) to prevent any arguments from being passed to " +"ray.init." msgstr "" -"Pour plus de détails, voir " -":code:`src/py/flwr_example/tensorflow_fashion_mnist`." -#: ../../source/ref-faq.rst:4 +#: flwr.simulation.app.start_simulation:39 of msgid "" -"This page collects answers to commonly asked questions about Federated " -"Learning with Flower." +"Optional dictionary containing arguments for the call to `ray.init`. 
If " +"ray_init_args is None (the default), Ray will be initialized with the " +"following default args:" msgstr "" -"Cette page rassemble les réponses aux questions les plus fréquemment " -"posées sur l'apprentissage fédéré avec Flower." -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can Flower run on Juptyter Notebooks / Google Colab?" +#: flwr.simulation.app.start_simulation:43 of +msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" msgstr "" -":fa:`eye,mr-1` Flower peut-il fonctionner sur les ordinateurs portables " -"Juptyter / Google Colab ?" -#: ../../source/ref-faq.rst:8 +#: flwr.simulation.app.start_simulation:45 of msgid "" -"Yes, it can! Flower even comes with a few under-the-hood optimizations to" -" make it work even better on Colab. Here's a quickstart example:" +"An empty dictionary can be used (ray_init_args={}) to prevent any " +"arguments from being passed to ray.init." msgstr "" -"Oui, c'est possible ! Flower est même livré avec quelques optimisations " -"pour qu'il fonctionne encore mieux sur Colab. Voici un exemple de " -"démarrage rapide :" -#: ../../source/ref-faq.rst:10 -#, fuzzy +#: flwr.simulation.app.start_simulation:48 of msgid "" -"`Flower simulation PyTorch " -"`_" +"Set to True to prevent `ray.shutdown()` in case " +"`ray.is_initialized()=True`." msgstr "" -"`Flower Quickstart (TensorFlow/Keras) " -"`_" -#: ../../source/ref-faq.rst:11 -#, fuzzy +#: flwr.simulation.app.start_simulation:50 of msgid "" -"`Flower simulation TensorFlow/Keras " -"`_" +"Optionally specify the type of actor to use. The actor object, which " +"persists throughout the simulation, will be the process in charge of " +"running the clients' jobs (i.e. their `fit()` method)." msgstr "" -"`Flower Quickstart (TensorFlow/Keras) " -"`_" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" 
+#: flwr.simulation.app.start_simulation:54 of +msgid "" +"If you want to create your own Actor classes, you might need to pass some" +" input argument. You can use this dictionary for such purpose." msgstr "" -":fa:`eye,mr-1` Comment puis-je faire fonctionner l'apprentissage fédéré " -"sur un Raspberry Pi ?" -#: ../../source/ref-faq.rst:15 -#, fuzzy +#: flwr.simulation.app.start_simulation:57 of msgid "" -"Find the `blog post about federated learning on embedded device here " -"`_" -" and the corresponding `GitHub code example " -"`_." +"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " +"the VCE to choose in which node the actor is placed. If you are an " +"advanced user needed more control you can use lower-level scheduling " +"strategies to pin actors to specific compute nodes (e.g. via " +"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." +" For all details, please refer to the Ray documentation: " +"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" msgstr "" -"Trouve le `blog post about federated learning on embedded device ici " -"`_" -" et l'exemple de code GitHub correspondant " -"`_." -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +#: flwr.simulation.app.start_simulation:66 of +msgid "**hist** -- Object containing metrics from training." msgstr "" -":fa:`eye,mr-1` Est-ce que Flower prend en charge l'apprentissage fédéré " -"sur les appareils Android ?" 
-#: ../../source/ref-faq.rst:19 +#: ../../source/ref-changelog.md:1 +msgid "Changelog" +msgstr "Changelog" + +#: ../../source/ref-changelog.md:3 +#, fuzzy +msgid "Unreleased" +msgstr "Inédit" + +#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:17 +#: ../../source/ref-changelog.md:110 ../../source/ref-changelog.md:210 +#: ../../source/ref-changelog.md:294 ../../source/ref-changelog.md:358 +#: ../../source/ref-changelog.md:416 ../../source/ref-changelog.md:485 +#: ../../source/ref-changelog.md:614 ../../source/ref-changelog.md:656 +#: ../../source/ref-changelog.md:723 ../../source/ref-changelog.md:789 +#: ../../source/ref-changelog.md:834 ../../source/ref-changelog.md:873 +#: ../../source/ref-changelog.md:906 ../../source/ref-changelog.md:956 +msgid "What's new?" +msgstr "Quoi de neuf ?" + +#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:80 +#: ../../source/ref-changelog.md:192 ../../source/ref-changelog.md:282 +#: ../../source/ref-changelog.md:346 ../../source/ref-changelog.md:404 +#: ../../source/ref-changelog.md:473 ../../source/ref-changelog.md:535 +#: ../../source/ref-changelog.md:554 ../../source/ref-changelog.md:710 +#: ../../source/ref-changelog.md:781 ../../source/ref-changelog.md:818 +#: ../../source/ref-changelog.md:861 +msgid "Incompatible changes" +msgstr "Changements incompatibles" + +#: ../../source/ref-changelog.md:9 #, fuzzy +msgid "v1.7.0 (2024-02-05)" +msgstr "v1.3.0 (2023-02-06)" + +#: ../../source/ref-changelog.md:11 ../../source/ref-changelog.md:104 +#: ../../source/ref-changelog.md:204 ../../source/ref-changelog.md:288 +#: ../../source/ref-changelog.md:352 ../../source/ref-changelog.md:410 +#: ../../source/ref-changelog.md:479 ../../source/ref-changelog.md:548 +msgid "Thanks to our contributors" +msgstr "Merci à nos contributeurs" + +#: ../../source/ref-changelog.md:13 ../../source/ref-changelog.md:106 +#: ../../source/ref-changelog.md:206 ../../source/ref-changelog.md:290 +#: ../../source/ref-changelog.md:354 
../../source/ref-changelog.md:412 msgid "" -"Yes, it does. Please take a look at our `blog post " -"`_ or check out the code examples:" +"We would like to give our special thanks to all the contributors who made" +" the new version of Flower possible (in `git shortlog` order):" msgstr "" -"Oui. Jetez un coup d'œil à notre `blog post " -"`_ ou consultez l'`exemple de code Android sur GitHub" -" `_." +"Nous tenons à remercier tout particulièrement tous les contributeurs qui " +"ont rendu possible la nouvelle version de Flower (dans l'ordre `git " +"shortlog`) :" -#: ../../source/ref-faq.rst:21 +#: ../../source/ref-changelog.md:15 msgid "" -"`Android Kotlin example `_" +"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " +"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " +"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " +"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " +"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " +"Shaaban`, `Yan Gao`, `Yasar Abbas` " msgstr "" -#: ../../source/ref-faq.rst:22 -msgid "`Android Java example `_" +#: ../../source/ref-changelog.md:19 +#, fuzzy +msgid "" +"**Introduce stateful clients (experimental)** " +"([#2770](https://github.com/adap/flower/pull/2770), " +"[#2686](https://github.com/adap/flower/pull/2686), " +"[#2696](https://github.com/adap/flower/pull/2696), " +"[#2643](https://github.com/adap/flower/pull/2643), " +"[#2769](https://github.com/adap/flower/pull/2769))" msgstr "" +"**([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" 
+#: ../../source/ref-changelog.md:21 +msgid "" +"Subclasses of `Client` and `NumPyClient` can now store local state that " +"remains on the client. Let's start with the highlight first: this new " +"feature is compatible with both simulated clients (via " +"`start_simulation`) and networked clients (via `start_client`). It's also" +" the first preview of new abstractions like `Context` and `RecordSet`. " +"Clients can access state of type `RecordSet` via `state: RecordSet = " +"self.context.state`. Changes to this `RecordSet` are preserved across " +"different rounds of execution to enable stateful computations in a " +"unified way across simulation and deployment." msgstr "" -":fa:`eye,mr-1` Puis-je combiner l'apprentissage fédéré avec la blockchain" -" ?" -#: ../../source/ref-faq.rst:26 +#: ../../source/ref-changelog.md:23 +#, fuzzy msgid "" -"Yes, of course. A list of available examples using Flower within a " -"blockchain environment is available here:" +"**Improve performance** " +"([#2293](https://github.com/adap/flower/pull/2293))" msgstr "" -"Oui, bien sûr, une liste d'exemples disponibles utilisant Flower dans un " -"environnement blockchain est disponible ici :" +"**Supprimer les stratégies expérimentales** " +"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/ref-faq.rst:28 +#: ../../source/ref-changelog.md:25 msgid "" -"`Flower meets Nevermined GitHub Repository `_." +"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" +"place aggregation to reduce memory consumption. The Flower client " +"serialization/deserialization has been rewritten from the ground up, " +"which results in significant speedups, especially when the client-side " +"training time is short." msgstr "" -"`Flower meets Nevermined GitHub Repository `_." -#: ../../source/ref-faq.rst:29 +#: ../../source/ref-changelog.md:27 +#, fuzzy msgid "" -"`Flower meets Nevermined YouTube video " -"`_." 
+"**Support Federated Learning with Apple MLX and Flower** " +"([#2693](https://github.com/adap/flower/pull/2693))" msgstr "" -"`Flower rencontre Nevermined vidéo YouTube " -"`_." +"**Ajouter un nouvel exemple d'apprentissage fédéré utilisant fastai et " +"Flower** ([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/ref-faq.rst:30 +#: ../../source/ref-changelog.md:29 msgid "" -"`Flower meets KOSMoS `_." +"Flower has official support for federated learning using [Appple " +"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " +"example." msgstr "" -"`Flower rencontre KOSMoS `_." -#: ../../source/ref-faq.rst:31 +#: ../../source/ref-changelog.md:31 +#, fuzzy msgid "" -"`Flower meets Talan blog post `_ ." +"**Introduce new XGBoost cyclic strategy** " +"([#2666](https://github.com/adap/flower/pull/2666), " +"[#2668](https://github.com/adap/flower/pull/2668))" msgstr "" -"`Flower meets Talan blog post `_ ." +"**Introduction du SDK iOS (aperçu)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/ref-faq.rst:32 +#: ../../source/ref-changelog.md:33 msgid "" -"`Flower meets Talan GitHub Repository " -"`_ ." +"A new strategy called `FedXgbCyclic` supports a client-by-client style of" +" training (often called cyclic). The `xgboost-comprehensive` code example" +" shows how to use it in a full project. In addition to that, `xgboost-" +"comprehensive` now also supports simulation mode. With this, Flower " +"offers best-in-class XGBoost support." msgstr "" -"`Flower rencontre Talan Dépôt GitHub " -"`_ ." - -#: ../../source/ref-telemetry.md:1 -msgid "Telemetry" -msgstr "Télémétrie" -#: ../../source/ref-telemetry.md:3 +#: ../../source/ref-changelog.md:35 +#, fuzzy msgid "" -"The Flower open-source project collects **anonymous** usage metrics to " -"make well-informed decisions to improve Flower. 
Doing this enables the " -"Flower team to understand how Flower is used and what challenges users " -"might face." +"**Support Python 3.11** " +"([#2394](https://github.com/adap/flower/pull/2394))" msgstr "" -"Le projet open-source Flower recueille des mesures d'utilisation " -"**anonymes** afin de prendre des décisions éclairées pour améliorer " -"Flower. Cela permet à l'équipe de Flower de comprendre comment Flower est" -" utilisé et quels sont les défis auxquels les utilisateurs peuvent être " -"confrontés." +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/ref-telemetry.md:5 +#: ../../source/ref-changelog.md:37 msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users that do not want to share anonymous usage metrics." +"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " +"ensure better support for users using more recent Python versions." msgstr "" -"**Flower est un cadre convivial pour l'IA collaborative et la science des" -" données.** En restant fidèle à cette déclaration, Flower permet de " -"désactiver facilement la télémétrie pour les utilisateurs qui ne " -"souhaitent pas partager des mesures d'utilisation anonymes." 
- -#: ../../source/ref-telemetry.md:7 -msgid "Principles" -msgstr "Principes" -#: ../../source/ref-telemetry.md:9 -msgid "We follow strong principles guarding anonymous usage metrics collection:" +#: ../../source/ref-changelog.md:39 +#, fuzzy +msgid "" +"**Update gRPC and ProtoBuf dependencies** " +"([#2814](https://github.com/adap/flower/pull/2814))" msgstr "" -"Nous suivons des principes stricts concernant la collecte de données " -"anonymes sur l'utilisation :" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-telemetry.md:11 +#: ../../source/ref-changelog.md:41 msgid "" -"**Optional:** You will always be able to disable telemetry; read on to " -"learn “[How to opt-out](#how-to-opt-out)”." +"The `grpcio` and `protobuf` dependencies were updated to their latest " +"versions for improved security and performance." msgstr "" -"**Optionnel:** Tu pourras toujours désactiver la télémétrie ; lis la " -"suite pour apprendre \"[Comment se désengager](#how-to-opt-out)\"." -#: ../../source/ref-telemetry.md:12 +#: ../../source/ref-changelog.md:43 +#, fuzzy msgid "" -"**Anonymous:** The reported usage metrics are anonymous and do not " -"contain any personally identifiable information (PII). See “[Collected " -"metrics](#collected-metrics)” to understand what metrics are being " -"reported." 
+"**Introduce Docker image for Flower server** " +"([#2700](https://github.com/adap/flower/pull/2700), " +"[#2688](https://github.com/adap/flower/pull/2688), " +"[#2705](https://github.com/adap/flower/pull/2705), " +"[#2695](https://github.com/adap/flower/pull/2695), " +"[#2747](https://github.com/adap/flower/pull/2747), " +"[#2746](https://github.com/adap/flower/pull/2746), " +"[#2680](https://github.com/adap/flower/pull/2680), " +"[#2682](https://github.com/adap/flower/pull/2682), " +"[#2701](https://github.com/adap/flower/pull/2701))" msgstr "" -"**Anonyme:** Les mesures d'utilisation rapportées sont anonymes et ne " -"contiennent aucune information personnelle identifiable (PII). Voir " -"\"[Collected metrics](#collected-metrics)\" pour comprendre quelles " -"mesures sont rapportées." +"**([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-telemetry.md:13 +#: ../../source/ref-changelog.md:45 msgid "" -"**Transparent:** You can easily inspect what anonymous metrics are being " -"reported; see the section “[How to inspect what is being reported](#how-" -"to-inspect-what-is-being-reported)”" +"The Flower server can now be run using an official Docker image. A new " +"how-to guide explains [how to run Flower using " +"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html). An official Flower client Docker image will follow." 
msgstr "" -"**Transparent:** Tu peux facilement inspecter les métriques anonymes qui " -"sont rapportées ; voir la section \"[Comment inspecter ce qui est " -"rapporté](#how-to-inspect-what-is-being-reported)\"" -#: ../../source/ref-telemetry.md:14 +#: ../../source/ref-changelog.md:47 #, fuzzy msgid "" -"**Open for feedback:** You can always reach out to us if you have " -"feedback; see the section “[How to contact us](#how-to-contact-us)” for " -"details." +"**Introduce** `flower-via-docker-compose` **example** " +"([#2626](https://github.com/adap/flower/pull/2626))" msgstr "" -"**Ouvert pour les commentaires:** Tu peux toujours nous contacter si tu " -"as des commentaires ; voir la section \"[Comment nous contacter ](#how-" -"to-contact-us)\" pour plus de détails." - -#: ../../source/ref-telemetry.md:16 -msgid "How to opt-out" -msgstr "Comment se désinscrire" +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-telemetry.md:18 +#: ../../source/ref-changelog.md:49 +#, fuzzy msgid "" -"When Flower starts, it will check for an environment variable called " -"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " -"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " -"client, simply do so by prepending your command as in:" +"**Introduce** `quickstart-sklearn-tabular` **example** " +"([#2719](https://github.com/adap/flower/pull/2719))" msgstr "" -"Lorsque Flower démarre, il vérifie la présence d'une variable " -"d'environnement appelée `FLWR_TELEMETRY_ENABLED`. La télémétrie peut " -"facilement être désactivée en réglant `FLWR_TELEMETRY_ENABLED=0`. 
En " -"supposant que tu démarres un serveur ou un client Flower, fais-le " -"simplement en faisant précéder ta commande de la façon suivante :" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-telemetry.md:24 +#: ../../source/ref-changelog.md:51 +#, fuzzy msgid "" -"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," -" `.bashrc` (or whatever configuration file applies to your environment) " -"to disable Flower telemetry permanently." +"**Introduce** `custom-metrics` **example** " +"([#1958](https://github.com/adap/flower/pull/1958))" msgstr "" -"Tu peux aussi exporter `FLWR_TELEMETRY_ENABLED=0` dans, par exemple, " -"`.bashrc` (ou tout autre fichier de configuration qui s'applique à ton " -"environnement) pour désactiver la télémétrie de la fleur de façon " -"permanente." - -#: ../../source/ref-telemetry.md:26 -msgid "Collected metrics" -msgstr "Mesures collectées" - -#: ../../source/ref-telemetry.md:28 -msgid "Flower telemetry collects the following metrics:" -msgstr "La télémétrie des fleurs recueille les métriques suivantes :" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-telemetry.md:30 +#: ../../source/ref-changelog.md:53 +#, fuzzy msgid "" -"**Flower version.** Understand which versions of Flower are currently " -"being used. This helps us to decide whether we should invest effort into " -"releasing a patch version for an older version of Flower or instead use " -"the bandwidth to build new features." 
+"**Update code examples to use Flower Datasets** " +"([#2450](https://github.com/adap/flower/pull/2450), " +"[#2456](https://github.com/adap/flower/pull/2456), " +"[#2318](https://github.com/adap/flower/pull/2318), " +"[#2712](https://github.com/adap/flower/pull/2712))" msgstr "" -"**Cela nous aide à décider si nous devons investir des efforts dans la " -"publication d'une version corrective pour une version plus ancienne de " -"Flower ou si nous devons plutôt utiliser la bande passante pour " -"développer de nouvelles fonctionnalités." +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-telemetry.md:32 +#: ../../source/ref-changelog.md:55 msgid "" -"**Operating system.** Enables us to answer questions such as: *Should we " -"create more guides for Linux, macOS, or Windows?*" +"Several code examples were updated to use [Flower " +"Datasets](https://flower.ai/docs/datasets/)." msgstr "" -"**Système d'exploitation.** Nous permet de répondre à des questions " -"telles que : *Faudrait-il créer plus de guides pour Linux, macOS ou " -"Windows ?" -#: ../../source/ref-telemetry.md:34 +#: ../../source/ref-changelog.md:57 +#, fuzzy msgid "" -"**Python version.** Knowing the Python version helps us, for example, to " -"decide whether we should invest effort into supporting old versions of " -"Python or stop supporting them and start taking advantage of new Python " -"features." 
+"**General updates to Flower Examples** " +"([#2381](https://github.com/adap/flower/pull/2381), " +"[#2805](https://github.com/adap/flower/pull/2805), " +"[#2782](https://github.com/adap/flower/pull/2782), " +"[#2806](https://github.com/adap/flower/pull/2806), " +"[#2829](https://github.com/adap/flower/pull/2829), " +"[#2825](https://github.com/adap/flower/pull/2825), " +"[#2816](https://github.com/adap/flower/pull/2816), " +"[#2726](https://github.com/adap/flower/pull/2726), " +"[#2659](https://github.com/adap/flower/pull/2659), " +"[#2655](https://github.com/adap/flower/pull/2655))" msgstr "" -"**Version de Python.** Connaître la version de Python nous aide, par " -"exemple, à décider si nous devons investir des efforts dans la prise en " -"charge des anciennes versions de Python ou cesser de les prendre en " -"charge et commencer à tirer parti des nouvelles fonctionnalités de " -"Python." +"**Améliorer l'API (expérimentale) du pilote** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/ref-telemetry.md:36 -msgid "" -"**Hardware properties.** Understanding the hardware environment that " -"Flower is being used in helps to decide whether we should, for example, " -"put more effort into supporting low-resource environments." +#: ../../source/ref-changelog.md:59 +msgid "Many Flower code examples received substantial updates." 
msgstr "" -"**Comprendre l'environnement matériel dans lequel Flower est utilisé " -"permet de décider si nous devrions, par exemple, faire plus d'efforts " -"pour prendre en charge les environnements à faibles ressources." -#: ../../source/ref-telemetry.md:38 +#: ../../source/ref-changelog.md:61 ../../source/ref-changelog.md:154 +#, fuzzy +msgid "**Update Flower Baselines**" +msgstr "Demande pour une nouvelle Flower Baseline" + +#: ../../source/ref-changelog.md:63 +#, fuzzy msgid "" -"**Execution mode.** Knowing what execution mode Flower starts in enables " -"us to understand how heavily certain features are being used and better " -"prioritize based on that." +"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " +"[#2771](https://github.com/adap/flower/pull/2771))" msgstr "" -"**Mode d'exécution** Connaître le mode d'exécution dans lequel Flower " -"démarre nous permet de comprendre à quel point certaines fonctionnalités " -"sont utilisées et de mieux établir les priorités en fonction de cela." +"**Nouvel exemple de code JAX** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/ref-telemetry.md:40 -msgid "" -"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " -"time a Flower workload starts. This allows us to understand which device " -"types not only start Flower workloads but also successfully complete " -"them." +#: ../../source/ref-changelog.md:64 +#, fuzzy +msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" msgstr "" -"**Cluster.** La télémétrie Flower attribue un ID de cluster en mémoire " -"aléatoire à chaque fois qu'une charge de travail Flower démarre. Cela " -"nous permet de comprendre quels types d'appareils non seulement démarrent" -" les charges de travail Flower, mais aussi les terminent avec succès." 
+"Amélioration de la documentation sur le serveur gRPC " +"([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/ref-telemetry.md:42 -msgid "" -"**Source.** Flower telemetry tries to store a random source ID in " -"`~/.flwr/source` the first time a telemetry event is generated. The " -"source ID is important to identify whether an issue is recurring or " -"whether an issue is triggered by multiple clusters running concurrently " -"(which often happens in simulation). For example, if a device runs " -"multiple workloads at the same time, and this results in an issue, then, " -"in order to reproduce the issue, multiple workloads must be started at " -"the same time." +#: ../../source/ref-changelog.md:65 +#, fuzzy +msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" msgstr "" -"**Source.** La télémétrie de Flower essaie de stocker un ID de source " -"aléatoire dans `~/.flwr/source` la première fois qu'un événement de " -"télémétrie est généré. L'ID de source est important pour identifier si un" -" problème est récurrent ou si un problème est déclenché par plusieurs " -"clusters fonctionnant simultanément (ce qui arrive souvent en " -"simulation). Par exemple, si un périphérique exécute plusieurs charges de" -" travail en même temps, et que cela entraîne un problème, alors, afin de " -"reproduire le problème, plusieurs charges de travail doivent être " -"démarrées en même temps." +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-telemetry.md:44 -msgid "" -"You may delete the source ID at any time. If you wish for all events " -"logged under a specific source ID to be deleted, you can send a deletion " -"request mentioning the source ID to `telemetry@flower.dev`. All events " -"related to that source ID will then be permanently deleted." 
+#: ../../source/ref-changelog.md:66 +#, fuzzy +msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" + +#: ../../source/ref-changelog.md:67 +#, fuzzy +msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" + +#: ../../source/ref-changelog.md:68 +#, fuzzy +msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" msgstr "" -"Tu peux supprimer l'identifiant de la source à tout moment. Si tu " -"souhaites que tous les événements enregistrés sous un identifiant de " -"source spécifique soient supprimés, tu peux envoyer une demande de " -"suppression mentionnant l'identifiant de source à `telemetry@flower.dev`." -" Tous les événements liés à cet identifiant de source seront alors " -"définitivement supprimés." +"**Renommé stratégie q-FedAvg** " +"([#802](https://github.com/adap/flower/pull/802))" -#: ../../source/ref-telemetry.md:46 +#: ../../source/ref-changelog.md:70 +#, fuzzy msgid "" -"We will not collect any personally identifiable information. If you think" -" any of the metrics collected could be misused in any way, please [get in" -" touch with us](#how-to-contact-us). We will update this page to reflect " -"any changes to the metrics collected and publish changes in the " -"changelog." +"**Improve documentation** " +"([#2674](https://github.com/adap/flower/pull/2674), " +"[#2480](https://github.com/adap/flower/pull/2480), " +"[#2826](https://github.com/adap/flower/pull/2826), " +"[#2727](https://github.com/adap/flower/pull/2727), " +"[#2761](https://github.com/adap/flower/pull/2761), " +"[#2900](https://github.com/adap/flower/pull/2900))" msgstr "" -"Nous ne collecterons aucune information personnelle identifiable. 
Si tu " -"penses que l'une des métriques collectées pourrait être utilisée à " -"mauvais escient de quelque manière que ce soit, merci de [nous " -"contacter](#commentnouscontacter). Nous mettrons à jour cette page pour " -"refléter toute modification des métriques collectées et nous publierons " -"les changements dans le journal des modifications (changelog)." +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-telemetry.md:48 +#: ../../source/ref-changelog.md:72 +msgid "" +"**Improved testing and development infrastructure** " +"([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " 
+"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" +msgstr "" + +#: ../../source/ref-changelog.md:74 +msgid "" +"The Flower testing and development infrastructure has received " +"substantial updates. This makes Flower 1.7 the most tested release ever." +msgstr "" + +#: ../../source/ref-changelog.md:76 +msgid "" +"**Update dependencies** " +"([#2753](https://github.com/adap/flower/pull/2753), " +"[#2651](https://github.com/adap/flower/pull/2651), " +"[#2739](https://github.com/adap/flower/pull/2739), " +"[#2837](https://github.com/adap/flower/pull/2837), " +"[#2788](https://github.com/adap/flower/pull/2788), " +"[#2811](https://github.com/adap/flower/pull/2811), " +"[#2774](https://github.com/adap/flower/pull/2774), " +"[#2790](https://github.com/adap/flower/pull/2790), " +"[#2751](https://github.com/adap/flower/pull/2751), " +"[#2850](https://github.com/adap/flower/pull/2850), " +"[#2812](https://github.com/adap/flower/pull/2812), " +"[#2872](https://github.com/adap/flower/pull/2872), " +"[#2736](https://github.com/adap/flower/pull/2736), " +"[#2756](https://github.com/adap/flower/pull/2756), " +"[#2857](https://github.com/adap/flower/pull/2857), " +"[#2757](https://github.com/adap/flower/pull/2757), " +"[#2810](https://github.com/adap/flower/pull/2810), " +"[#2740](https://github.com/adap/flower/pull/2740), " +"[#2789](https://github.com/adap/flower/pull/2789))" +msgstr "" + +#: ../../source/ref-changelog.md:78 msgid "" -"If you think other metrics would be helpful for us to 
better guide our " -"decisions, please let us know! We will carefully review them; if we are " -"confident that they do not compromise user privacy, we may add them." +"**General improvements** " +"([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " 
+"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" +msgstr "" + +#: ../../source/ref-changelog.md:82 +#, fuzzy +msgid "" +"**Deprecate** `start_numpy_client` " +"([#2563](https://github.com/adap/flower/pull/2563), " +"[#2718](https://github.com/adap/flower/pull/2718))" msgstr "" -"Si tu penses que d'autres mesures nous seraient utiles pour mieux " -"orienter nos décisions, fais-le nous savoir ! Nous les examinerons " -"attentivement ; si nous sommes convaincus qu'elles ne compromettent pas " -"la vie privée des utilisateurs, nous pourrons les ajouter." - -#: ../../source/ref-telemetry.md:50 -msgid "How to inspect what is being reported" -msgstr "Comment inspecter ce qui est rapporté" +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-telemetry.md:52 +#: ../../source/ref-changelog.md:84 msgid "" -"We wanted to make it very easy for you to inspect what anonymous usage " -"metrics are reported. You can view all the reported telemetry information" -" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " -"is disabled by default. You may use logging independently from " -"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " -"without sending any metrics." +"Until now, clients of type `NumPyClient` needed to be started via " +"`start_numpy_client`. In our efforts to consolidate framework APIs, we " +"have introduced changes, and now all client types should start via " +"`start_client`. To continue using `NumPyClient` clients, you simply need " +"to first call the `.to_client()` method and then pass returned `Client` " +"object to `start_client`. The examples and the documentation have been " +"updated accordingly." 
msgstr "" -"Nous avons voulu qu'il soit très facile pour toi d'inspecter les mesures " -"d'utilisation anonymes qui sont rapportées. Tu peux voir toutes les " -"informations de télémétrie rapportées en définissant la variable " -"d'environnement `FLWR_TELEMETRY_LOGGING=1`. La journalisation est " -"désactivée par défaut. Tu peux utiliser la journalisation indépendamment " -"de `FLWR_TELEMETRY_ENABLED` afin d'inspecter la fonction de télémétrie " -"sans envoyer de mesures." -#: ../../source/ref-telemetry.md:58 +#: ../../source/ref-changelog.md:86 +#, fuzzy msgid "" -"The inspect Flower telemetry without sending any anonymous usage metrics," -" use both environment variables:" +"**Deprecate legacy DP wrappers** " +"([#2749](https://github.com/adap/flower/pull/2749))" msgstr "" -"L'inspecteur Flower telemetry sans envoyer de métriques d'utilisation " -"anonymes, utilise les deux variables d'environnement :" +"**Supprimez KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-telemetry.md:64 -msgid "How to contact us" -msgstr "Comment nous contacter" +#: ../../source/ref-changelog.md:88 +msgid "" +"Legacy DP wrapper classes are deprecated, but still functional. This is " +"in preparation for an all-new pluggable version of differential privacy " +"support in Flower." +msgstr "" -#: ../../source/ref-telemetry.md:66 +#: ../../source/ref-changelog.md:90 +#, fuzzy msgid "" -"We want to hear from you. If you have any feedback or ideas on how to " -"improve the way we handle anonymous usage metrics, reach out to us via " -"[Slack](https://flower.dev/join-slack/) (channel `#telemetry`) or email " -"(`telemetry@flower.dev`)." 
+"**Make optional arg** `--callable` **in** `flower-client` **a required " +"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" msgstr "" -"Si tu as des commentaires ou des idées pour améliorer la façon dont nous " -"traitons les mesures d'utilisation anonymes, contacte-nous via " -"[Slack](https://flower.dev/join-slack/) (canal `#telemetry`) ou par " -"courriel (`telemetry@flower.dev`)." +"**Log** `Client` **exceptions dans le moteur de client virtuel** " +"([#1493](https://github.com/adap/flower/pull/1493))" -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:9 +#: ../../source/ref-changelog.md:92 #, fuzzy -msgid "Build a strategy from scratch" -msgstr "Élaborer une stratégie à partir de zéro" +msgid "" +"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " +"([#2890](https://github.com/adap/flower/pull/2890))" +msgstr "" +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:11 +#: ../../source/ref-changelog.md:94 #, fuzzy msgid "" -"Welcome to the third part of the Flower federated learning tutorial. In " -"previous parts of this tutorial, we introduced federated learning with " -"PyTorch and Flower (`part 1 `__) and we learned how strategies " -"can be used to customize the execution on both the server and the clients" -" (`part 2 `__)." +"**Drop experimental** `Task` **fields** " +"([#2866](https://github.com/adap/flower/pull/2866), " +"[#2865](https://github.com/adap/flower/pull/2865))" msgstr "" -"Bienvenue dans la troisième partie du tutoriel sur l'apprentissage fédéré" -" Flower. Dans les parties précédentes de ce tutoriel, nous avons présenté" -" l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " -"`__) " -"et nous avons appris comment les stratégies peuvent être utilisées pour " -"personnaliser l'exécution à la fois sur le serveur et sur les clients " -"(`partie 2 `__)." 
+"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:13 +#: ../../source/ref-changelog.md:96 msgid "" -"In this notebook, we'll continue to customize the federated learning " -"system we built previously by creating a custom version of FedAvg (again," -" using `Flower `__ and `PyTorch " -"`__)." +"Experimental fields `sa`, `legacy_server_message` and " +"`legacy_client_message` were removed from `Task` message. The removed " +"fields are superseded by the new `RecordSet` abstraction." msgstr "" -"Dans ce carnet, nous allons continuer à personnaliser le système " -"d'apprentissage fédéré que nous avons construit précédemment en créant " -"une version personnalisée de FedAvg (encore une fois, en utilisant " -"`Flower `__ et `PyTorch `__)." -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:15 -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:16 -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:15 -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:15 +#: ../../source/ref-changelog.md:98 +#, fuzzy msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the Flower community on Slack to connect, ask questions, and get help: " -"`Join Slack `__ 🌼 We'd love to hear from " -"you in the ``#introductions`` channel! And if anything is unclear, head " -"over to the ``#questions`` channel." +"**Retire MXNet examples** " +"([#2724](https://github.com/adap/flower/pull/2724))" msgstr "" -"`Star Flower on GitHub `__ ⭐️ et " -"rejoignez la communauté Flower sur Slack pour vous connecter, poser des " -"questions et obtenir de l'aide : `Join Slack `__ 🌼 Nous serions ravis d'avoir de vos nouvelles dans le canal " -"``#introductions`` ! Et si quelque chose n'est pas clair, rendez-vous sur" -" le canal ``#questions``." 
+"**Nouvel exemple de code scikit-learn** " +"([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:17 -msgid "Let's build a new ``Strategy`` from scratch!" -msgstr "Construisons une nouvelle ``Stratégie`` à partir de zéro !" +#: ../../source/ref-changelog.md:100 +msgid "" +"The development of the MXNet fremework has ended and the project is now " +"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " +"examples won't receive updates." +msgstr "" -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:29 -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:29 -msgid "Preparation" -msgstr "Préparation" +#: ../../source/ref-changelog.md:102 +#, fuzzy +msgid "v1.6.0 (2023-11-28)" +msgstr "v1.4.0 (2023-04-21)" -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:31 -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:32 -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:31 +#: ../../source/ref-changelog.md:108 msgid "" -"Before we begin with the actual code, let's make sure that we have " -"everything we need." +"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " msgstr "" -"Avant de commencer le code proprement dit, assurons-nous que nous " -"disposons de tout ce dont nous avons besoin." 
-#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:43 -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:44 -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:43 -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:43 -msgid "Installing dependencies" -msgstr "Installation des dépendances" +#: ../../source/ref-changelog.md:112 +#, fuzzy +msgid "" +"**Add experimental support for Python 3.12** " +"([#2565](https://github.com/adap/flower/pull/2565))" +msgstr "" +"**Ajouter la prise en charge expérimentale de Python 3.10 et Python " +"3.11** ([#1135](https://github.com/adap/flower/pull/1135))" -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:45 -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:46 -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:45 -msgid "First, we install the necessary packages:" -msgstr "Tout d'abord, nous installons les paquets nécessaires :" +#: ../../source/ref-changelog.md:114 +#, fuzzy +msgid "" +"**Add new XGBoost examples** " +"([#2612](https://github.com/adap/flower/pull/2612), " +"[#2554](https://github.com/adap/flower/pull/2554), " +"[#2617](https://github.com/adap/flower/pull/2617), " +"[#2618](https://github.com/adap/flower/pull/2618), " +"[#2619](https://github.com/adap/flower/pull/2619), " +"[#2567](https://github.com/adap/flower/pull/2567))" +msgstr "" +"**([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:65 -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:66 -#: 
../../source/tutorial-get-started-with-flower-pytorch.ipynb:65 -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:65 +#: ../../source/ref-changelog.md:116 msgid "" -"Now that we have all dependencies installed, we can import everything we " -"need for this tutorial:" +"We have added a new `xgboost-quickstart` example alongside a new " +"`xgboost-comprehensive` example that goes more in-depth." msgstr "" -"Maintenant que toutes les dépendances sont installées, nous pouvons " -"importer tout ce dont nous avons besoin pour ce tutoriel :" -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:101 -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:102 -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:104 -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:101 +#: ../../source/ref-changelog.md:118 +#, fuzzy msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " -"GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." +"**Add Vertical FL example** " +"([#2598](https://github.com/adap/flower/pull/2598))" msgstr "" -"Il est possible de passer à un runtime dont l'accélération GPU est " -"activée (sur Google Colab : ``Runtime > Change runtime type > Hardware " -"acclerator : GPU > Save``). Note cependant que Google Colab n'est pas " -"toujours en mesure de proposer l'accélération GPU. 
Si tu vois une erreur " -"liée à la disponibilité du GPU dans l'une des sections suivantes, " -"envisage de repasser à une exécution basée sur le CPU en définissant " -"``DEVICE = torch.device(\"cpu\")``. Si le runtime a activé l'accélération" -" GPU, tu devrais voir apparaître le résultat ``Training on cuda``, sinon " -"il dira ``Training on cpu``." +"**Nouvel exemple de code CoreML pour iOS** " +"([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:114 -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:115 -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:114 -msgid "Data loading" -msgstr "Chargement des données" +#: ../../source/ref-changelog.md:120 +msgid "" +"We had many questions about Vertical Federated Learning using Flower, so " +"we decided to add an simple example for it on the [Titanic " +"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " +"tutorial (in the README)." +msgstr "" -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:116 -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:116 +#: ../../source/ref-changelog.md:122 +#, fuzzy msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``. We introduce a new parameter" -" ``num_clients`` which allows us to call ``load_datasets`` with different" -" numbers of clients." +"**Support custom** `ClientManager` **in** `start_driver()` " +"([#2292](https://github.com/adap/flower/pull/2292))" msgstr "" -"Chargeons maintenant les ensembles d'entraînement et de test CIFAR-10, " -"divisons-les en dix ensembles de données plus petits (chacun divisé en " -"ensemble d'entraînement et de validation), et enveloppons le tout dans " -"leur propre ``DataLoader``. 
Nous introduisons un nouveau paramètre " -"``num_clients`` qui nous permet d'appeler ``load_datasets`` avec " -"différents nombres de clients." +"Ajout de la prise en charge d'un `ClientManager` personnalisé comme " +"paramètre de `start_simulation` " +"([#1171](https://github.com/adap/flower/pull/1171))" -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:167 -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:168 -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:167 -msgid "Model training/evaluation" -msgstr "Formation/évaluation du modèle" +#: ../../source/ref-changelog.md:124 +#, fuzzy +msgid "" +"**Update REST API to support create and delete nodes** " +"([#2283](https://github.com/adap/flower/pull/2283))" +msgstr "" +"**Nouvelle stratégie expérimentale TensorBoard** " +"([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:169 -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:170 -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:169 +#: ../../source/ref-changelog.md:126 +#, fuzzy msgid "" -"Let's continue with the usual model definition (including " -"``set_parameters`` and ``get_parameters``), training and test functions:" +"**Update the Android SDK** " +"([#2187](https://github.com/adap/flower/pull/2187))" msgstr "" -"Continuons avec la définition habituelle du modèle (y compris " -"``set_parameters`` et ``get_parameters``), les fonctions d'entraînement " -"et de test :" +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:258 -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:258 -msgid "Flower client" -msgstr "Client de Flower" +#: ../../source/ref-changelog.md:128 +msgid "Add gRPC request-response capability to the Android SDK." 
+msgstr "" -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:260 -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:260 +#: ../../source/ref-changelog.md:130 +#, fuzzy msgid "" -"To implement the Flower client, we (again) create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " -"``cid`` to the client and use it log additional details:" +"**Update the C++ SDK** " +"([#2537](https://github.com/adap/flower/pull/2537), " +"[#2528](https://github.com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" msgstr "" -"Pour mettre en œuvre le client Flower, nous créons (à nouveau) une sous-" -"classe de ``flwr.client.NumPyClient`` et mettons en œuvre les trois " -"méthodes ``get_parameters``, ``fit`` et ``evaluate``. Ici, nous " -"transmettons également le ``cid`` au client et l'utilisons pour consigner" -" des détails supplémentaires :" - -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:308 -msgid "Let's test what we have so far before we continue:" -msgstr "Testons ce que nous avons jusqu'à présent avant de continuer :" +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:339 -msgid "Build a Strategy from scratch" -msgstr "Élaborer une stratégie à partir de zéro" +#: ../../source/ref-changelog.md:132 +msgid "Add gRPC request-response capability to the C++ SDK." 
+msgstr "" -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:341 +#: ../../source/ref-changelog.md:134 +#, fuzzy msgid "" -"Let’s overwrite the ``configure_fit`` method such that it passes a higher" -" learning rate (potentially also other hyperparameters) to the optimizer " -"of a fraction of the clients. We will keep the sampling of the clients as" -" it is in ``FedAvg`` and then change the configuration dictionary (one of" -" the ``FitIns`` attributes)." +"**Make HTTPS the new default** " +"([#2591](https://github.com/adap/flower/pull/2591), " +"[#2636](https://github.com/adap/flower/pull/2636))" msgstr "" -"Remplaçons la méthode ``configure_fit`` de façon à ce qu'elle transmette " -"un taux d'apprentissage plus élevé (potentiellement aussi d'autres " -"hyperparamètres) à l'optimiseur d'une fraction des clients. Nous " -"garderons l'échantillonnage des clients tel qu'il est dans ``FedAvg`` et " -"changerons ensuite le dictionnaire de configuration (l'un des attributs " -"``FitIns``)." +"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:507 +#: ../../source/ref-changelog.md:136 msgid "" -"The only thing left is to use the newly created custom Strategy " -"``FedCustom`` when starting the experiment:" +"Flower is moving to HTTPS by default. The new `flower-server` requires " +"passing `--certificates`, but users can enable `--insecure` to use HTTP " +"for prototyping. The same applies to `flower-client`, which can either " +"use user-provided credentials or gRPC-bundled certificates to connect to " +"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " +"enable insecure HTTP connections." 
msgstr "" -"Il ne reste plus qu'à utiliser la stratégie personnalisée nouvellement " -"créée ``FedCustom`` lors du démarrage de l'expérience :" - -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:534 -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:932 -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:697 -msgid "Recap" -msgstr "Récapitulation" -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:536 +#: ../../source/ref-changelog.md:138 msgid "" -"In this notebook, we’ve seen how to implement a custom strategy. A custom" -" strategy enables granular control over client node configuration, result" -" aggregation, and more. To define a custom strategy, you only have to " -"overwrite the abstract methods of the (abstract) base class ``Strategy``." -" To make custom strategies even more powerful, you can pass custom " -"functions to the constructor of your new class (``__init__``) and then " -"call these functions whenever needed." +"For backward compatibility, `start_client()` and `start_numpy_client()` " +"will still start in insecure mode by default. In a future release, " +"insecure connections will require user opt-in by passing `insecure=True`." msgstr "" -"Dans ce carnet, nous avons vu comment mettre en place une stratégie " -"personnalisée. Une stratégie personnalisée permet un contrôle granulaire " -"sur la configuration des nœuds clients, l'agrégation des résultats, et " -"bien plus encore. Pour définir une stratégie personnalisée, il te suffit " -"d'écraser les méthodes abstraites de la classe de base (abstraite) " -"``Strategy``. Pour rendre les stratégies personnalisées encore plus " -"puissantes, tu peux passer des fonctions personnalisées au constructeur " -"de ta nouvelle classe (``__init__``) et appeler ensuite ces fonctions à " -"chaque fois que c'est nécessaire." 
-#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:550 -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:948 -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:749 -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:715 -#: ../../source/tutorial-what-is-federated-learning.ipynb:369 +#: ../../source/ref-changelog.md:140 +#, fuzzy msgid "" -"Before you continue, make sure to join the Flower community on Slack: " -"`Join Slack `__" +"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" msgstr "" -"Avant de continuer, n'oublie pas de rejoindre la communauté Flower sur " -"Slack : `Join Slack `__" +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:552 -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:950 -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:751 -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:717 -#: ../../source/tutorial-what-is-federated-learning.ipynb:371 +#: ../../source/ref-changelog.md:142 msgid "" -"There's a dedicated ``#questions`` channel if you need help, but we'd " -"also love to hear who you are in ``#introductions``!" +"Using the `client_fn`, Flower clients can interchangeably run as " +"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" +" `start_simulation`) without requiring changes to how the client class is" +" defined and instantiated. The `to_client()` function is introduced to " +"convert a `NumPyClient` to a `Client`." 
msgstr "" -"Il existe un canal dédié aux ``questions`` si vous avez besoin d'aide, " -"mais nous aimerions aussi savoir qui vous êtes dans ``#introductions`` !" -#: ../../source/tutorial-build-a-strategy-from-scratch-pytorch.ipynb:554 +#: ../../source/ref-changelog.md:144 #, fuzzy msgid "" -"The `Flower Federated Learning Tutorial - Part 4 " -"`__ introduces ``Client``, the flexible API underlying " -"``NumPyClient``." +"**Add new** `Bulyan` **strategy** " +"([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891))" msgstr "" -"Le `Tutoriel d'apprentissage fédéré Flower - Partie 4 " -"`__ présente ``Client``, l'API flexible qui sous-tend " -"``NumPyClient``." - -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:9 -#, fuzzy -msgid "Customize the client" -msgstr "Création du client IMDBC" +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:11 +#: ../../source/ref-changelog.md:146 #, fuzzy msgid "" -"Welcome to the fourth part of the Flower federated learning tutorial. In " -"the previous parts of this tutorial, we introduced federated learning " -"with PyTorch and Flower (`part 1 `__), we learned how " -"strategies can be used to customize the execution on both the server and " -"the clients (`part 2 `__), and we built our own " -"custom strategy from scratch (`part 3 `__)." +"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " +"2018](https://arxiv.org/abs/1802.07927)" msgstr "" -"Bienvenue dans la quatrième partie du tutoriel sur l'apprentissage fédéré" -" Flower. 
Dans les parties précédentes de ce tutoriel, nous avons présenté" -" l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " -"`__)," -" nous avons appris comment les stratégies peuvent être utilisées pour " -"personnaliser l'exécution à la fois sur le serveur et les clients " -"(`partie 2 `__), et nous avons construit notre propre stratégie " -"personnalisée à partir de zéro (`partie 3 - WIP " -"`__)." +"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " +"(FedMedian) par [Yin et al., 2018] " +"(https://arxiv.org/pdf/1803.01498v1.pdf)." -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:14 +#: ../../source/ref-changelog.md:148 +#, fuzzy msgid "" -"In this notebook, we revisit ``NumPyClient`` and introduce a new " -"baseclass for building clients, simply named ``Client``. In previous " -"parts of this tutorial, we've based our client on ``NumPyClient``, a " -"convenience class which makes it easy to work with machine learning " -"libraries that have good NumPy interoperability. With ``Client``, we gain" -" a lot of flexibility that we didn't have before, but we'll also have to " -"do a few things the we didn't have to do before." +"**Add new** `XGB Bagging` **strategy** " +"([#2611](https://github.com/adap/flower/pull/2611))" msgstr "" -"Dans ce carnet, nous revisitons `NumPyClient`` et introduisons une " -"nouvelle classe de base pour construire des clients, simplement appelée " -"`Client``. Dans les parties précédentes de ce tutoriel, nous avons basé " -"notre client sur ``NumPyClient``, une classe de commodité qui facilite le" -" travail avec les bibliothèques d'apprentissage automatique qui ont une " -"bonne interopérabilité NumPy. Avec ``Client``, nous gagnons beaucoup de " -"flexibilité que nous n'avions pas auparavant, mais nous devrons également" -" faire quelques choses que nous n'avions pas à faire auparavant." 
+"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:18 +#: ../../source/ref-changelog.md:150 ../../source/ref-changelog.md:152 +#, fuzzy msgid "" -"Let's go deeper and see what it takes to move from ``NumPyClient`` to " -"``Client``!" +"**Introduce `WorkloadState`** " +"([#2564](https://github.com/adap/flower/pull/2564), " +"[#2632](https://github.com/adap/flower/pull/2632))" msgstr "" -"Allons plus loin et voyons ce qu'il faut faire pour passer de " -"``NumPyClient`` à ``Client`` !" - -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:30 -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:29 -msgid "Step 0: Preparation" -msgstr "Étape 0 : Préparation" +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:117 +#: ../../source/ref-changelog.md:156 +#, fuzzy msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``." +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" msgstr "" -"Chargeons maintenant les ensembles d'entraînement et de test CIFAR-10, " -"divisons-les en dix ensembles de données plus petits (chacun divisé en " -"ensemble d'entraînement et de validation) et enveloppons le tout dans " -"leur propre ``DataLoader``." 
- -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:259 -msgid "Step 1: Revisiting NumPyClient" -msgstr "Étape 1 : Revoir NumPyClient" +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:261 +#: ../../source/ref-changelog.md:158 +#, fuzzy msgid "" -"So far, we've implemented our client by subclassing " -"``flwr.client.NumPyClient``. The three methods we implemented are " -"``get_parameters``, ``fit``, and ``evaluate``. Finally, we wrap the " -"creation of instances of this class in a function called ``client_fn``:" +"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400))" msgstr "" -"Jusqu'à présent, nous avons implémenté notre client en sous-classant " -"``flwr.client.NumPyClient``. Les trois méthodes que nous avons " -"implémentées sont ``get_parameters``, ``fit`` et ``evaluate``. Enfin, " -"nous enveloppons la création d'instances de cette classe dans une " -"fonction appelée ``client_fn`` :" +"**Nouvel exemple de code JAX** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:309 +#: ../../source/ref-changelog.md:160 +#, fuzzy msgid "" -"We've seen this before, there's nothing new so far. The only *tiny* " -"difference compared to the previous notebook is naming, we've changed " -"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " -"``numpyclient_fn``. Let's run it to see the output we get:" +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" msgstr "" -"Nous avons déjà vu cela auparavant, il n'y a rien de nouveau jusqu'à " -"présent. 
La seule *petite* différence par rapport au carnet précédent est" -" le nommage, nous avons changé ``FlowerClient`` en ``FlowerNumPyClient`` " -"et ``client_fn`` en ``numpyclient_fn``. Exécutons-le pour voir la sortie " -"que nous obtenons :" +"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:339 +#: ../../source/ref-changelog.md:162 +#, fuzzy msgid "" -"This works as expected, two clients are training for three rounds of " -"federated learning." +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" msgstr "" -"Cela fonctionne comme prévu, deux clients s'entraînent pour trois tours " -"d'apprentissage fédéré." +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:341 -msgid "" -"Let's dive a little bit deeper and discuss how Flower executes this " -"simulation. Whenever a client is selected to do some work, " -"``start_simulation`` calls the function ``numpyclient_fn`` to create an " -"instance of our ``FlowerNumPyClient`` (along with loading the model and " -"the data)." +#: ../../source/ref-changelog.md:164 +#, fuzzy +msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" + +#: ../../source/ref-changelog.md:166 +#, fuzzy +msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" msgstr "" -"Plongeons un peu plus profondément et discutons de la façon dont Flower " -"exécute cette simulation. 
Chaque fois qu'un client est sélectionné pour " -"effectuer un travail, ``start_simulation`` appelle la fonction " -"``numpyclient_fn`` pour créer une instance de notre ``FlowerNumPyClient``" -" (en même temps qu'il charge le modèle et les données)." +"Amélioration de la documentation sur le serveur gRPC " +"([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:343 -msgid "" -"But here's the perhaps surprising part: Flower doesn't actually use the " -"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " -"makes it look like a subclass of ``flwr.client.Client``, not " -"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " -"know how to handle ``NumPyClient``'s, it only knows how to handle " -"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " -"top of ``Client``." +#: ../../source/ref-changelog.md:168 +#, fuzzy +msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" msgstr "" -"Mais voici la partie la plus surprenante : Flower n'utilise pas " -"directement l'objet `FlowerNumPyClient`. Au lieu de cela, il enveloppe " -"l'objet pour le faire ressembler à une sous-classe de " -"`flwr.client.Client`, et non de `flwr.client.NumPyClient`. En fait, le " -"noyau de Flower ne sait pas comment gérer les `NumPyClient`, il sait " -"seulement comment gérer les `Client`. `NumPyClient` est juste une " -"abstraction de commodité construite au dessus de `Client`." +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:345 -msgid "" -"Instead of building on top of ``NumPyClient``, we can directly build on " -"top of ``Client``." 
+#: ../../source/ref-changelog.md:170 +#, fuzzy +msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" msgstr "" -"Au lieu de construire par-dessus `NumPyClient``, nous pouvons construire " -"directement par-dessus `Client``." +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:357 -msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" -msgstr "Étape 2 : Passer de ``NumPyClient`` à ``Client``" +#: ../../source/ref-changelog.md:172 +#, fuzzy +msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:359 -msgid "" -"Let's try to do the same thing using ``Client`` instead of " -"``NumPyClient``." -msgstr "" -"Essayons de faire la même chose en utilisant ``Client`` au lieu de " -"``NumPyClient``." +#: ../../source/ref-changelog.md:174 +#, fuzzy +msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:465 -msgid "" -"Before we discuss the code in more detail, let's try to run it! Gotta " -"make sure our new ``Client``-based client works, right?" -msgstr "" -"Avant de discuter du code plus en détail, essayons de l'exécuter ! Nous " -"devons nous assurer que notre nouveau client basé sur le ``Client`` " -"fonctionne, n'est-ce pas ?" +#: ../../source/ref-changelog.md:176 +#, fuzzy +msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:490 +#: ../../source/ref-changelog.md:178 +#, fuzzy msgid "" -"That's it, we're now using ``Client``. 
It probably looks similar to what " -"we've done with ``NumPyClient``. So what's the difference?" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" msgstr "" -"Voilà, nous utilisons maintenant ``Client``. Cela ressemble probablement " -"à ce que nous avons fait avec ``NumPyClient``. Alors quelle est la " -"différence ?" +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:492 +#: ../../source/ref-changelog.md:180 +#, fuzzy msgid "" -"First of all, it's more code. But why? The difference comes from the fact" -" that ``Client`` expects us to take care of parameter serialization and " -"deserialization. For Flower to be able to send parameters over the " -"network, it eventually needs to turn these parameters into ``bytes``. " -"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " -"serialization. Turning raw bytes into something more useful (like NumPy " -"``ndarray``'s) is called deserialization. Flower needs to do both: it " -"needs to serialize parameters on the server-side and send them to the " -"client, the client needs to deserialize them to use them for local " -"training, and then serialize the updated parameters again to send them " -"back to the server, which (finally!) deserializes them again in order to " -"aggregate them with the updates received from other clients." +"**General updates to Flower Examples** " +"([#2384](https://github.com/adap/flower/pull/2384), " +"[#2425](https://github.com/adap/flower/pull/2425), " +"[#2526](https://github.com/adap/flower/pull/2526), " +"[#2302](https://github.com/adap/flower/pull/2302), " +"[#2545](https://github.com/adap/flower/pull/2545))" msgstr "" -"First of all, it's more code. But why? 
The difference comes from the fact" -" that ``Client`` expects us to take care of parameter serialization and " -"deserialization. For Flower to be able to send parameters over the " -"network, it eventually needs to turn these parameters into ``bytes``. " -"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " -"serialization. Turning raw bytes into something more useful (like NumPy " -"``ndarray``'s) is called deserialization. Flower needs to do both: it " -"needs to serialize parameters on the server-side and send them to the " -"client, the client needs to deserialize them to use them for local " -"training, and then serialize the updated parameters again to send them " -"back to the server, which (finally!) deserializes them again in order to " -"aggregate them with the updates received from other clients." +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:495 +#: ../../source/ref-changelog.md:182 +#, fuzzy msgid "" -"The only *real* difference between Client and NumPyClient is that " -"NumPyClient takes care of serialization and deserialization for you. It " -"can do so because it expects you to return parameters as NumPy ndarray's," -" and it knows how to handle these. This makes working with machine " -"learning libraries that have good NumPy support (most of them) a breeze." 
+"**General updates to Flower Baselines** " +"([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " +"[#2307](https://github.com/adap/flower/pull/2307), " +"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435), " +"[#2462](https://github.com/adap/flower/pull/2462), " +"[#2463](https://github.com/adap/flower/pull/2463), " +"[#2461](https://github.com/adap/flower/pull/2461), " +"[#2469](https://github.com/adap/flower/pull/2469), " +"[#2466](https://github.com/adap/flower/pull/2466), " +"[#2471](https://github.com/adap/flower/pull/2471), " +"[#2472](https://github.com/adap/flower/pull/2472), " +"[#2470](https://github.com/adap/flower/pull/2470))" msgstr "" -"La seule *vraie* différence entre Client et NumPyClient est que " -"NumPyClient s'occupe de la sérialisation et de la désérialisation pour " -"toi. Il peut le faire parce qu'il s'attend à ce que tu renvoies des " -"paramètres sous forme de NumPy ndarray, et il sait comment les gérer. " -"Cela permet de travailler avec des bibliothèques d'apprentissage " -"automatique qui ont une bonne prise en charge de NumPy (la plupart " -"d'entre elles) en un clin d'œil." 
+"**Améliorations générales** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:497 +#: ../../source/ref-changelog.md:184 +#, fuzzy msgid "" -"In terms of API, there's one major difference: all methods in Client take" -" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " -"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " -"``NumPyClient`` on the other hand have multiple arguments (e.g., " -"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" -" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " -"``NumPyClient.fit``) if there are multiple things to handle. These " -"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " -"values you're used to from ``NumPyClient``." +"**General updates to the simulation engine** " +"([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448), " +"[#2294](https://github.com/adap/flower/pull/2294))" msgstr "" -"In terms of API, there's one major difference: all methods in Client take" -" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " -"exactly one value (e.g., ``FitRes`` in ``Client.fit``). 
The methods in " -"``NumPyClient`` on the other hand have multiple arguments (e.g., " -"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" -" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " -"``NumPyClient.fit``) if there are multiple things to handle. These " -"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " -"values you're used to from ``NumPyClient``." - -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:510 -msgid "Step 3: Custom serialization" -msgstr "Étape 3 : Sérialisation personnalisée" +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:512 +#: ../../source/ref-changelog.md:186 +#, fuzzy msgid "" -"Here we will explore how to implement custom serialization with a simple " -"example." +"**General updates to Flower SDKs** " +"([#2288](https://github.com/adap/flower/pull/2288), " +"[#2429](https://github.com/adap/flower/pull/2429), " +"[#2555](https://github.com/adap/flower/pull/2555), " +"[#2543](https://github.com/adap/flower/pull/2543), " +"[#2544](https://github.com/adap/flower/pull/2544), " +"[#2597](https://github.com/adap/flower/pull/2597), " +"[#2623](https://github.com/adap/flower/pull/2623))" msgstr "" -"Nous allons ici explorer comment mettre en œuvre une sérialisation " -"personnalisée à l'aide d'un exemple simple." 
+"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:514 +#: ../../source/ref-changelog.md:188 msgid "" -"But first what is serialization? Serialization is just the process of " -"converting an object into raw bytes, and equally as important, " -"deserialization is the process of converting raw bytes back into an " -"object. This is very useful for network communication. Indeed, without " -"serialization, you could not just a Python object through the internet." +"**General improvements** " +"([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " 
+"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" +msgstr "" + +#: ../../source/ref-changelog.md:190 ../../source/ref-changelog.md:280 +#: ../../source/ref-changelog.md:344 ../../source/ref-changelog.md:398 +#: ../../source/ref-changelog.md:465 +msgid "Flower received many improvements under the hood, too many to list here." msgstr "" -"Mais d'abord, qu'est-ce que la sérialisation ? La sérialisation est " -"simplement le processus de conversion d'un objet en octets bruts, et tout" -" aussi important, la désérialisation est le processus de reconversion des" -" octets bruts en objet. Ceci est très utile pour la communication réseau." -" En effet, sans la sérialisation, tu ne pourrais pas faire passer un " -"objet Python par Internet." +"Flower a reçu de nombreuses améliorations sous le capot, trop nombreuses " +"pour être énumérées ici." -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:516 +#: ../../source/ref-changelog.md:194 +#, fuzzy msgid "" -"Federated Learning relies heavily on internet communication for training " -"by sending Python objects back and forth between the clients and the " -"server. This means that serialization is an essential part of Federated " -"Learning." 
+"**Remove support for Python 3.7** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" msgstr "" -"L'apprentissage fédéré s'appuie fortement sur la communication Internet " -"pour la formation en envoyant des objets Python dans les deux sens entre " -"les clients et le serveur, ce qui signifie que la sérialisation est un " -"élément essentiel de l'apprentissage fédéré." +"**Nouvel exemple de code MLCube** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:518 +#: ../../source/ref-changelog.md:196 msgid "" -"In the following section, we will write a basic example where instead of " -"sending a serialized version of our ``ndarray``\\ s containing our " -"parameters, we will first convert the ``ndarray`` into sparse matrices, " -"before sending them. This technique can be used to save bandwidth, as in " -"certain cases where the weights of a model are sparse (containing many 0 " -"entries), converting them to a sparse matrix can greatly improve their " -"bytesize." +"Python 3.7 support was deprecated in Flower 1.5, and this release removes" +" support. Flower now requires Python 3.8." msgstr "" -"Dans la section suivante, nous allons écrire un exemple de base où, au " -"lieu d'envoyer une version sérialisée de nos ``ndarray`` contenant nos " -"paramètres, nous allons d'abord convertir les ``ndarray`` en matrices " -"éparses, avant de les envoyer. 
Cette technique peut être utilisée pour " -"économiser de la bande passante, car dans certains cas où les poids d'un " -"modèle sont épars (contenant de nombreuses entrées 0), les convertir en " -"une matrice éparse peut grandement améliorer leur taille en octets." - -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:521 -msgid "Our custom serialization/deserialization functions" -msgstr "Nos fonctions de sérialisation/désérialisation personnalisées" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:523 +#: ../../source/ref-changelog.md:198 +#, fuzzy msgid "" -"This is where the real serialization/deserialization will happen, " -"especially in ``ndarray_to_sparse_bytes`` for serialization and " -"``sparse_bytes_to_ndarray`` for deserialization." +"**Remove experimental argument** `rest` **from** `start_client` " +"([#2324](https://github.com/adap/flower/pull/2324))" msgstr "" -"C'est là que la véritable sérialisation/désérialisation se produira, en " -"particulier dans ``ndarray_to_sparse_bytes`` pour la sérialisation et " -"``sparse_bytes_to_ndarray`` pour la désérialisation." +"**Supprimer les stratégies expérimentales** " +"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:525 +#: ../../source/ref-changelog.md:200 msgid "" -"Note that we imported the ``scipy.sparse`` library in order to convert " -"our arrays." +"The (still experimental) argument `rest` was removed from `start_client` " +"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " +"experimental REST API instead." msgstr "" -"Notez que nous avons importé la bibliothèque ``scipy.sparse`` afin de " -"convertir nos tableaux." 
-#: ../../source/tutorial-customize-the-client-pytorch.ipynb:613 -msgid "Client-side" -msgstr "Côté client" +#: ../../source/ref-changelog.md:202 +#, fuzzy +msgid "v1.5.0 (2023-08-31)" +msgstr "v1.4.0 (2023-04-21)" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:615 +#: ../../source/ref-changelog.md:208 msgid "" -"To be able to able to serialize our ``ndarray``\\ s into sparse " -"parameters, we will just have to call our custom functions in our " -"``flwr.client.Client``." +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " msgstr "" -"Pour pouvoir sérialiser nos ``ndarray`` en paramètres sparse, il nous " -"suffira d'appeler nos fonctions personnalisées dans notre " -"``flwr.client.Client``." -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:617 +#: ../../source/ref-changelog.md:212 +#, fuzzy msgid "" -"Indeed, in ``get_parameters`` we need to serialize the parameters we got " -"from our network using our custom ``ndarrays_to_sparse_parameters`` " -"defined above." +"**Introduce new simulation engine** " +"([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" msgstr "" -"En effet, dans ``get_parameters`` nous devons sérialiser les paramètres " -"que nous avons obtenus de notre réseau en utilisant nos " -"``ndarrays_to_sparse_parameters`` personnalisés définis ci-dessus." 
+"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:619 +#: ../../source/ref-changelog.md:214 msgid "" -"In ``fit``, we first need to deserialize the parameters coming from the " -"server using our custom ``sparse_parameters_to_ndarrays`` and then we " -"need to serialize our local results with " -"``ndarrays_to_sparse_parameters``." +"The new simulation engine has been rewritten from the ground up, yet it " +"remains fully backwards compatible. It offers much improved stability and" +" memory handling, especially when working with GPUs. Simulations " +"transparently adapt to different settings to scale simulation in CPU-" +"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." msgstr "" -"Dans ``fit``, nous devons d'abord désérialiser les paramètres provenant " -"du serveur en utilisant notre ``sparse_parameters_to_ndarrays`` " -"personnalisé, puis nous devons sérialiser nos résultats locaux avec " -"``ndarrays_to_sparse_parameters``." -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:621 +#: ../../source/ref-changelog.md:216 msgid "" -"In ``evaluate``, we will only need to deserialize the global parameters " -"with our custom function." +"Comprehensive documentation includes a new [how-to run " +"simulations](https://flower.ai/docs/framework/how-to-run-" +"simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." msgstr "" -"Dans ``evaluate``, nous n'aurons besoin que de désérialiser les " -"paramètres globaux avec notre fonction personnalisée." 
-#: ../../source/tutorial-customize-the-client-pytorch.ipynb:725 -msgid "Server-side" -msgstr "Côté serveur" - -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:727 +#: ../../source/ref-changelog.md:218 msgid "" -"For this example, we will just use ``FedAvg`` as a strategy. To change " -"the serialization and deserialization here, we only need to reimplement " -"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" -" functions of the strategy will be inherited from the super class " -"``FedAvg``." +"**Restructure Flower Docs** " +"([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " +"[#2243](https://github.com/adap/flower/pull/2243), " 
+"[#2227](https://github.com/adap/flower/pull/2227))" msgstr "" -"Pour cet exemple, nous utiliserons simplement ``FedAvg`` comme stratégie." -" Pour modifier la sérialisation et la désérialisation ici, il suffit de " -"réimplémenter les fonctions ``evaluate`` et ``aggregate_fit`` de " -"``FedAvg``. Les autres fonctions de la stratégie seront héritées de la " -"super-classe ``FedAvg``." - -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:729 -msgid "As you can see only one line as change in ``evaluate``:" -msgstr "Comme tu peux le voir, seule une ligne a été modifiée dans ``evaluate`` :" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:735 +#: ../../source/ref-changelog.md:220 msgid "" -"And for ``aggregate_fit``, we will first deserialize every result we " -"received:" +"Much effort went into a completely restructured Flower docs experience. " +"The documentation on [flower.ai/docs](flower.ai/docs) is now divided " +"into Flower Framework, Flower Baselines, Flower Android SDK, Flower iOS " +"SDK, and code example projects." msgstr "" -"Et pour ``aggregate_fit``, nous allons d'abord désérialiser chaque " -"résultat que nous avons reçu :" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:744 -msgid "And then serialize the aggregated result:" -msgstr "Puis sérialise le résultat agrégé :" +#: ../../source/ref-changelog.md:222 +#, fuzzy +msgid "" +"**Introduce Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" +msgstr "" +"**Introduction du SDK iOS (aperçu)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:903 -msgid "We can now run our custom serialization example!" +#: ../../source/ref-changelog.md:224 +msgid "" +"This is the first preview release of the Flower Swift SDK. 
Flower support" +" on iOS is improving, and alongside the Swift SDK and code example, there" +" is now also an iOS quickstart tutorial." msgstr "" -"Nous pouvons maintenant exécuter notre exemple de sérialisation " -"personnalisée !" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:934 +#: ../../source/ref-changelog.md:226 +#, fuzzy msgid "" -"In this part of the tutorial, we've seen how we can build clients by " -"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " -"convenience abstraction that makes it easier to work with machine " -"learning libraries that have good NumPy interoperability. ``Client`` is a" -" more flexible abstraction that allows us to do things that are not " -"possible in ``NumPyClient``. In order to do so, it requires us to handle " -"parameter serialization and deserialization ourselves." +"**Introduce Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" msgstr "" -"Dans cette partie du tutoriel, nous avons vu comment construire des " -"clients en sous-classant soit ``NumPyClient``, soit ``Client``. " -"``NumPyClient`` est une abstraction de commodité qui facilite le travail " -"avec les bibliothèques d'apprentissage automatique qui ont une bonne " -"interopérabilité NumPy. ``Client`` est une abstraction plus flexible qui " -"nous permet de faire des choses qui ne sont pas possibles dans " -"``NumPyClient``. Pour ce faire, elle nous oblige à gérer nous-mêmes la " -"sérialisation et la désérialisation des paramètres." +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:952 +#: ../../source/ref-changelog.md:228 msgid "" -"This is the final part of the Flower tutorial (for now!), " -"congratulations! You're now well equipped to understand the rest of the " -"documentation. 
There are many topics we didn't cover in the tutorial, we " -"recommend the following resources:" +"This is the first preview release of the Flower Kotlin SDK. Flower " +"support on Android is improving, and alongside the Kotlin SDK and code " +"example, there is now also an Android quickstart tutorial." msgstr "" -"C'est la dernière partie du tutoriel Flower (pour l'instant !), " -"félicitations ! Tu es maintenant bien équipé pour comprendre le reste de " -"la documentation. Il y a de nombreux sujets que nous n'avons pas abordés " -"dans le tutoriel, nous te recommandons les ressources suivantes :" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:954 -msgid "`Read Flower Docs `__" -msgstr "`Lire les docs sur les fleurs `__" +#: ../../source/ref-changelog.md:230 +#, fuzzy +msgid "" +"**Introduce new end-to-end testing infrastructure** " +"([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " +"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" +msgstr "" +"**Améliorer l'API (expérimentale) du pilote** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " 
+"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:955 +#: ../../source/ref-changelog.md:232 msgid "" -"`Check out Flower Code Examples " -"`__" +"A new testing infrastructure ensures that new changes stay compatible " +"with existing framework integrations or strategies." msgstr "" -"`Check out Flower Code Examples " -"`__" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:956 +#: ../../source/ref-changelog.md:234 #, fuzzy +msgid "**Deprecate Python 3.7**" +msgstr "**Créer le PR**" + +#: ../../source/ref-changelog.md:236 msgid "" -"`Use Flower Baselines for your research " -"`__" +"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" +" Python 3.7 is now deprecated and will be removed in an upcoming release." 
msgstr "" -"`Utilise les lignes de base des fleurs pour ta recherche " -"`__" -#: ../../source/tutorial-customize-the-client-pytorch.ipynb:957 +#: ../../source/ref-changelog.md:238 #, fuzzy msgid "" -"`Watch Flower Summit 2023 videos `__" +"**Add new** `FedTrimmedAvg` **strategy** " +"([#1769](https://github.com/adap/flower/pull/1769), " +"[#1853](https://github.com/adap/flower/pull/1853))" msgstr "" -"`Regardez les vidéos du Flower Summit 2022 `__" +"**Ajouter un nouvel exemple de Federated Analytics avec Pandas** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:9 -msgid "Get started with Flower" +#: ../../source/ref-changelog.md:240 +#, fuzzy +msgid "" +"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)." msgstr "" +"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " +"(FedMedian) par [Yin et al., 2018] " +"(https://arxiv.org/pdf/1803.01498v1.pdf)." -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:11 -#: ../../source/tutorial-what-is-federated-learning.ipynb:11 -msgid "Welcome to the Flower federated learning tutorial!" -msgstr "Bienvenue au tutoriel sur l'apprentissage fédéré de la fleur !" - -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:13 +#: ../../source/ref-changelog.md:242 +#, fuzzy msgid "" -"In this notebook, we'll build a federated learning system using Flower " -"and PyTorch. In part 1, we use PyTorch for the model training pipeline " -"and data loading. In part 2, we continue to federate the PyTorch-based " -"pipeline using Flower." +"**Introduce start_driver** " +"([#1697](https://github.com/adap/flower/pull/1697))" msgstr "" -"Dans ce carnet, nous allons construire un système d'apprentissage fédéré " -"en utilisant Flower et PyTorch. 
Dans la première partie, nous utilisons " -"PyTorch pour le pipeline d'entraînement des modèles et le chargement des " -"données. Dans la deuxième partie, nous continuons à fédérer le pipeline " -"basé sur PyTorch en utilisant Flower." - -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:17 -#: ../../source/tutorial-what-is-federated-learning.ipynb:19 -msgid "Let's get stated!" -msgstr "Allons-y, déclarons-le !" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:31 +#: ../../source/ref-changelog.md:244 msgid "" -"Before we begin with any actual code, let's make sure that we have " -"everything we need." +"In addition to `start_server` and using the raw Driver API, there is a " +"new `start_driver` function that allows for running `start_server` " +"scripts as a Flower driver with only a single-line code change. Check out" +" the `mt-pytorch` code example to see a working example using " +"`start_driver`." msgstr "" -"Avant de commencer à coder, assurons-nous que nous disposons de tout ce " -"dont nous avons besoin." 
-#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:45 +#: ../../source/ref-changelog.md:246 +#, fuzzy msgid "" -"Next, we install the necessary packages for PyTorch (``torch`` and " -"``torchvision``) and Flower (``flwr``):" +"**Add parameter aggregation to** `mt-pytorch` **code example** " +"([#1785](https://github.com/adap/flower/pull/1785))" msgstr "" -"Ensuite, nous installons les paquets nécessaires pour PyTorch (``torch`` " -"et ``torchvision``) et Flower (``flwr``) :" - -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:117 -msgid "Loading the data" -msgstr "Chargement des données" +"**Nouvel exemple de code PyTorch avancé** " +"([#1007](https://github.com/adap/flower/pull/1007))" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:119 +#: ../../source/ref-changelog.md:248 msgid "" -"Federated learning can be applied to many different types of tasks across" -" different domains. In this tutorial, we introduce federated learning by " -"training a simple convolutional neural network (CNN) on the popular " -"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " -"distinguish between images from ten different classes:" +"The `mt-pytorch` example shows how to aggregate parameters when writing a" +" driver script. The included `driver.py` and `server.py` have been " +"aligned to demonstrate both the low-level way and the high-level way of " +"building server-side logic." msgstr "" -"L'apprentissage fédéré peut être appliqué à de nombreux types de tâches " -"dans différents domaines. Dans ce tutoriel, nous présentons " -"l'apprentissage fédéré en formant un simple réseau neuronal " -"convolutionnel (CNN) sur l'ensemble de données populaire CIFAR-10. 
" -"CIFAR-10 peut être utilisé pour former des classificateurs d'images qui " -"font la distinction entre les images de dix classes différentes :" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:150 +#: ../../source/ref-changelog.md:250 +#, fuzzy msgid "" -"We simulate having multiple datasets from multiple organizations (also " -"called the \"cross-silo\" setting in federated learning) by splitting the" -" original CIFAR-10 dataset into multiple partitions. Each partition will " -"represent the data from a single organization. We're doing this purely " -"for experimentation purposes, in the real world there's no need for data " -"splitting because each organization already has their own data (so the " -"data is naturally partitioned)." +"**Migrate experimental REST API to Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" msgstr "" -"Nous simulons le fait d'avoir plusieurs ensembles de données provenant de" -" plusieurs organisations (également appelé le paramètre \"cross-silo\" " -"dans l'apprentissage fédéré) en divisant l'ensemble de données CIFAR-10 " -"original en plusieurs partitions. Chaque partition représentera les " -"données d'une seule organisation. Nous faisons cela purement à des fins " -"d'expérimentation, dans le monde réel, il n'y a pas besoin de diviser les" -" données parce que chaque organisation a déjà ses propres données (les " -"données sont donc naturellement partitionnées)." +"**Nouvelle stratégie expérimentale TensorBoard** " +"([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:152 +#: ../../source/ref-changelog.md:252 msgid "" -"Each organization will act as a client in the federated learning system. 
" -"So having ten organizations participate in a federation means having ten " -"clients connected to the federated learning server:" +"The (experimental) REST API used to be implemented in " +"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" +" use [Starlette](https://www.starlette.io/) directly." msgstr "" -"Chaque organisation agira comme un client dans le système d'apprentissage" -" fédéré. Ainsi, le fait que dix organisations participent à une " -"fédération signifie que dix clients sont connectés au serveur " -"d'apprentissage fédéré :" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:172 +#: ../../source/ref-changelog.md:254 +#, fuzzy msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap the resulting partitions by creating a PyTorch ``DataLoader`` for " -"each of them:" +"Please note: The REST request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -"Chargeons maintenant l'ensemble de formation et de test CIFAR-10, " -"partitionnons-les en dix ensembles de données plus petits (chacun divisé " -"en ensemble de formation et de validation), et enveloppons les partitions" -" résultantes en créant un PyTorch ``DataLoader`` pour chacun d'entre eux " -":" +"Remarque : l'API REST est encore expérimentale et est susceptible de " +"changer de manière significative au fil du temps." -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:222 +#: ../../source/ref-changelog.md:256 +#, fuzzy msgid "" -"We now have a list of ten training sets and ten validation sets " -"(``trainloaders`` and ``valloaders``) representing the data of ten " -"different organizations. Each ``trainloader``/``valloader`` pair contains" -" 4500 training examples and 500 validation examples. There's also a " -"single ``testloader`` (we did not split the test set). 
Again, this is " -"only necessary for building research or educational systems, actual " -"federated learning systems have their data naturally distributed across " -"multiple partitions." +"**Introduce experimental gRPC request-response API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901))" msgstr "" -"Nous avons maintenant une liste de dix ensembles de formation et dix " -"ensembles de validation (``trainloaders`` et ``valloaders``) représentant" -" les données de dix organisations différentes. Chaque paire " -"``trainloader`/``valloader`` contient 4500 exemples de formation et 500 " -"exemples de validation. Il y a également un seul ``testloader`` (nous " -"n'avons pas divisé l'ensemble de test). Encore une fois, cela n'est " -"nécessaire que pour construire des systèmes de recherche ou d'éducation, " -"les systèmes d'apprentissage fédérés actuels ont leurs données " -"naturellement distribuées à travers plusieurs partitions." +"**Introduire les enveloppes de confidentialité différentielle (aperçu)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:225 +#: ../../source/ref-changelog.md:258 msgid "" -"Let's take a look at the first batch of images and labels in the first " -"training set (i.e., ``trainloaders[0]``) before we move on:" +"In addition to the existing gRPC API (based on bidirectional streaming) " +"and the experimental REST API, there is now a new gRPC API that uses a " +"request-response model to communicate with client nodes." 
msgstr "" -"Jetons un coup d'œil au premier lot d'images et d'étiquettes du premier " -"ensemble d'entraînement (c'est-à-dire ``trainloaders[0]``) avant de " -"poursuivre :" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:264 +#: ../../source/ref-changelog.md:260 +#, fuzzy msgid "" -"The output above shows a random batch of images from the first " -"``trainloader`` in our list of ten ``trainloaders``. It also prints the " -"labels associated with each image (i.e., one of the ten possible labels " -"we've seen above). If you run the cell again, you should see another " -"batch of images." +"Please note: The gRPC request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -"La sortie ci-dessus montre un lot aléatoire d'images provenant du premier" -" ``chargeur de formation`` de notre liste de dix ``chargeurs de " -"formation``. Elle imprime également les étiquettes associées à chaque " -"image (c'est-à-dire l'une des dix étiquettes possibles que nous avons " -"vues ci-dessus). Si tu exécutes à nouveau la cellule, tu devrais voir un " -"autre lot d'images." - -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:276 -msgid "Step 1: Centralized Training with PyTorch" -msgstr "Étape 1 : Formation centralisée avec PyTorch" +"Remarque : l'API REST est encore expérimentale et est susceptible de " +"changer de manière significative au fil du temps." -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:287 +#: ../../source/ref-changelog.md:262 +#, fuzzy msgid "" -"Next, we're going to use PyTorch to define a simple convolutional neural " -"network. This introduction assumes basic familiarity with PyTorch, so it " -"doesn't cover the PyTorch-related aspects in full detail. If you want to " -"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " -"MINUTE BLITZ " -"`__." 
+"**Replace the experimental** `start_client(rest=True)` **with the new** " +"`start_client(transport=\"rest\")` " +"([#1880](https://github.com/adap/flower/pull/1880))" msgstr "" -"Ensuite, nous allons utiliser PyTorch pour définir un simple réseau " -"neuronal convolutif. Cette introduction suppose une familiarité de base " -"avec PyTorch, elle ne couvre donc pas en détail les aspects liés à " -"PyTorch. Si tu veux plonger plus profondément dans PyTorch, nous te " -"recommandons `DEEP LEARNING WITH PYTORCH : A 60 MINUTE BLITZ " -"`__." - -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:299 -msgid "Defining the model" -msgstr "Définir le modèle" +"**Initialise** `start_simulation` **avec une liste d'ID de clients** " +"([#860](https://github.com/adap/flower/pull/860))" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:301 +#: ../../source/ref-changelog.md:264 msgid "" -"We use the simple CNN described in the `PyTorch tutorial " -"`__:" +"The (experimental) `start_client` argument `rest` was deprecated in " +"favour of a new argument `transport`. `start_client(transport=\"rest\")` " +"will yield the same behaviour as `start_client(rest=True)` did before. " +"All code should migrate to the new argument `transport`. The deprecated " +"argument `rest` will be removed in a future release." 
msgstr "" -"Nous utilisons le CNN simple décrit dans le tutoriel `PyTorch " -"`__ :" - -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:338 -msgid "Let's continue with the usual training and test functions:" -msgstr "Poursuivons avec les fonctions habituelles de formation et de test :" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:398 -msgid "Training the model" -msgstr "Entraîne le modèle" - -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:400 +#: ../../source/ref-changelog.md:266 +#, fuzzy msgid "" -"We now have all the basic building blocks we need: a dataset, a model, a " -"training function, and a test function. Let's put them together to train " -"the model on the dataset of one of our organizations " -"(``trainloaders[0]``). This simulates the reality of most machine " -"learning projects today: each organization has their own data and trains " -"models only on this internal data:" +"**Add a new gRPC option** " +"([#2197](https://github.com/adap/flower/pull/2197))" msgstr "" -"Nous avons maintenant tous les éléments de base dont nous avons besoin : " -"un ensemble de données, un modèle, une fonction d'entraînement et une " -"fonction de test. Assemblons-les pour entraîner le modèle sur l'ensemble " -"de données de l'une de nos organisations (``trainloaders[0]``). Cela " -"simule la réalité de la plupart des projets d'apprentissage automatique " -"aujourd'hui : chaque organisation possède ses propres données et entraîne" -" les modèles uniquement sur ces données internes :" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:430 +#: ../../source/ref-changelog.md:268 msgid "" -"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " -"in a test set accuracy of about 41%, which is not good, but at the same " -"time, it doesn't really matter for the purposes of this tutorial. 
The " -"intent was just to show a simplistic centralized training pipeline that " -"sets the stage for what comes next - federated learning!" +"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" +" option set to 0 by default. This prevents the clients from sending " +"keepalive pings when there is no outstanding stream." msgstr "" -"L'entraînement du CNN simple sur notre fractionnement CIFAR-10 pendant 5 " -"époques devrait se traduire par une précision de l'ensemble de test " -"d'environ 41 %, ce qui n'est pas bon, mais en même temps, cela n'a pas " -"vraiment d'importance pour les besoins de ce tutoriel. L'intention était " -"juste de montrer un pipeline d'entraînement centralisé simpliste qui " -"prépare le terrain pour ce qui vient ensuite - l'apprentissage fédéré !" - -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:442 -msgid "Step 2: Federated Learning with Flower" -msgstr "Étape 2 : Apprentissage fédéré avec Flower" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:444 +#: ../../source/ref-changelog.md:270 +#, fuzzy msgid "" -"Step 1 demonstrated a simple centralized training pipeline. All data was " -"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." -" Next, we'll simulate a situation where we have multiple datasets in " -"multiple organizations and where we train a model over these " -"organizations using federated learning." +"**Improve example notebooks** " +"([#2005](https://github.com/adap/flower/pull/2005))" msgstr "" -"L'étape 1 a montré un simple pipeline de formation centralisé. Toutes les" -" données étaient au même endroit (c'est-à-dire un seul ``trainloader`` et" -" un seul ``valloader``). Ensuite, nous allons simuler une situation où " -"nous avons plusieurs ensembles de données dans plusieurs organisations et" -" où nous formons un modèle sur ces organisations à l'aide de " -"l'apprentissage fédéré." 
+"**Supprimer les stratégies expérimentales** " +"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:456 -msgid "Updating model parameters" -msgstr "Mise à jour des paramètres du modèle" +#: ../../source/ref-changelog.md:272 +#, fuzzy +msgid "There's a new 30min Federated Learning PyTorch tutorial!" +msgstr "Bienvenue au tutoriel sur l'apprentissage fédéré de la fleur !" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:458 +#: ../../source/ref-changelog.md:274 msgid "" -"In federated learning, the server sends the global model parameters to " -"the client, and the client updates the local model with the parameters " -"received from the server. It then trains the model on the local data " -"(which changes the model parameters locally) and sends the " -"updated/changed model parameters back to the server (or, alternatively, " -"it sends just the gradients back to the server, not the full model " -"parameters)." +"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" msgstr "" -"Dans l'apprentissage fédéré, le serveur envoie les paramètres du modèle " -"global au client, et le client met à jour le modèle local avec les " -"paramètres reçus du serveur. 
Il entraîne ensuite le modèle sur les " -"données locales (ce qui modifie les paramètres du modèle localement) et " -"renvoie les paramètres du modèle mis à jour/changés au serveur (ou, " -"alternativement, il renvoie seulement les gradients au serveur, et non " -"pas les paramètres complets du modèle)." -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:460 +#: ../../source/ref-changelog.md:276 msgid "" -"We need two helper functions to update the local model with parameters " -"received from the server and to get the updated model parameters from the" -" local model: ``set_parameters`` and ``get_parameters``. The following " -"two functions do just that for the PyTorch model above." +"Many examples have received significant updates, including simplified " +"advanced-tensorflow and advanced-pytorch examples, improved macOS " +"compatibility of TensorFlow examples, and code examples for simulation. A" +" major upgrade is that all code examples now have a `requirements.txt` " +"(in addition to `pyproject.toml`)." msgstr "" -"Nous avons besoin de deux fonctions d'aide pour mettre à jour le modèle " -"local avec les paramètres reçus du serveur et pour obtenir les paramètres" -" mis à jour du modèle local : ``set_parameters`` et ``get_parameters``. " -"Les deux fonctions suivantes font exactement cela pour le modèle PyTorch " -"ci-dessus." -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:462 +#: ../../source/ref-changelog.md:278 +#, fuzzy msgid "" -"The details of how this works are not really important here (feel free to" -" consult the PyTorch documentation if you want to learn more). In " -"essence, we use ``state_dict`` to access PyTorch model parameter tensors." 
-" The parameter tensors are then converted to/from a list of NumPy " -"ndarray's (which Flower knows how to serialize/deserialize):" +"**General improvements** " +"([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" msgstr "" -"Les détails de ce fonctionnement ne sont pas vraiment importants ici " -"(n'hésite pas à consulter la documentation PyTorch si tu veux en savoir " -"plus). En substance, nous utilisons ``state_dict`` pour accéder aux " -"tenseurs de paramètres du modèle PyTorch. Les tenseurs de paramètres sont" -" ensuite convertis en/depuis une liste de ndarray NumPy (que Flower sait " -"sérialiser/désérialiser) :" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:490 -msgid "Implementing a Flower client" -msgstr "Mise en place d'un client Flower" +#: ../../source/ref-changelog.md:284 ../../source/ref-changelog.md:348 +#: ../../source/ref-changelog.md:406 ../../source/ref-changelog.md:475 +#: ../../source/ref-changelog.md:537 +msgid "None" +msgstr "Aucun" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:492 -msgid "" -"With that out of the way, let's move on to the interesting part. " -"Federated learning systems consist of a server and multiple clients. 
In " -"Flower, we create clients by implementing subclasses of " -"``flwr.client.Client`` or ``flwr.client.NumPyClient``. We use " -"``NumPyClient`` in this tutorial because it is easier to implement and " -"requires us to write less boilerplate." -msgstr "" -"Ceci étant dit, passons à la partie intéressante. Les systèmes " -"d'apprentissage fédérés se composent d'un serveur et de plusieurs " -"clients. Dans Flower, nous créons des clients en mettant en œuvre des " -"sous-classes de ``flwr.client.Client`` ou de ``flwr.client.NumPyClient``." -" Nous utilisons ``NumPyClient`` dans ce tutoriel parce qu'il est plus " -"facile à mettre en œuvre et qu'il nous oblige à rédiger moins de modèles " -"de chaudière." +#: ../../source/ref-changelog.md:286 +msgid "v1.4.0 (2023-04-21)" +msgstr "v1.4.0 (2023-04-21)" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:494 +#: ../../source/ref-changelog.md:292 msgid "" -"To implement the Flower client, we create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``:" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" msgstr "" -"Pour mettre en œuvre le client Flower, nous créons une sous-classe de " -"``flwr.client.NumPyClient`` et mettons en œuvre les trois méthodes " -"``get_parameters``, ``fit`` et ``evaluate`` :" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. 
Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:496 -msgid "``get_parameters``: Return the current local model parameters" -msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +#: ../../source/ref-changelog.md:296 +msgid "" +"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " +"example)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" +msgstr "" +"**Introduire la prise en charge de XGBoost (**`FedXgbNnAvg` **stratégie " +"et exemple)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" + +#: ../../source/ref-changelog.md:298 +msgid "" +"XGBoost is a tree-based ensemble machine learning algorithm that uses " +"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" +" " +"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" and a [code " +"example](https://github.com/adap/flower/tree/main/examples/quickstart_xgboost_horizontal)" +" that demonstrates the usage of this new strategy in an XGBoost project." 
+msgstr ""
+"XGBoost est un algorithme d'apprentissage automatique d'ensemble basé "
+"sur les arbres qui utilise le gradient boosting pour améliorer la "
+"précision du modèle. Nous avons ajouté une nouvelle `FedXgbNnAvg` "
+"[stratégie](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py),"
+" et un [exemple de "
+"code](https://github.com/adap/flower/tree/main/examples/quickstart_xgboost_horizontal)"
+" qui démontre l'utilisation de cette nouvelle stratégie dans un projet "
+"XGBoost."
+
+#: ../../source/ref-changelog.md:300
+msgid ""
+"**Introduce iOS SDK (preview)** "
+"([#1621](https://github.com/adap/flower/pull/1621), "
+"[#1764](https://github.com/adap/flower/pull/1764))"
+msgstr ""
+"**Introduction du SDK iOS (aperçu)** "
+"([#1621](https://github.com/adap/flower/pull/1621), "
+"[#1764](https://github.com/adap/flower/pull/1764))"
+
+#: ../../source/ref-changelog.md:302
+msgid ""
+"This is a major update for anyone wanting to implement Federated Learning"
+" on iOS mobile devices. We now have a swift iOS SDK present under "
+"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)"
+" that will facilitate greatly the app creating process. To showcase its "
+"use, the [iOS "
+"example](https://github.com/adap/flower/tree/main/examples/ios) has also "
+"been updated!"
+msgstr ""
+"Il s'agit d'une mise à jour majeure pour tous ceux qui souhaitent mettre "
+"en œuvre l'apprentissage fédéré sur les appareils mobiles iOS. Nous "
+"disposons désormais d'un SDK swift iOS présent sous "
+"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)"
+" qui facilitera grandement le processus de création d'applications. Pour "
+"présenter son utilisation, l'[exemple "
+"iOS](https://github.com/adap/flower/tree/main/examples/ios) a également "
+"été mis à jour !"
+
+#: ../../source/ref-changelog.md:304
+msgid ""
+"**Introduce new \"What is Federated Learning?\" tutorial** "
+"([#1657](https://github.com/adap/flower/pull/1657), "
+"[#1721](https://github.com/adap/flower/pull/1721))"
+msgstr ""
+"**Introduire un nouveau tutoriel \"Qu'est-ce que l'apprentissage fédéré ?"
+" \"** ([#1657](https://github.com/adap/flower/pull/1657), "
+"[#1721](https://github.com/adap/flower/pull/1721))"
+
+#: ../../source/ref-changelog.md:306
+msgid ""
+"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-"
+"what-is-federated-learning.html) in our documentation explains the basics"
+" of Fedetated Learning. It enables anyone who's unfamiliar with Federated"
+" Learning to start their journey with Flower. Forward it to anyone who's "
+"interested in Federated Learning!"
+msgstr ""
+"Un nouveau [tutoriel d'entrée de "
+"gamme](https://flower.ai/docs/framework/tutorial-what-is-federated-"
+"learning.html) dans notre documentation explique les bases de "
+"l'apprentissage fédéré. Il permet à tous ceux qui ne connaissent pas "
+"l'apprentissage fédéré de commencer leur voyage avec Flower. Fais-le "
+"suivre à tous ceux qui s'intéressent à l'apprentissage fédéré !"
+
+#: ../../source/ref-changelog.md:308
+msgid ""
+"**Introduce new Flower Baseline: FedProx MNIST** "
+"([#1513](https://github.com/adap/flower/pull/1513), "
+"[#1680](https://github.com/adap/flower/pull/1680), "
+"[#1681](https://github.com/adap/flower/pull/1681), "
+"[#1679](https://github.com/adap/flower/pull/1679))"
+msgstr ""
+"**Introduire une nouvelle Flower Baseline : FedProx MNIST** "
+"([#1513](https://github.com/adap/flower/pull/1513), "
+"[#1680](https://github.com/adap/flower/pull/1680), "
+"[#1681](https://github.com/adap/flower/pull/1681), "
+"[#1679](https://github.com/adap/flower/pull/1679))"
+
+#: ../../source/ref-changelog.md:310
+msgid ""
+"This new baseline replicates the MNIST+CNN task from the paper [Federated"
+" Optimization in Heterogeneous Networks (Li et al., "
+"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy,"
+" which aims at making convergence more robust in heterogenous settings."
+msgstr ""
+"Cette nouvelle ligne de base reproduit la tâche MNIST+CNN de l'article "
+"[Federated Optimization in Heterogeneous Networks (Li et al., "
+"2018)](https://arxiv.org/abs/1812.06127). Elle utilise la stratégie "
+"`FedProx`, qui vise à rendre la convergence plus robuste dans des "
+"contextes hétérogènes."
+
+#: ../../source/ref-changelog.md:312
+msgid ""
+"**Introduce new Flower Baseline: FedAvg FEMNIST** "
+"([#1655](https://github.com/adap/flower/pull/1655))"
+msgstr ""
+"**Introduire une nouvelle Flower Baseline : FedAvg FEMNIST** "
+"([#1655](https://github.com/adap/flower/pull/1655))"
+
+#: ../../source/ref-changelog.md:314
+msgid ""
+"This new baseline replicates an experiment evaluating the performance of "
+"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A "
+"Benchmark for Federated Settings (Caldas et al., "
+"2018)](https://arxiv.org/abs/1812.01097)."
+msgstr ""
+"Cette nouvelle ligne de base reproduit une expérience évaluant les "
+"performances de l'algorithme FedAvg sur le jeu de données FEMNIST tiré de"
+" l'article [LEAF : A Benchmark for Federated Settings (Caldas et al., "
+"2018)](https://arxiv.org/abs/1812.01097)."
+
+#: ../../source/ref-changelog.md:316
+msgid ""
+"**Introduce (experimental) REST API** "
+"([#1594](https://github.com/adap/flower/pull/1594), "
+"[#1690](https://github.com/adap/flower/pull/1690), "
+"[#1695](https://github.com/adap/flower/pull/1695), "
+"[#1712](https://github.com/adap/flower/pull/1712), "
+"[#1802](https://github.com/adap/flower/pull/1802), "
+"[#1770](https://github.com/adap/flower/pull/1770), "
+"[#1733](https://github.com/adap/flower/pull/1733))"
+msgstr ""
+"**Introduire l'API REST (expérimentale)** "
+"([#1594](https://github.com/adap/flower/pull/1594), "
+"[#1690](https://github.com/adap/flower/pull/1690), "
+"[#1695](https://github.com/adap/flower/pull/1695), "
+"[#1712](https://github.com/adap/flower/pull/1712), "
+"[#1802](https://github.com/adap/flower/pull/1802), "
+"[#1770](https://github.com/adap/flower/pull/1770), "
+"[#1733](https://github.com/adap/flower/pull/1733))"
+
+#: ../../source/ref-changelog.md:318
+msgid ""
+"A new REST API has been introduced as an alternative to the gRPC-based "
+"communication stack. In this initial version, the REST API only supports "
+"anonymous clients."
+msgstr ""
+"Une nouvelle API REST a été introduite comme alternative à la pile de "
+"communication basée sur gRPC. Dans cette version initiale, l'API REST ne "
+"prend en charge que les clients anonymes."
+
+#: ../../source/ref-changelog.md:320
+msgid ""
+"Please note: The REST API is still experimental and will likely change "
+"significantly over time."
+msgstr ""
+"Remarque : l'API REST est encore expérimentale et est susceptible de "
+"changer de manière significative au fil du temps."
+ +#: ../../source/ref-changelog.md:322 +msgid "" +"**Improve the (experimental) Driver API** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" +msgstr "" +"**Améliorer l'API (expérimentale) du pilote** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" + +#: ../../source/ref-changelog.md:324 +msgid "" +"The Driver API is still an experimental feature, but this release " +"introduces some major upgrades. One of the main improvements is the " +"introduction of an SQLite database to store server state on disk (instead" +" of in-memory). Another improvement is that tasks (instructions or " +"results) that have been delivered will now be deleted. This greatly " +"improves the memory efficiency of a long-running Flower server." +msgstr "" +"L'API du pilote est encore une fonction expérimentale, mais cette version" +" introduit quelques améliorations majeures. L'une des principales " +"améliorations est l'introduction d'une base de données SQLite pour " +"stocker l'état du serveur sur le disque (au lieu de la mémoire). 
Une " +"autre amélioration est que les tâches (instructions ou résultats) qui ont" +" été livrées seront désormais supprimées, ce qui améliore " +"considérablement l'efficacité de la mémoire d'un serveur Flower " +"fonctionnant depuis longtemps." + +#: ../../source/ref-changelog.md:326 +msgid "" +"**Fix spilling issues related to Ray during simulations** " +"([#1698](https://github.com/adap/flower/pull/1698))" +msgstr "" +"**Répare les problèmes de déversement liés à Ray pendant les " +"simulations** ([#1698](https://github.com/adap/flower/pull/1698))" + +#: ../../source/ref-changelog.md:328 +#, fuzzy +msgid "" +"While running long simulations, `ray` was sometimes spilling huge amounts" +" of data that would make the training unable to continue. This is now " +"fixed! 🎉" +msgstr "" +"Lors de l'exécution de longues simulations, `ray` déversait parfois " +"d'énormes quantités de données qui rendaient l'entraînement incapable de " +"continuer. ce problème est maintenant corrigé ! 🎉" + +#: ../../source/ref-changelog.md:330 +msgid "" +"**Add new example using** `TabNet` **and Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" +msgstr "" +"**Ajouter un nouvel exemple utilisant** `TabNet` **et Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" + +#: ../../source/ref-changelog.md:332 +msgid "" +"TabNet is a powerful and flexible framework for training machine learning" +" models on tabular data. We now have a federated example using Flower: " +"[https://github.com/adap/flower/tree/main/examples/tabnet](https://github.com/adap/flower/tree/main/examples/quickstart_tabnet)." +msgstr "" +"TabNet est un cadre puissant et flexible pour former des modèles " +"d'apprentissage automatique sur des données tabulaires. Nous avons " +"maintenant un exemple fédéré utilisant Flower : " +"[https://github.com/adap/flower/tree/main/examples/tabnet](https://github.com/adap/flower/tree/main/examples/quickstart_tabnet)." 
+
+#: ../../source/ref-changelog.md:334
+msgid ""
+"**Add new how-to guide for monitoring simulations** "
+"([#1649](https://github.com/adap/flower/pull/1649))"
+msgstr ""
+"**Ajouter un nouveau guide pratique pour le suivi des simulations** "
+"([#1649](https://github.com/adap/flower/pull/1649))"
+
+#: ../../source/ref-changelog.md:336
+msgid ""
+"We now have a documentation guide to help users monitor their performance"
+" during simulations."
+msgstr ""
+"Nous avons maintenant un guide de documentation pour aider les "
+"utilisateurs à surveiller leurs performances pendant les simulations."
+
+#: ../../source/ref-changelog.md:338
+msgid ""
+"**Add training metrics to** `History` **object during simulations** "
+"([#1696](https://github.com/adap/flower/pull/1696))"
+msgstr ""
+"**Ajouter des mesures d'entraînement à l'objet** `History` **pendant les "
+"simulations** ([#1696](https://github.com/adap/flower/pull/1696))"
+
+#: ../../source/ref-changelog.md:340
+msgid ""
+"The `fit_metrics_aggregation_fn` can be used to aggregate training "
+"metrics, but previous releases did not save the results in the `History` "
+"object. This is now the case!"
+msgstr ""
+"La fonction `fit_metrics_aggregation_fn` peut être utilisée pour agréger "
+"les mesures d'entraînement, mais les versions précédentes "
+"n'enregistraient pas les résultats dans l'objet `History`. C'est "
+"désormais le cas !"
+ +#: ../../source/ref-changelog.md:342 +msgid "" +"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " +"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " 
+"[#1775](https://github.com/adap/flower/pull/1775), "
+"[#1776](https://github.com/adap/flower/pull/1776), "
+"[#1777](https://github.com/adap/flower/pull/1777), "
+"[#1779](https://github.com/adap/flower/pull/1779), "
+"[#1784](https://github.com/adap/flower/pull/1784), "
+"[#1773](https://github.com/adap/flower/pull/1773), "
+"[#1755](https://github.com/adap/flower/pull/1755), "
+"[#1789](https://github.com/adap/flower/pull/1789), "
+"[#1788](https://github.com/adap/flower/pull/1788), "
+"[#1798](https://github.com/adap/flower/pull/1798), "
+"[#1799](https://github.com/adap/flower/pull/1799), "
+"[#1739](https://github.com/adap/flower/pull/1739), "
+"[#1800](https://github.com/adap/flower/pull/1800), "
+"[#1804](https://github.com/adap/flower/pull/1804), "
+"[#1805](https://github.com/adap/flower/pull/1805))"
+msgstr ""
+"**General improvements** "
+"([#1659](https://github.com/adap/flower/pull/1659), "
+"[#1646](https://github.com/adap/flower/pull/1646), "
+"[#1647](https://github.com/adap/flower/pull/1647), "
+"[#1471](https://github.com/adap/flower/pull/1471), "
+"[#1648](https://github.com/adap/flower/pull/1648), "
+"[#1651](https://github.com/adap/flower/pull/1651), "
+"[#1652](https://github.com/adap/flower/pull/1652), "
+"[#1653](https://github.com/adap/flower/pull/1653), "
+"[#1659](https://github.com/adap/flower/pull/1659), "
+"[#1665](https://github.com/adap/flower/pull/1665), "
+"[#1670](https://github.com/adap/flower/pull/1670), "
+"[#1672](https://github.com/adap/flower/pull/1672), "
+"[#1677](https://github.com/adap/flower/pull/1677), "
+"[#1684](https://github.com/adap/flower/pull/1684), "
+"[#1683](https://github.com/adap/flower/pull/1683), "
+"[#1686](https://github.com/adap/flower/pull/1686), "
+"[#1682](https://github.com/adap/flower/pull/1682), "
+"[#1685](https://github.com/adap/flower/pull/1685), "
+"[#1692](https://github.com/adap/flower/pull/1692), "
+"[#1705](https://github.com/adap/flower/pull/1705), "
+"[#1708](https://github.com/adap/flower/pull/1708), "
+"[#1711](https://github.com/adap/flower/pull/1711), "
+"[#1713](https://github.com/adap/flower/pull/1713), "
+"[#1714](https://github.com/adap/flower/pull/1714), "
+"[#1718](https://github.com/adap/flower/pull/1718), "
+"[#1716](https://github.com/adap/flower/pull/1716), "
+"[#1723](https://github.com/adap/flower/pull/1723), "
+"[#1735](https://github.com/adap/flower/pull/1735), "
+"[#1678](https://github.com/adap/flower/pull/1678), "
+"[#1750](https://github.com/adap/flower/pull/1750), "
+"[#1753](https://github.com/adap/flower/pull/1753), "
+"[#1736](https://github.com/adap/flower/pull/1736), "
+"[#1766](https://github.com/adap/flower/pull/1766), "
+"[#1760](https://github.com/adap/flower/pull/1760), "
+"[#1775](https://github.com/adap/flower/pull/1775), "
+"[#1776](https://github.com/adap/flower/pull/1776), "
+"[#1777](https://github.com/adap/flower/pull/1777), "
+"[#1779](https://github.com/adap/flower/pull/1779), "
+"[#1784](https://github.com/adap/flower/pull/1784), "
+"[#1773](https://github.com/adap/flower/pull/1773), "
+"[#1755](https://github.com/adap/flower/pull/1755), "
+"[#1789](https://github.com/adap/flower/pull/1789), "
+"[#1788](https://github.com/adap/flower/pull/1788), "
+"[#1798](https://github.com/adap/flower/pull/1798), "
+"[#1799](https://github.com/adap/flower/pull/1799), "
+"[#1739](https://github.com/adap/flower/pull/1739), "
+"[#1800](https://github.com/adap/flower/pull/1800), "
+"[#1804](https://github.com/adap/flower/pull/1804), "
+"[#1805](https://github.com/adap/flower/pull/1805))"
+
+#: ../../source/ref-changelog.md:350
+msgid "v1.3.0 (2023-02-06)"
+msgstr "v1.3.0 (2023-02-06)"
+
+#: ../../source/ref-changelog.md:356
+msgid ""
+"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, "
+"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`"
+msgstr ""
+"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, "
+"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`"
+
+#: ../../source/ref-changelog.md:360
+msgid ""
+"**Add support for** `workload_id` **and** `group_id` **in Driver API** "
+"([#1595](https://github.com/adap/flower/pull/1595))"
+msgstr ""
+"**Ajouter la prise en charge de** `workload_id` **et** `group_id` **dans "
+"l'API du pilote** ([#1595](https://github.com/adap/flower/pull/1595))"
+
+#: ../../source/ref-changelog.md:362
+msgid ""
+"The (experimental) Driver API now supports a `workload_id` that can be "
+"used to identify which workload a task belongs to. It also supports a new"
+" `group_id` that can be used, for example, to indicate the current "
+"training round. Both the `workload_id` and `group_id` enable client nodes"
+" to decide whether they want to handle a task or not."
+msgstr ""
+"L'API (expérimentale) Driver prend désormais en charge un `workload_id` "
+"qui peut être utilisé pour identifier la charge de travail à laquelle une"
+" tâche appartient. Elle prend également en charge un nouveau `group_id` "
+"qui peut être utilisé, par exemple, pour indiquer le cycle de formation "
+"en cours. Le `workload_id` et le `group_id` permettent tous deux aux "
+"nœuds clients de décider s'ils veulent traiter une tâche ou non."
+ +#: ../../source/ref-changelog.md:364 +msgid "" +"**Make Driver API and Fleet API address configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" +msgstr "" +"**Faire en sorte que l'adresse de l'API du conducteur et de l'API de la " +"flotte soit configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" + +#: ../../source/ref-changelog.md:366 +msgid "" +"The (experimental) long-running Flower server (Driver API and Fleet API) " +"can now configure the server address of both Driver API (via `--driver-" +"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" +msgstr "" +"Le serveur Flower (expérimental) de longue durée (Driver API et Fleet " +"API) peut maintenant configurer l'adresse du serveur de Driver API (via " +"`--driver-api-address`) et de Fleet API (via `--fleet-api-address`) lors " +"de son démarrage :" + +#: ../../source/ref-changelog.md:368 +#, fuzzy +msgid "" +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" +msgstr "" +"``flower-superlink --driver-api-address \"0.0.0.0:8081\" --fleet-api-" +"address \"0.0.0.0:8086\" ``" + +#: ../../source/ref-changelog.md:370 +msgid "Both IPv4 and IPv6 addresses are supported." +msgstr "Les adresses IPv4 et IPv6 sont toutes deux prises en charge." + +#: ../../source/ref-changelog.md:372 +msgid "" +"**Add new example of Federated Learning using fastai and Flower** " +"([#1598](https://github.com/adap/flower/pull/1598))" +msgstr "" +"**Ajouter un nouvel exemple d'apprentissage fédéré utilisant fastai et " +"Flower** ([#1598](https://github.com/adap/flower/pull/1598))" + +#: ../../source/ref-changelog.md:374 +msgid "" +"A new code example (`quickstart_fastai`) demonstrates federated learning " +"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " +"[quickstart_fastai](https://github.com/adap/flower/tree/main/examples/quickstart_fastai)." 
+msgstr "" +"Un nouvel exemple de code (`quickstart_fastai`) démontre l'apprentissage " +"fédéré avec [fastai](https://www.fast.ai/) et Flower. Tu peux le trouver " +"ici : " +"[quickstart_fastai](https://github.com/adap/flower/tree/main/examples/quickstart_fastai)." + +#: ../../source/ref-changelog.md:376 +msgid "" +"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" +" versions of Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" +msgstr "" +"**Rendre l'exemple Android compatible avec** `flwr >= 1.0.0` **et les " +"dernières versions d'Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" + +#: ../../source/ref-changelog.md:378 +#, fuzzy +msgid "" +"The Android code example has received a substantial update: the project " +"is compatible with Flower 1.0 (and later), the UI received a full " +"refresh, and the project is updated to be compatible with newer Android " +"tooling." +msgstr "" +"L'exemple de code Android a reçu une mise à jour substantielle : le " +"projet est compatible avec Flower 1.0 et les versions ultérieures, " +"l'interface utilisateur a reçu un rafraîchissement complet, et le projet " +"est mis à jour pour être compatible avec les outils Android les plus " +"récents." + +#: ../../source/ref-changelog.md:380 +msgid "" +"**Add new `FedProx` strategy** " +"([#1619](https://github.com/adap/flower/pull/1619))" +msgstr "" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" + +#: ../../source/ref-changelog.md:382 +msgid "" +"This " +"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" is almost identical to " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" but helps users replicate what is described in this " +"[paper](https://arxiv.org/abs/1812.06127). 
It essentially adds a " +"parameter called `proximal_mu` to regularize the local models with " +"respect to the global models." +msgstr "" +"Cette " +"[stratégie](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" est presque identique à " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" mais aide les utilisateurs à reproduire ce qui est décrit dans cet " +"[article](https://arxiv.org/abs/1812.06127). Elle ajoute essentiellement " +"un paramètre appelé `proximal_mu` pour régulariser les modèles locaux par" +" rapport aux modèles globaux." + +#: ../../source/ref-changelog.md:384 +msgid "" +"**Add new metrics to telemetry events** " +"([#1640](https://github.com/adap/flower/pull/1640))" +msgstr "" +"**Ajouter de nouvelles métriques aux événements de télémétrie** " +"([#1640](https://github.com/adap/flower/pull/1640))" + +#: ../../source/ref-changelog.md:386 +msgid "" +"An updated event structure allows, for example, the clustering of events " +"within the same workload." +msgstr "" +"Une structure d'événements mise à jour permet, par exemple, de regrouper " +"des événements au sein d'une même charge de travail." 
+ +#: ../../source/ref-changelog.md:388 +msgid "" +"**Add new custom strategy tutorial section** " +"[#1623](https://github.com/adap/flower/pull/1623)" +msgstr "" +"**Ajouter une nouvelle section de tutoriel sur les stratégies " +"personnalisées** [#1623](https://github.com/adap/flower/pull/1623)" + +#: ../../source/ref-changelog.md:390 +#, fuzzy +msgid "" +"The Flower tutorial now has a new section that covers implementing a " +"custom strategy from scratch: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +msgstr "" +"Le tutoriel sur les fleurs comporte désormais une nouvelle section qui " +"traite de la mise en œuvre d'une stratégie personnalisée à partir de zéro" +" : [Ouvrir dans " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial/Flower-3-Building-a" +"-Strategy-PyTorch.ipynb)" + +#: ../../source/ref-changelog.md:392 +msgid "" +"**Add new custom serialization tutorial section** " +"([#1622](https://github.com/adap/flower/pull/1622))" +msgstr "" +"**Ajouter une nouvelle section de tutoriel sur la sérialisation " +"personnalisée** ([#1622](https://github.com/adap/flower/pull/1622))" + +#: ../../source/ref-changelog.md:394 +#, fuzzy +msgid "" +"The Flower tutorial now has a new section that covers custom " +"serialization: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" +msgstr "" +"Le tutoriel sur les fleurs comporte désormais une nouvelle section qui " +"traite de la sérialisation personnalisée : [Ouvrir dans " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial/Flower-4" +"-Client-and-NumPyClient-PyTorch.ipynb)" + +#: ../../source/ref-changelog.md:396 +msgid "" +"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " 
+"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github.com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" +msgstr "" +"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " 
+"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/ada" + +#: ../../source/ref-changelog.md:400 +msgid "" +"**Updated documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" +msgstr "" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" + +#: ../../source/ref-changelog.md:402 ../../source/ref-changelog.md:469 +msgid "" +"As usual, the documentation has improved quite a bit. 
It is another step "
+"in our effort to make the Flower documentation the best documentation of "
+"any project. Stay tuned and as always, feel free to provide feedback!"
+msgstr ""
+"Comme d'habitude, la documentation s'est beaucoup améliorée. C'est une "
+"autre étape dans notre effort pour faire de la documentation de Flower la"
+" meilleure documentation de tout projet. Reste à l'écoute et comme "
+"toujours, n'hésite pas à nous faire part de tes commentaires !"
+
+#: ../../source/ref-changelog.md:408
+msgid "v1.2.0 (2023-01-13)"
+msgstr "v1.2.0 (2023-01-13)"
+
+#: ../../source/ref-changelog.md:414
+msgid ""
+"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L."
+" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`"
+msgstr ""
+"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L. "
+"Jiang`, `Ragy`, `Taner Topal`, `dannymcy`"
+
+#: ../../source/ref-changelog.md:418
+msgid ""
+"**Introduce new Flower Baseline: FedAvg MNIST** "
+"([#1497](https://github.com/adap/flower/pull/1497), "
+"[#1552](https://github.com/adap/flower/pull/1552))"
+msgstr ""
+"**Introduire une nouvelle Flower Baseline : FedAvg MNIST** "
+"([#1497](https://github.com/adap/flower/pull/1497), "
+"[#1552](https://github.com/adap/flower/pull/1552))"
+
+#: ../../source/ref-changelog.md:420
+msgid ""
+"Over the coming weeks, we will be releasing a number of new reference "
+"implementations useful especially to FL newcomers. They will typically "
+"revisit well known papers from the literature, and be suitable for "
+"integration in your own application or for experimentation, in order to "
+"deepen your knowledge of FL in general. Today's release is the first in "
+"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-"
+"pack-fedavg-mnist-cnn/)"
+msgstr ""
+"Au cours des prochaines semaines, nous publierons un certain nombre de "
+"nouvelles implémentations de référence utiles en particulier pour les "
+"nouveaux venus en FL. 
Elles revisiteront généralement des articles bien " +"connus de la littérature, et seront adaptées à l'intégration dans votre " +"propre application ou à l'expérimentation, afin d'approfondir votre " +"connaissance de FL en général. La publication d'aujourd'hui est la " +"première de cette série. [Lire la " +"suite.](https://flower.ai/blog/2023-01-12-fl-starter-pack-fedavg-mnist-" +"cnn/)" + +#: ../../source/ref-changelog.md:422 +msgid "" +"**Improve GPU support in simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" +msgstr "" +"**Améliorer la prise en charge des GPU dans les simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" + +#: ../../source/ref-changelog.md:424 +msgid "" +"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" +" to improve GPU support. The update includes some of the hard-earned " +"lessons from scaling simulations in GPU cluster environments. New " +"defaults make running GPU-based simulations substantially more robust." +msgstr "" +"Le moteur client virtuel basé sur Ray (`start_simulation`) a été mis à " +"jour pour améliorer la prise en charge des GPU. La mise à jour inclut " +"certaines des leçons durement apprises lors de la mise à l'échelle des " +"simulations dans des environnements de grappes de GPU. De nouveaux " +"paramètres par défaut rendent l'exécution des simulations basées sur les " +"GPU beaucoup plus robuste." + +#: ../../source/ref-changelog.md:426 +msgid "" +"**Improve GPU support in Jupyter Notebook tutorials** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" +msgstr "" +"**Améliorer la prise en charge du GPU dans les tutoriels Jupyter " +"Notebook** ([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" + +#: ../../source/ref-changelog.md:428 +msgid "" +"Some users reported that Jupyter Notebooks have not always been easy to " +"use on GPU instances. 
We listened and made improvements to all of our " +"Jupyter notebooks! Check out the updated notebooks here:" +msgstr "" +"Certains utilisateurs ont signalé que les carnets Jupyter n'ont pas " +"toujours été faciles à utiliser sur les instances GPU. Nous les avons " +"écoutés et avons apporté des améliorations à tous nos carnets Jupyter ! " +"Découvre les carnets mis à jour ici :" + +#: ../../source/ref-changelog.md:430 +#, fuzzy +msgid "" +"[An Introduction to Federated Learning](https://flower.ai/docs/framework" +"/tutorial-get-started-with-flower-pytorch.html)" +msgstr "" +"[Une introduction à l'apprentissage fédéré] " +"(https://flower.ai/docs/tutorial/Flower-1-Intro-to-FL-PyTorch.html)" + +#: ../../source/ref-changelog.md:431 +#, fuzzy +msgid "" +"[Strategies in Federated Learning](https://flower.ai/docs/framework" +"/tutorial-use-a-federated-learning-strategy-pytorch.html)" +msgstr "" +"[Stratégies d'apprentissage fédéré] " +"(https://flower.ai/docs/tutorial/Flower-2-Strategies-in-FL-PyTorch.html)" + +#: ../../source/ref-changelog.md:432 +#, fuzzy +msgid "" +"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" +"-strategy-from-scratch-pytorch.html)" +msgstr "" +"[Construire une stratégie] " +"(https://flower.ai/docs/tutorial/Flower-3-Building-a-Strategy-" +"PyTorch.html)" + +#: ../../source/ref-changelog.md:433 +#, fuzzy +msgid "" +"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" +"customize-the-client-pytorch.html)" +msgstr "" +"[Client et NumPyClient] (https://flower.ai/docs/tutorial/Flower-4" +"-Client-and-NumPyClient-PyTorch.html)" + +#: ../../source/ref-changelog.md:435 +msgid "" +"**Introduce optional telemetry** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" +msgstr "" +"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " 
+"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" + +#: ../../source/ref-changelog.md:437 +msgid "" +"After a [request for " +"feedback](https://github.com/adap/flower/issues/1534) from the community," +" the Flower open-source project introduces optional collection of " +"*anonymous* usage metrics to make well-informed decisions to improve " +"Flower. Doing this enables the Flower team to understand how Flower is " +"used and what challenges users might face." +msgstr "" +"À la suite d'une [demande de commentaires] " +"(https://github.com/adap/flower/issues/1534) de la part de la communauté," +" le projet open-source Flower introduit la collecte optionnelle de " +"mesures d'utilisation *anonymes* afin de prendre des décisions éclairées " +"pour améliorer Flower. Cela permet à l'équipe de Flower de comprendre " +"comment Flower est utilisé et quels sont les défis auxquels les " +"utilisateurs peuvent être confrontés." + +#: ../../source/ref-changelog.md:439 +#, fuzzy +msgid "" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users who do not want to share anonymous usage metrics. " +"[Read more.](https://flower.ai/docs/telemetry.html)." +msgstr "" +"**Flower est un cadre convivial pour l'IA collaborative et la science des" +" données.** Restant fidèle à cette déclaration, Flower permet de " +"désactiver facilement la télémétrie pour les utilisateurs qui ne " +"souhaitent pas partager des métriques d'utilisation anonymes.[Lire la " +"suite.](https://flower.ai/docs/telemetry.html)." 
+
+#: ../../source/ref-changelog.md:441
+msgid ""
+"**Introduce (experimental) Driver API** "
+"([#1520](https://github.com/adap/flower/pull/1520), "
+"[#1525](https://github.com/adap/flower/pull/1525), "
+"[#1545](https://github.com/adap/flower/pull/1545), "
+"[#1546](https://github.com/adap/flower/pull/1546), "
+"[#1550](https://github.com/adap/flower/pull/1550), "
+"[#1551](https://github.com/adap/flower/pull/1551), "
+"[#1567](https://github.com/adap/flower/pull/1567))"
+msgstr ""
+"**Introduire l'API Driver (expérimentale)** "
+"([#1520](https://github.com/adap/flower/pull/1520), "
+"[#1525](https://github.com/adap/flower/pull/1525), "
+"[#1545](https://github.com/adap/flower/pull/1545), "
+"[#1546](https://github.com/adap/flower/pull/1546), "
+"[#1550](https://github.com/adap/flower/pull/1550), "
+"[#1551](https://github.com/adap/flower/pull/1551), "
+"[#1567](https://github.com/adap/flower/pull/1567))"
+
+#: ../../source/ref-changelog.md:443
+msgid ""
+"Flower now has a new (experimental) Driver API which will enable fully "
+"programmable, async, and multi-tenant Federated Learning and Federated "
+"Analytics applications. Phew, that's a lot! Going forward, the Driver API"
+" will be the abstraction that many upcoming features will be built on - "
+"and you can start building those things now, too."
+msgstr ""
+"Flower dispose désormais d'une nouvelle API de pilote (expérimentale) qui"
+" permettra de créer des applications Federated Learning et Federated "
+"Analytics entièrement programmables, asynchrones et multi-tenant. Ouf, "
+"c'est beaucoup ! À l'avenir, l'API de pilote sera l'abstraction sur "
+"laquelle de nombreuses fonctionnalités à venir seront construites - et tu"
+" peux commencer à construire ces choses dès maintenant, aussi."
+
+#: ../../source/ref-changelog.md:445
+msgid ""
+"The Driver API also enables a new execution mode in which the server runs"
+" indefinitely. Multiple individual workloads can run concurrently and "
+"start and stop their execution independent of the server. 
This is " +"especially useful for users who want to deploy Flower in production." +msgstr "" +"L'API du pilote permet également un nouveau mode d'exécution dans lequel " +"le serveur s'exécute indéfiniment. Plusieurs charges de travail " +"individuelles peuvent s'exécuter simultanément et démarrer et arrêter " +"leur exécution indépendamment du serveur. Ceci est particulièrement utile" +" pour les utilisateurs qui souhaitent déployer Flower en production." + +#: ../../source/ref-changelog.md:447 +msgid "" +"To learn more, check out the `mt-pytorch` code example. We look forward " +"to you feedback!" +msgstr "" +"Pour en savoir plus, consulte l'exemple de code `mt-pytorch`. Nous " +"attendons tes commentaires avec impatience !" + +#: ../../source/ref-changelog.md:449 +msgid "" +"Please note: *The Driver API is still experimental and will likely change" +" significantly over time.*" +msgstr "" +"Remarque : *L'API du pilote est encore expérimentale et est susceptible " +"de changer de manière significative au fil du temps.*" + +#: ../../source/ref-changelog.md:451 +msgid "" +"**Add new Federated Analytics with Pandas example** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" +msgstr "" +"**Ajouter un nouvel exemple de Federated Analytics avec Pandas** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" + +#: ../../source/ref-changelog.md:453 +msgid "" +"A new code example (`quickstart_pandas`) demonstrates federated analytics" +" with Pandas and Flower. You can find it here: " +"[quickstart_pandas](https://github.com/adap/flower/tree/main/examples/quickstart_pandas)." +msgstr "" +"Un nouvel exemple de code (`quickstart_pandas`) démontre l'analyse " +"fédérée avec Pandas et Flower. Tu peux le trouver ici : " +"[quickstart_pandas](https://github.com/adap/flower/tree/main/examples/quickstart_pandas)." 
+ +#: ../../source/ref-changelog.md:455 +msgid "" +"**Add new strategies: Krum and MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" +msgstr "" +"**Ajouter de nouvelles stratégies : Krum et MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" + +#: ../../source/ref-changelog.md:457 +msgid "" +"Edoardo, a computer science student at the Sapienza University of Rome, " +"contributed a new `Krum` strategy that enables users to easily use Krum " +"and MultiKrum in their workloads." +msgstr "" +"Edoardo, étudiant en informatique à l'Université Sapienza de Rome, a " +"contribué à une nouvelle stratégie `Krum` qui permet aux utilisateurs " +"d'utiliser facilement Krum et MultiKrum dans leurs charges de travail." + +#: ../../source/ref-changelog.md:459 +msgid "" +"**Update C++ example to be compatible with Flower v1.2.0** " +"([#1495](https://github.com/adap/flower/pull/1495))" +msgstr "" +"**Mettre à jour l'exemple C++ pour qu'il soit compatible avec Flower " +"v1.2.0** ([#1495](https://github.com/adap/flower/pull/1495))" + +#: ../../source/ref-changelog.md:461 +msgid "" +"The C++ code example has received a substantial update to make it " +"compatible with the latest version of Flower." +msgstr "" +"L'exemple de code C++ a reçu une mise à jour substantielle pour le rendre" +" compatible avec la dernière version de Flower." 
+ +#: ../../source/ref-changelog.md:463 +msgid "" +"**General improvements** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" +msgstr "" +"**Améliorations générales** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" + +#: ../../source/ref-changelog.md:467 +msgid "" +"**Updated documentation** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " 
+"[#1518](https://github.com/adap/flower/pull/1518), "
+"[#1519](https://github.com/adap/flower/pull/1519), "
+"[#1515](https://github.com/adap/flower/pull/1515))"
+msgstr ""
+"**Documentation mise à jour** "
+"([#1494](https://github.com/adap/flower/pull/1494), "
+"[#1496](https://github.com/adap/flower/pull/1496), "
+"[#1500](https://github.com/adap/flower/pull/1500), "
+"[#1503](https://github.com/adap/flower/pull/1503), "
+"[#1505](https://github.com/adap/flower/pull/1505), "
+"[#1524](https://github.com/adap/flower/pull/1524), "
+"[#1518](https://github.com/adap/flower/pull/1518), "
+"[#1519](https://github.com/adap/flower/pull/1519), "
+"[#1515](https://github.com/adap/flower/pull/1515))"
+
+#: ../../source/ref-changelog.md:471
+msgid ""
+"One highlight is the new [first time contributor "
+"guide](https://flower.ai/docs/first-time-contributors.html): if you've "
+"never contributed on GitHub before, this is the perfect place to start!"
+msgstr ""
+"L'un des points forts est le nouveau [guide du premier "
+"contributeur](https://flower.ai/docs/first-time-contributors.html) : si "
+"tu n'as jamais contribué sur GitHub auparavant, c'est l'endroit idéal "
+"pour commencer !"
+
+#: ../../source/ref-changelog.md:477
+msgid "v1.1.0 (2022-10-31)"
+msgstr "v1.1.0 (2022-10-31)"
+
+#: ../../source/ref-changelog.md:481
+msgid ""
+"We would like to give our **special thanks** to all the contributors who "
+"made the new version of Flower possible (in `git shortlog` order):"
+msgstr ""
+"Nous aimerions **remercier tout particulièrement** tous les contributeurs"
+" qui ont rendu possible la nouvelle version de Flower (dans l'ordre `git "
+"shortlog`) :"
+
+#: ../../source/ref-changelog.md:483
+msgid ""
+"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" +msgstr "" +"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" + +#: ../../source/ref-changelog.md:487 +msgid "" +"**Introduce Differential Privacy wrappers (preview)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" +msgstr "" +"**Introduire les enveloppes de confidentialité différentielle (aperçu)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" + +#: ../../source/ref-changelog.md:489 +msgid "" +"The first (experimental) preview of pluggable Differential Privacy " +"wrappers enables easy configuration and usage of differential privacy " +"(DP). The pluggable DP wrappers enable framework-agnostic **and** " +"strategy-agnostic usage of both client-side DP and server-side DP. Head " +"over to the Flower docs, a new explainer goes into more detail." +msgstr "" +"Le premier aperçu (expérimental) des wrappers enfichables de " +"confidentialité différentielle permet de configurer et d'utiliser " +"facilement la confidentialité différentielle (DP). Les wrappers DP " +"enfichables permettent une utilisation agnostique du cadre **et** de la " +"stratégie à la fois de la DP côté client et de la DP côté serveur. Va " +"voir les documents de Flower, un nouvel explicatif va plus loin dans les " +"détails." 
+ +#: ../../source/ref-changelog.md:491 +msgid "" +"**New iOS CoreML code example** " +"([#1289](https://github.com/adap/flower/pull/1289))" +msgstr "" +"**Nouvel exemple de code CoreML pour iOS** " +"([#1289](https://github.com/adap/flower/pull/1289))" + +#: ../../source/ref-changelog.md:493 +msgid "" +"Flower goes iOS! A massive new code example shows how Flower clients can " +"be built for iOS. The code example contains both Flower iOS SDK " +"components that can be used for many tasks, and one task example running " +"on CoreML." +msgstr "" +"Flower passe à iOS ! Un nouvel exemple de code massif montre comment les " +"clients Flower peuvent être construits pour iOS. L'exemple de code " +"contient à la fois des composants Flower iOS SDK qui peuvent être " +"utilisés pour de nombreuses tâches, et un exemple de tâche fonctionnant " +"sur CoreML." + +#: ../../source/ref-changelog.md:495 +msgid "" +"**New FedMedian strategy** " +"([#1461](https://github.com/adap/flower/pull/1461))" +msgstr "" +"**Nouvelle stratégie de FedMedian** " +"([#1461](https://github.com/adap/flower/pull/1461))" + +#: ../../source/ref-changelog.md:497 +msgid "" +"The new `FedMedian` strategy implements Federated Median (FedMedian) by " +"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." +msgstr "" +"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " +"(FedMedian) par [Yin et al., 2018] " +"(https://arxiv.org/pdf/1803.01498v1.pdf)." + +#: ../../source/ref-changelog.md:499 +msgid "" +"**Log** `Client` **exceptions in Virtual Client Engine** " +"([#1493](https://github.com/adap/flower/pull/1493))" +msgstr "" +"**Log** `Client` **exceptions dans le moteur de client virtuel** " +"([#1493](https://github.com/adap/flower/pull/1493))" + +#: ../../source/ref-changelog.md:501 +msgid "" +"All `Client` exceptions happening in the VCE are now logged by default " +"and not just exposed to the configured `Strategy` (via the `failures` " +"argument)." 
+msgstr ""
+"Toutes les exceptions `Client` qui se produisent dans le VCE sont "
+"maintenant enregistrées par défaut et ne sont pas seulement exposées à la"
+" `Stratégie` configurée (via l'argument `failures`)."
+
+#: ../../source/ref-changelog.md:503
+msgid ""
+"**Improve Virtual Client Engine internals** "
+"([#1401](https://github.com/adap/flower/pull/1401), "
+"[#1453](https://github.com/adap/flower/pull/1453))"
+msgstr ""
+"**Améliorer les éléments internes du moteur de client virtuel** "
+"([#1401](https://github.com/adap/flower/pull/1401), "
+"[#1453](https://github.com/adap/flower/pull/1453))"
+
+#: ../../source/ref-changelog.md:505
+msgid ""
+"Some internals of the Virtual Client Engine have been revamped. The VCE "
+"now uses Ray 2.0 under the hood, the value type of the `client_resources`"
+" dictionary changed to `float` to allow fractions of resources to be "
+"allocated."
+msgstr ""
+"Certains éléments internes du moteur de client virtuel ont été remaniés. "
+"Le VCE utilise maintenant Ray 2.0 sous le capot, le type de valeur du "
+"dictionnaire `client_resources` a été remplacé par `float` pour permettre"
+" l'allocation de fractions de ressources."
+
+#: ../../source/ref-changelog.md:507
+msgid ""
+"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual "
+"Client Engine**"
+msgstr ""
+"**Prise en charge des méthodes optionnelles** `Client`**/**`NumPyClient` "
+"**dans le moteur de client virtuel**"
+
+#: ../../source/ref-changelog.md:509
+msgid ""
+"The Virtual Client Engine now has full support for optional `Client` (and"
+" `NumPyClient`) methods."
+msgstr ""
+"Le moteur de client virtuel prend désormais en charge les méthodes "
+"optionnelles `Client` (et `NumPyClient`)."
+ +#: ../../source/ref-changelog.md:511 +msgid "" +"**Provide type information to packages using** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" +msgstr "" +"**Fournir des informations de type aux paquets en utilisant** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" + +#: ../../source/ref-changelog.md:513 +msgid "" +"The package `flwr` is now bundled with a `py.typed` file indicating that " +"the package is typed. This enables typing support for projects or " +"packages that use `flwr` by enabling them to improve their code using " +"static type checkers like `mypy`." +msgstr "" +"Le paquet `flwr` est maintenant accompagné d'un fichier `py.typed` " +"indiquant que le paquet est typé. Cela permet de prendre en charge le " +"typage pour les projets ou les paquets qui utilisent `flwr` en leur " +"permettant d'améliorer leur code à l'aide de vérificateurs de types " +"statiques comme `mypy`." + +#: ../../source/ref-changelog.md:515 +msgid "" +"**Updated code example** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" +msgstr "" +"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" + +#: ../../source/ref-changelog.md:517 +msgid "" +"The code examples covering scikit-learn and PyTorch Lightning have been " +"updated to work with the latest version of Flower." +msgstr "" +"Les exemples de code couvrant scikit-learn et PyTorch Lightning ont été " +"mis à jour pour fonctionner avec la dernière version de Flower." 
+ +#: ../../source/ref-changelog.md:519 +msgid "" +"**Updated documentation** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" +msgstr "" +"**Documentation mise à jour** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" + 
+#: ../../source/ref-changelog.md:521
+msgid ""
+"There have been so many documentation updates that it doesn't even make "
+"sense to list them individually."
+msgstr ""
+"Il y a eu tellement de mises à jour de la documentation que cela n'a même"
+" pas de sens de les énumérer individuellement."
+
+#: ../../source/ref-changelog.md:523
+msgid ""
+"**Restructured documentation** "
+"([#1387](https://github.com/adap/flower/pull/1387))"
+msgstr ""
+"**Documentation restructurée** "
+"([#1387](https://github.com/adap/flower/pull/1387))"
+
+#: ../../source/ref-changelog.md:525
+msgid ""
+"The documentation has been restructured to make it easier to navigate. "
+"This is just the first step in a larger effort to make the Flower "
+"documentation the best documentation of any project ever. Stay tuned!"
+msgstr ""
+"La documentation a été restructurée pour faciliter la navigation. Ce "
+"n'est que la première étape d'un effort plus important visant à faire de "
+"la documentation de Flower la meilleure documentation de tous les "
+"projets. Reste à l'écoute !"
+
+#: ../../source/ref-changelog.md:527
+msgid ""
+"**Open in Colab button** "
+"([#1389](https://github.com/adap/flower/pull/1389))"
+msgstr ""
+"**Bouton Ouvrir dans Colab** "
+"([#1389](https://github.com/adap/flower/pull/1389))"
+
+#: ../../source/ref-changelog.md:529
+msgid ""
+"The four parts of the Flower Federated Learning Tutorial now come with a "
+"new `Open in Colab` button. No need to install anything on your local "
+"machine, you can now use and learn about Flower in your browser, it's "
+"only a single click away."
+msgstr ""
+"Les quatre parties du didacticiel d'apprentissage fédéré Flower sont "
+"maintenant accompagnées d'un nouveau bouton \"Ouvrir dans Colab\". Pas "
+"besoin d'installer quoi que ce soit sur ta machine locale, tu peux "
+"maintenant utiliser et apprendre à connaître Flower dans ton navigateur, "
+"il te suffit d'un simple clic."
+ +#: ../../source/ref-changelog.md:531 +msgid "" +"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" +msgstr "" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" + +#: ../../source/ref-changelog.md:533 +msgid "" +"The Flower Federated Learning Tutorial has two brand-new parts covering " +"custom strategies (still WIP) and the distinction between `Client` and " +"`NumPyClient`. The existing parts one and two have also been improved " +"(many small changes and fixes)." +msgstr "" +"Le tutoriel sur l'apprentissage fédéré des fleurs a deux toutes nouvelles" +" parties couvrant les stratégies personnalisées (encore WIP) et la " +"distinction entre `Client` et `NumPyClient`. Les parties un et deux " +"existantes ont également été améliorées (beaucoup de petits changements " +"et de corrections)." 
+
+#: ../../source/ref-changelog.md:539
+msgid "v1.0.0 (2022-07-28)"
+msgstr "v1.0.0 (2022-07-28)"
+
+#: ../../source/ref-changelog.md:541
+msgid "Highlights"
+msgstr "Points forts"
+
+#: ../../source/ref-changelog.md:543
+msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)"
+msgstr "**Moteur de client virtuel stable** (accessible via `start_simulation`)"
+
+#: ../../source/ref-changelog.md:544
+msgid "All `Client`/`NumPyClient` methods are now optional"
+msgstr "Toutes les méthodes `Client`/`NumPyClient` sont maintenant optionnelles"
+
+#: ../../source/ref-changelog.md:545
+msgid "Configurable `get_parameters`"
+msgstr "`get_parameters` configurable"
+
+#: ../../source/ref-changelog.md:546
+msgid ""
+"Tons of small API cleanups resulting in a more coherent developer "
+"experience"
+msgstr ""
+"Des tonnes de petits nettoyages d'API résultant en une expérience plus "
+"cohérente pour les développeurs"
+
+#: ../../source/ref-changelog.md:550
+msgid ""
+"We would like to give our **special thanks** to all the contributors who "
+"made Flower 1.0 possible (in reverse [GitHub "
+"Contributors](https://github.com/adap/flower/graphs/contributors) order):"
+msgstr ""
+"Nous tenons à remercier **particulièrement** tous les contributeurs qui "
+"ont rendu Flower 1.0 possible (dans l'ordre inverse de [GitHub "
+"Contributors](https://github.com/adap/flower/graphs/contributors)) :"
+
+#: ../../source/ref-changelog.md:552
+msgid ""
+"[@rtaiello](https://github.com/rtaiello), "
+"[@g-pichler](https://github.com/g-pichler), [@rob-"
+"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com"
+"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), "
+"[@nfnt](https://github.com/nfnt), "
+"[@tatiana-s](https://github.com/tatiana-s), "
+"[@TParcollet](https://github.com/TParcollet), "
+"[@vballoli](https://github.com/vballoli), "
+"[@negedng](https://github.com/negedng), "
+"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), "
+"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sandracl72](https://github.com/sandracl72), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." 
+msgstr ""
+"[@rtaiello](https://github.com/rtaiello), "
+"[@g-pichler](https://github.com/g-pichler), [@rob-"
+"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com"
+"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), "
+"[@nfnt](https://github.com/nfnt), "
+"[@tatiana-s](https://github.com/tatiana-s), "
+"[@TParcollet](https://github.com/TParcollet), "
+"[@vballoli](https://github.com/vballoli), "
+"[@negedng](https://github.com/negedng), "
+"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), "
+"[@hei411](https://github.com/hei411), "
+"[@SebastianSpeitel](https://github.com/SebastianSpeitel), "
+"[@AmitChaulwar](https://github.com/AmitChaulwar), "
+"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com"
+"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), "
+"[@lbhm](https://github.com/lbhm), "
+"[@sishtiaq](https://github.com/sishtiaq), "
+"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com"
+"/Jueun-Park), [@architjen](https://github.com/architjen), "
+"[@PratikGarai](https://github.com/PratikGarai), "
+"[@mrinaald](https://github.com/mrinaald), "
+"[@zliel](https://github.com/zliel), "
+"[@MeiruiJiang](https://github.com/MeiruiJiang), "
+"[@sandracl72](https://github.com/sandracl72), "
+"[@gubertoli](https://github.com/gubertoli), "
+"[@Vingt100](https://github.com/Vingt100), "
+"[@MakGulati](https://github.com/MakGulati), "
+"[@cozek](https://github.com/cozek), "
+"[@jafermarq](https://github.com/jafermarq), "
+"[@sisco0](https://github.com/sisco0), "
+"[@akhilmathurs](https://github.com/akhilmathurs), "
+"[@CanTuerk](https://github.com/CanTuerk), "
+"[@mariaboerner1987](https://github.com/mariaboerner1987), "
+"[@pedropgusmao](https://github.com/pedropgusmao), "
+"[@tanertopal](https://github.com/tanertopal), "
+"[@danieljanes](https://github.com/danieljanes)."
+
+#: ../../source/ref-changelog.md:556
+msgid ""
+"**All arguments must be passed as keyword arguments** "
+"([#1338](https://github.com/adap/flower/pull/1338))"
+msgstr ""
+"**Tous les arguments doivent être passés comme des arguments de mot-clé**"
+" ([#1338](https://github.com/adap/flower/pull/1338))"
+
+#: ../../source/ref-changelog.md:558
+#, fuzzy
+msgid ""
+"Pass all arguments as keyword arguments, positional arguments are not "
+"longer supported. Code that uses positional arguments (e.g., "
+"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword "
+"for each positional argument (e.g., "
+"`start_client(server_address=\"127.0.0.1:8080\", "
+"client=FlowerClient())`)."
+msgstr ""
+"Passe tous les arguments comme des arguments de mot-clé, les arguments "
+"positionnels ne sont plus pris en charge. Le code qui utilise des "
+"arguments positionnels (par exemple, "
+"`start_client(\"127.0.0.1:8080\", FlowerClient())`) doit ajouter le mot-"
+"clé pour chaque argument positionnel (par exemple, "
+"`start_client(server_address=\"127.0.0.1:8080\", "
+"client=FlowerClient())`)."
+
+#: ../../source/ref-changelog.md:560
+msgid ""
+"**Introduce configuration object** `ServerConfig` **in** `start_server` "
+"**and** `start_simulation` "
+"([#1317](https://github.com/adap/flower/pull/1317))"
+msgstr ""
+"**Introduire l'objet de configuration** `ServerConfig` **dans** "
+"`start_server` **et** `start_simulation` "
+"([#1317](https://github.com/adap/flower/pull/1317))"
+
+#: ../../source/ref-changelog.md:562
+msgid ""
+"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": "
+"600.0}`, `start_server` and `start_simulation` now expect a configuration"
+" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same"
+" arguments that as the previous config dict, but it makes writing type-"
+"safe code easier and the default parameters values more transparent."
+msgstr ""
+"Au lieu d'un dictionnaire de configuration `{\"num_rounds\" : 3, "
+"\"round_timeout\" : 600.0}`, `start_server` et `start_simulation` "
+"attendent maintenant un objet de configuration de type "
+"`flwr.server.ServerConfig`. `ServerConfig` prend les mêmes arguments que "
+"le dict de configuration précédent, mais il rend l'écriture de code "
+"sécurisé plus facile et les valeurs des paramètres par défaut plus "
+"transparentes."
+ +#: ../../source/ref-changelog.md:564 +msgid "" +"**Rename built-in strategy parameters for clarity** " +"([#1334](https://github.com/adap/flower/pull/1334))" +msgstr "" +"**Renommer les paramètres de la stratégie intégrée pour plus de clarté** " +"([#1334](https://github.com/adap/flower/pull/1334))" + +#: ../../source/ref-changelog.md:566 +msgid "" +"The following built-in strategy parameters were renamed to improve " +"readability and consistency with other API's:" +msgstr "" +"Les paramètres de stratégie intégrés suivants ont été renommés pour " +"améliorer la lisibilité et la cohérence avec d'autres API :" + +#: ../../source/ref-changelog.md:568 +msgid "`fraction_eval` --> `fraction_evaluate`" +msgstr "`fraction_eval` --> `fraction_evaluate`" + +#: ../../source/ref-changelog.md:569 +msgid "`min_eval_clients` --> `min_evaluate_clients`" +msgstr "`min_eval_clients` --> `min_evaluate_clients`" + +#: ../../source/ref-changelog.md:570 +msgid "`eval_fn` --> `evaluate_fn`" +msgstr "`eval_fn` --> `evaluate_fn`" + +#: ../../source/ref-changelog.md:572 +msgid "" +"**Update default arguments of built-in strategies** " +"([#1278](https://github.com/adap/flower/pull/1278))" +msgstr "" +"**Mettre à jour les arguments par défaut des stratégies intégrées** " +"([#1278](https://github.com/adap/flower/pull/1278))" + +#: ../../source/ref-changelog.md:574 +msgid "" +"All built-in strategies now use `fraction_fit=1.0` and " +"`fraction_evaluate=1.0`, which means they select *all* currently " +"available clients for training and evaluation. Projects that relied on " +"the previous default values can get the previous behaviour by " +"initializing the strategy in the following way:" +msgstr "" +"Toutes les stratégies intégrées utilisent désormais `fraction_fit=1.0` et" +" `fraction_evaluate=1.0`, ce qui signifie qu'elles sélectionnent *tous* " +"les clients actuellement disponibles pour l'entraînement et l'évaluation." 
+" Les projets qui s'appuyaient sur les valeurs par défaut précédentes "
+"peuvent retrouver le comportement antérieur en initialisant la stratégie "
+"de la manière suivante :"
+
+#: ../../source/ref-changelog.md:576
+msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`"
+msgstr "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`"
+
+#: ../../source/ref-changelog.md:578
+msgid ""
+"**Add** `server_round` **to** `Strategy.evaluate` "
+"([#1334](https://github.com/adap/flower/pull/1334))"
+msgstr ""
+"**Ajouter** `server_round` **à** `Strategy.evaluate` "
+"([#1334](https://github.com/adap/flower/pull/1334))"
+
+#: ../../source/ref-changelog.md:580
+msgid ""
+"The `Strategy` method `evaluate` now receives the current round of "
+"federated learning/evaluation as the first parameter."
+msgstr ""
+"La méthode `evaluate` de `Strategy` reçoit maintenant le cycle actuel "
+"d'apprentissage/évaluation fédéré comme premier paramètre."
+
+#: ../../source/ref-changelog.md:582
+msgid ""
+"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` "
+"([#1334](https://github.com/adap/flower/pull/1334))"
+msgstr ""
+"**Ajouter** `server_round` **et** `config` **paramètres à** `evaluate_fn`"
+" ([#1334](https://github.com/adap/flower/pull/1334))"
+
+#: ../../source/ref-changelog.md:584
+msgid ""
+"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes "
+"three parameters: (1) The current round of federated learning/evaluation "
+"(`server_round`), (2) the model parameters to evaluate (`parameters`), "
+"and (3) a config dictionary (`config`)."
+msgstr ""
+"Le `evaluate_fn` passé aux stratégies intégrées comme `FedAvg` prend "
+"maintenant trois paramètres : (1) le cycle actuel "
+"d'apprentissage/évaluation fédéré (`server_round`), (2) les paramètres du"
+" modèle à évaluer (`parameters`), et (3) un dictionnaire de configuration"
+" (`config`)."
+ +#: ../../source/ref-changelog.md:586 +msgid "" +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" +msgstr "" +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" + +#: ../../source/ref-changelog.md:588 +msgid "" +"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " +"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " +"current round of federated learning/evaluation as their first parameter. " +"To improve reaability and avoid confusion with *random*, this parameter " +"has been renamed from `rnd` to `server_round`." +msgstr "" +"Plusieurs méthodes et fonctions de Flower (`evaluate_fn`, " +"`configure_fit`, `aggregate_fit`, `configure_evaluate`, " +"`aggregate_evaluate`) reçoivent le cycle actuel " +"d'apprentissage/évaluation fédéré comme premier paramètre. Pour améliorer" +" la fiabilité et éviter la confusion avec *random*, ce paramètre a été " +"renommé de `rnd` à `server_round`." + +#: ../../source/ref-changelog.md:590 +msgid "" +"**Move** `flwr.dataset` **to** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" +msgstr "" +"**Déplacer** `flwr.dataset` **vers** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" + +#: ../../source/ref-changelog.md:592 +msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." +msgstr "Le paquet expérimental `flwr.dataset` a été migré vers Flower Baselines." + +#: ../../source/ref-changelog.md:594 +msgid "" +"**Remove experimental strategies** " +"([#1280](https://github.com/adap/flower/pull/1280))" +msgstr "" +"**Supprimer les stratégies expérimentales** " +"([#1280](https://github.com/adap/flower/pull/1280))" + +#: ../../source/ref-changelog.md:596 +msgid "" +"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " +"`FedFSv1`)." 
+msgstr ""
+"Supprimer les stratégies expérimentales non maintenues (`FastAndSlow`, "
+"`FedFSv0`, `FedFSv1`)."
+
+#: ../../source/ref-changelog.md:598
+msgid ""
+"**Rename** `Weights` **to** `NDArrays` "
+"([#1258](https://github.com/adap/flower/pull/1258), "
+"[#1259](https://github.com/adap/flower/pull/1259))"
+msgstr ""
+"**Renommer** `Weights` **en** `NDArrays` "
+"([#1258](https://github.com/adap/flower/pull/1258), "
+"[#1259](https://github.com/adap/flower/pull/1259))"
+
+#: ../../source/ref-changelog.md:600
+msgid ""
+"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better "
+"capture what this type is all about."
+msgstr ""
+"`flwr.common.Weights` a été renommé en `flwr.common.NDArrays` pour mieux "
+"rendre compte de la nature de ce type."
+
+#: ../../source/ref-changelog.md:602
+msgid ""
+"**Remove antiquated** `force_final_distributed_eval` **from** "
+"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), "
+"[#1259](https://github.com/adap/flower/pull/1259))"
+msgstr ""
+"**Supprimez l'ancien** `force_final_distributed_eval` **de** "
+"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), "
+"[#1259](https://github.com/adap/flower/pull/1259))"
+
+#: ../../source/ref-changelog.md:604
+msgid ""
+"The `start_server` parameter `force_final_distributed_eval` has long been"
+" a historic artefact, in this release it is finally gone for good."
+msgstr ""
+"Le paramètre `start_server` `force_final_distributed_eval` a longtemps "
+"été un artefact historique, dans cette version il a finalement disparu "
+"pour de bon."
+ +#: ../../source/ref-changelog.md:606 +msgid "" +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" +msgstr "" +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" + +#: ../../source/ref-changelog.md:608 +msgid "" +"The `get_parameters` method now accepts a configuration dictionary, just " +"like `get_properties`, `fit`, and `evaluate`." +msgstr "" +"La méthode `get_parameters` accepte maintenant un dictionnaire de " +"configuration, tout comme `get_properties`, `fit`, et `evaluate`." + +#: ../../source/ref-changelog.md:610 +msgid "" +"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " +"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" +msgstr "" +"**Remplace** `num_rounds` **dans** `start_simulation` **avec le nouveau**" +" `config` **paramètre** " +"([#1281](https://github.com/adap/flower/pull/1281))" + +#: ../../source/ref-changelog.md:612 +msgid "" +"The `start_simulation` function now accepts a configuration dictionary " +"`config` instead of the `num_rounds` integer. This improves the " +"consistency between `start_simulation` and `start_server` and makes " +"transitioning between the two easier." +msgstr "" +"La fonction `start_simulation` accepte maintenant un dictionnaire de " +"configuration `config` au lieu de l'entier `num_rounds`. Cela améliore la" +" cohérence entre `start_simulation` et `start_server` et facilite la " +"transition entre les deux." + +#: ../../source/ref-changelog.md:616 +msgid "" +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" +msgstr "" +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" + +#: ../../source/ref-changelog.md:618 +msgid "" +"The previous Flower release introduced experimental support for Python " +"3.10, this release declares Python 3.10 support as stable." 
+msgstr ""
+"La version précédente de Flower a introduit la prise en charge "
+"expérimentale de Python 3.10, cette version déclare la prise en charge de"
+" Python 3.10 comme stable."
+
+#: ../../source/ref-changelog.md:620
+msgid ""
+"**Make all** `Client` **and** `NumPyClient` **methods optional** "
+"([#1260](https://github.com/adap/flower/pull/1260), "
+"[#1277](https://github.com/adap/flower/pull/1277))"
+msgstr ""
+"**Rendre toutes les méthodes** `Client` **et** `NumPyClient` "
+"**facultatives** ([#1260](https://github.com/adap/flower/pull/1260), "
+"[#1277](https://github.com/adap/flower/pull/1277))"
+
+#: ../../source/ref-changelog.md:622
+msgid ""
+"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, "
+"`fit`, and `evaluate` are all optional. This enables writing clients that"
+" implement, for example, only `fit`, but no other method. No need to "
+"implement `evaluate` when using centralized evaluation!"
+msgstr ""
+"Les méthodes `Client`/`NumPyClient` `get_properties`, `get_parameters`, "
+"`fit`, et `evaluate` sont toutes optionnelles. Cela permet d'écrire des "
+"clients qui n'implémentent, par exemple, que `fit`, mais aucune autre "
+"méthode. Pas besoin d'implémenter `evaluate` quand on utilise "
+"l'évaluation centralisée !"
+
+#: ../../source/ref-changelog.md:624
+msgid ""
+"**Enable passing a** `Server` **instance to** `start_simulation` "
+"([#1281](https://github.com/adap/flower/pull/1281))"
+msgstr ""
+"**Autoriser le passage d'une instance** `Server` **à** `start_simulation` "
+"([#1281](https://github.com/adap/flower/pull/1281))"
+
+#: ../../source/ref-changelog.md:626
+msgid ""
+"Similar to `start_server`, `start_simulation` now accepts a full `Server`"
+" instance. This enables users to heavily customize the execution of "
+"eperiments and opens the door to running, for example, async FL using the"
+" Virtual Client Engine."
+msgstr "" +"Comme pour `start_server`, `start_simulation` accepte maintenant une " +"instance complète de `Server`. Cela permet aux utilisateurs de " +"personnaliser fortement l'exécution des expériences et ouvre la porte à " +"l'exécution, par exemple, de FL asynchrones à l'aide du moteur de client " +"virtuel." + +#: ../../source/ref-changelog.md:628 +msgid "" +"**Update code examples** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" +msgstr "" +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" + +#: ../../source/ref-changelog.md:630 +msgid "" +"Many code examples received small or even large maintenance updates, " +"among them are" +msgstr "" +"De nombreux exemples de code ont reçu de petites ou même de grandes mises" +" à jour de maintenance" + +#: ../../source/ref-changelog.md:632 +msgid "`scikit-learn`" +msgstr "`scikit-learn`" + +#: ../../source/ref-changelog.md:633 +msgid "`simulation_pytorch`" +msgstr "`simulation_pytorch`" + +#: ../../source/ref-changelog.md:634 +msgid "`quickstart_pytorch`" +msgstr "`quickstart_pytorch` (démarrage rapide)" + +#: ../../source/ref-changelog.md:635 +msgid "`quickstart_simulation`" +msgstr "`quickstart_simulation`" + +#: ../../source/ref-changelog.md:636 +msgid "`quickstart_tensorflow`" +msgstr "`quickstart_tensorflow`" + +#: ../../source/ref-changelog.md:637 +msgid "`advanced_tensorflow`" +msgstr "`advanced_tensorflow` (en anglais)" + +#: ../../source/ref-changelog.md:639 +msgid "" +"**Remove the obsolete simulation example** " +"([#1328](https://github.com/adap/flower/pull/1328))" +msgstr "" +"**Supprime l'exemple de simulation obsolète** " +"([#1328](https://github.com/adap/flower/pull/1328))" + +#: ../../source/ref-changelog.md:641 +msgid "" 
+"Removes the obsolete `simulation` example and renames " +"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " +"naming of `simulation_pytorch`" +msgstr "" +"Supprime l'exemple obsolète `simulation` et renomme " +"`quickstart_simulation` en `simulation_tensorflow` pour qu'il corresponde" +" au nom de `simulation_pytorch`" + +#: ../../source/ref-changelog.md:643 +msgid "" +"**Update documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" +msgstr "" +"**Mise à jour de la documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" + +#: ../../source/ref-changelog.md:645 +msgid "" +"One substantial documentation update fixes multiple smaller rendering " +"issues, makes titles more succinct to improve navigation, removes a " +"deprecated library, updates documentation dependencies, includes the " +"`flwr.common` module in the API reference, includes support for markdown-" +"based documentation, migrates the changelog from `.rst` to `.md`, and " 
+"fixes a number of smaller details!"
+msgstr ""
+"Une mise à jour substantielle de la documentation corrige plusieurs "
+"petits problèmes de rendu, rend les titres plus succincts pour améliorer "
+"la navigation, supprime une bibliothèque obsolète, met à jour les "
+"dépendances de la documentation, inclut le module `flwr.common` dans la "
+"référence de l'API, inclut le support de la documentation basée sur le "
+"markdown, migre le changelog de `.rst` vers `.md`, et corrige un certain "
+"nombre de détails plus petits !"
+
+#: ../../source/ref-changelog.md:647 ../../source/ref-changelog.md:702
+#: ../../source/ref-changelog.md:771 ../../source/ref-changelog.md:810
+msgid "**Minor updates**"
+msgstr "**Mises à jour mineures**"
+
+#: ../../source/ref-changelog.md:649
+msgid ""
+"Add round number to fit and evaluate log messages "
+"([#1266](https://github.com/adap/flower/pull/1266))"
+msgstr ""
+"Ajoute le numéro du tour aux messages de journal de fit et d'evaluate "
+"([#1266](https://github.com/adap/flower/pull/1266))"
+
+#: ../../source/ref-changelog.md:650
+msgid ""
+"Add secure gRPC connection to the `advanced_tensorflow` code example "
+"([#847](https://github.com/adap/flower/pull/847))"
+msgstr ""
+"Ajouter une connexion gRPC sécurisée à l'exemple de code "
+"`advanced_tensorflow` ([#847](https://github.com/adap/flower/pull/847))"
+
+#: ../../source/ref-changelog.md:651
+msgid ""
+"Update developer tooling "
+"([#1231](https://github.com/adap/flower/pull/1231), "
+"[#1276](https://github.com/adap/flower/pull/1276), "
+"[#1301](https://github.com/adap/flower/pull/1301), "
+"[#1310](https://github.com/adap/flower/pull/1310))"
+msgstr ""
+"Mettre à jour les outils de développement "
+"([#1231](https://github.com/adap/flower/pull/1231), "
+"[#1276](https://github.com/adap/flower/pull/1276), "
+"[#1301](https://github.com/adap/flower/pull/1301), "
+"[#1310](https://github.com/adap/flower/pull/1310))"
+
+#: ../../source/ref-changelog.md:652
+msgid ""
+"Rename 
ProtoBuf messages to improve consistency " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" +msgstr "" +"Renomme les messages ProtoBuf pour améliorer la cohérence " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" + +#: ../../source/ref-changelog.md:654 +msgid "v0.19.0 (2022-05-18)" +msgstr "v0.19.0 (2022-05-18)" + +#: ../../source/ref-changelog.md:658 +msgid "" +"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" +msgstr "" +"**Flower Baselines (preview) : FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" + +#: ../../source/ref-changelog.md:660 +msgid "" +"The first preview release of Flower Baselines has arrived! We're " +"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " +"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " +"With this first preview release we're also inviting the community to " +"[contribute their own baselines](https://flower.ai/docs/contributing-" +"baselines.html)." +msgstr "" +"La première version préliminaire de Flower Baselines est arrivée ! Nous " +"démarrons Flower Baselines avec des implémentations de FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, et FedAvgM. Consultez la documentation sur " +"l'utilisation de [Flower Baselines](https://flower.ai/docs/using-" +"baselines.html). 
Avec cette première version préliminaire, nous invitons " +"également la communauté à [contribuer à leurs propres lignes de " +"base](https://flower.ai/docs/contributing-baselines.html)." + +#: ../../source/ref-changelog.md:662 +msgid "" +"**C++ client SDK (preview) and code example** " +"([#1111](https://github.com/adap/flower/pull/1111))" +msgstr "" +"**SDK client C++ (aperçu) et exemple de code** " +"([#1111](https://github.com/adap/flower/pull/1111))" + +#: ../../source/ref-changelog.md:664 +msgid "" +"Preview support for Flower clients written in C++. The C++ preview " +"includes a Flower client SDK and a quickstart code example that " +"demonstrates a simple C++ client using the SDK." +msgstr "" +"L'aperçu C++ comprend un SDK pour les clients Flower et un exemple de " +"code de démarrage rapide qui démontre un client C++ simple utilisant le " +"SDK." + +#: ../../source/ref-changelog.md:666 +msgid "" +"**Add experimental support for Python 3.10 and Python 3.11** " +"([#1135](https://github.com/adap/flower/pull/1135))" +msgstr "" +"**Ajouter la prise en charge expérimentale de Python 3.10 et Python " +"3.11** ([#1135](https://github.com/adap/flower/pull/1135))" + +#: ../../source/ref-changelog.md:668 +msgid "" +"Python 3.10 is the latest stable release of Python and Python 3.11 is due" +" to be released in October. This Flower release adds experimental support" +" for both Python versions." +msgstr "" +"Python 3.10 est la dernière version stable de Python et Python 3.11 " +"devrait sortir en octobre. Cette version de Flower ajoute une prise en " +"charge expérimentale pour les deux versions de Python." 
+
+#: ../../source/ref-changelog.md:670
+msgid ""
+"**Aggregate custom metrics through user-provided functions** "
+"([#1144](https://github.com/adap/flower/pull/1144))"
+msgstr ""
+"**Agréger des mesures personnalisées grâce à des fonctions fournies par "
+"l'utilisateur** ([#1144](https://github.com/adap/flower/pull/1144))"
+
+#: ../../source/ref-changelog.md:672
+msgid ""
+"Custom metrics (e.g., `accuracy`) can now be aggregated without having to"
+" customize the strategy. Built-in strategies support two new arguments, "
+"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that "
+"allow passing custom metric aggregation functions."
+msgstr ""
+"Les métriques personnalisées (par exemple `accuracy`) peuvent désormais "
+"être agrégées sans avoir à personnaliser la stratégie. Les stratégies "
+"intégrées prennent en charge deux nouveaux arguments, "
+"`fit_metrics_aggregation_fn` et `evaluate_metrics_aggregation_fn`, qui "
+"permettent de passer des fonctions d'agrégation de métriques "
+"personnalisées."
+
+#: ../../source/ref-changelog.md:674
+msgid ""
+"**User-configurable round timeout** "
+"([#1162](https://github.com/adap/flower/pull/1162))"
+msgstr ""
+"**Temps d'attente du tour configurable par l'utilisateur** "
+"([#1162](https://github.com/adap/flower/pull/1162))"
+
+#: ../../source/ref-changelog.md:676
+msgid ""
+"A new configuration value allows the round timeout to be set for "
+"`start_server` and `start_simulation`. If the `config` dictionary "
+"contains a `round_timeout` key (with a `float` value in seconds), the "
+"server will wait *at least* `round_timeout` seconds before it closes the "
+"connection."
+msgstr ""
+"Une nouvelle valeur de configuration permet de définir le délai "
+"d'attente du tour pour `start_server` et `start_simulation`. Si le "
+"dictionnaire `config` contient une clé `round_timeout` (avec une "
+"valeur `float` en secondes), le serveur attendra *au moins* "
+"`round_timeout` secondes avant de fermer la connexion."
+ +#: ../../source/ref-changelog.md:678 +msgid "" +"**Enable both federated evaluation and centralized evaluation to be used " +"at the same time in all built-in strategies** " +"([#1091](https://github.com/adap/flower/pull/1091))" +msgstr "" +"**Permettre l'utilisation simultanée de l'évaluation fédérée et de " +"l'évaluation centralisée dans toutes les stratégies intégrées** " +"([#1091](https://github.com/adap/flower/pull/1091))" + +#: ../../source/ref-changelog.md:680 +msgid "" +"Built-in strategies can now perform both federated evaluation (i.e., " +"client-side) and centralized evaluation (i.e., server-side) in the same " +"round. Federated evaluation can be disabled by setting `fraction_eval` to" +" `0.0`." +msgstr "" +"Les stratégies intégrées peuvent maintenant effectuer une évaluation " +"fédérée (c'est-à-dire côté client) et une évaluation centralisée " +"(c'est-à-dire côté serveur) dans le même tour. L'évaluation fédérée peut " +"être désactivée en réglant `fraction_eval` sur `0.0`." 
+ +#: ../../source/ref-changelog.md:682 +msgid "" +"**Two new Jupyter Notebook tutorials** " +"([#1141](https://github.com/adap/flower/pull/1141))" +msgstr "" +"**Deux nouveaux tutoriels Jupyter Notebook** " +"([#1141](https://github.com/adap/flower/pull/1141))" + +#: ../../source/ref-changelog.md:684 +msgid "" +"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " +"basic and intermediate Flower features:" +msgstr "" +"Deux tutoriels Jupyter Notebook (compatibles avec Google Colab) " +"expliquent les fonctionnalités de base et intermédiaires de Flower :" + +#: ../../source/ref-changelog.md:686 +msgid "" +"*An Introduction to Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" +msgstr "" +"*Introduction à l'apprentissage fédéré* : [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" + +#: ../../source/ref-changelog.md:688 +msgid "" +"*Using Strategies in Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" +msgstr "" +"*Utiliser des stratégies dans l'apprentissage fédéré* : [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" + +#: ../../source/ref-changelog.md:690 +msgid "" +"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " +"([#1076](https://github.com/adap/flower/pull/1076))" +msgstr "" +"**Nouvelle stratégie FedAvgM (Federated Averaging with Server Momentum)**" +" ([#1076](https://github.com/adap/flower/pull/1076))" + +#: ../../source/ref-changelog.md:692 +#, fuzzy +msgid "" +"The new `FedAvgM` strategy implements Federated Averaging with Server " +"Momentum \\[Hsu et al., 2019\\]." 
+msgstr ""
+"La nouvelle stratégie `FedAvgM` met en œuvre la moyenne fédérée avec le "
+"momentum du serveur [Hsu et al., 2019]."
+
+#: ../../source/ref-changelog.md:694
+msgid ""
+"**New advanced PyTorch code example** "
+"([#1007](https://github.com/adap/flower/pull/1007))"
+msgstr ""
+"**Nouvel exemple de code PyTorch avancé** "
+"([#1007](https://github.com/adap/flower/pull/1007))"
+
+#: ../../source/ref-changelog.md:696
+msgid ""
+"A new code example (`advanced_pytorch`) demonstrates advanced Flower "
+"concepts with PyTorch."
+msgstr ""
+"Un nouvel exemple de code (`advanced_pytorch`) démontre des concepts "
+"avancés de Flower avec PyTorch."
+
+#: ../../source/ref-changelog.md:698
+msgid ""
+"**New JAX code example** "
+"([#906](https://github.com/adap/flower/pull/906), "
+"[#1143](https://github.com/adap/flower/pull/1143))"
+msgstr ""
+"**Nouvel exemple de code JAX** "
+"([#906](https://github.com/adap/flower/pull/906), "
+"[#1143](https://github.com/adap/flower/pull/1143))"
+
+#: ../../source/ref-changelog.md:700
+msgid ""
+"A new code example (`jax_from_centralized_to_federated`) shows federated "
+"learning with JAX and Flower."
+msgstr ""
+"Un nouvel exemple de code (`jax_from_centralized_to_federated`) montre "
+"l'apprentissage fédéré avec JAX et Flower."
+ +#: ../../source/ref-changelog.md:704 +msgid "" +"New option to keep Ray running if Ray was already initialized in " +"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" +msgstr "" +"Nouvelle option pour continuer à faire fonctionner Ray si Ray a déjà été " +"initialisé dans `start_simulation` " +"([#1177](https://github.com/adap/flower/pull/1177))" + +#: ../../source/ref-changelog.md:705 +msgid "" +"Add support for custom `ClientManager` as a `start_simulation` parameter " +"([#1171](https://github.com/adap/flower/pull/1171))" +msgstr "" +"Ajout de la prise en charge d'un `ClientManager` personnalisé comme " +"paramètre de `start_simulation` " +"([#1171](https://github.com/adap/flower/pull/1171))" + +#: ../../source/ref-changelog.md:706 +#, fuzzy +msgid "" +"New documentation for [implementing " +"strategies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" +msgstr "" +"Nouvelle documentation pour [mettre en œuvre des " +"stratégies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" + +#: ../../source/ref-changelog.md:707 +msgid "" +"New mobile-friendly documentation theme " +"([#1174](https://github.com/adap/flower/pull/1174))" +msgstr "" +"Nouveau thème de documentation adapté aux mobiles " +"([#1174](https://github.com/adap/flower/pull/1174))" + +#: ../../source/ref-changelog.md:708 +msgid "" +"Limit version range for (optional) `ray` dependency to include only " +"compatible releases (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" +msgstr "" +"Limite la plage de versions pour la dépendance (optionnelle) `ray` pour " +"n'inclure que les versions compatibles (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" + +#: 
../../source/ref-changelog.md:712 +msgid "" +"**Remove deprecated support for Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" +msgstr "" +"**Supprime la prise en charge obsolète de Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" + +#: ../../source/ref-changelog.md:713 +msgid "" +"**Remove deprecated KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" +msgstr "" +"**Supprimez KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" + +#: ../../source/ref-changelog.md:714 +msgid "" +"**Remove deprecated no-op extra installs** " +"([#973](https://github.com/adap/flower/pull/973))" +msgstr "" +"**Supprimer les installations supplémentaires no-op dépréciées** " +"([#973](https://github.com/adap/flower/pull/973))" + +#: ../../source/ref-changelog.md:715 +msgid "" +"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " +"([#869](https://github.com/adap/flower/pull/869))" +msgstr "" +"**Supprimez les champs proto obsolètes de** `FitRes` **et** `EvaluateRes`" +" ([#869](https://github.com/adap/flower/pull/869))" + +#: ../../source/ref-changelog.md:716 +msgid "" +"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" +msgstr "" +"**Supprime la stratégie QffedAvg (remplacée par QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" + +#: ../../source/ref-changelog.md:717 +msgid "" +"**Remove deprecated DefaultStrategy strategy** " +"([#1142](https://github.com/adap/flower/pull/1142))" +msgstr "" +"**Supprime la stratégie DefaultStrategy qui est obsolète** " +"([#1142](https://github.com/adap/flower/pull/1142))" + +#: ../../source/ref-changelog.md:718 +msgid "" +"**Remove deprecated support for eval_fn accuracy return value** " +"([#1142](https://github.com/adap/flower/pull/1142))" +msgstr "" +"**Supprimer la prise en charge obsolète de la valeur de retour de la " +"précision eval_fn** 
([#1142](https://github.com/adap/flower/pull/1142))" + +#: ../../source/ref-changelog.md:719 +msgid "" +"**Remove deprecated support for passing initial parameters as NumPy " +"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +msgstr "" +"**Supprime la prise en charge obsolète du passage des paramètres initiaux" +" en tant que ndarrays NumPy** " +"([#1142](https://github.com/adap/flower/pull/1142))" + +#: ../../source/ref-changelog.md:721 +msgid "v0.18.0 (2022-02-28)" +msgstr "v0.18.0 (2022-02-28)" + +#: ../../source/ref-changelog.md:725 +msgid "" +"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " +"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" +msgstr "" +"**Amélioration de la compatibilité du moteur de client virtuel avec " +"Jupyter Notebook / Google Colab** " +"([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" + +#: ../../source/ref-changelog.md:727 +msgid "" +"Simulations (using the Virtual Client Engine through `start_simulation`) " +"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " +"installing Flower with the `simulation` extra (`pip install " +"flwr[simulation]`)." +msgstr "" +"Les simulations (utilisant le moteur de client virtuel via " +"`start_simulation`) fonctionnent maintenant plus facilement sur les " +"Notebooks Jupyter (y compris Google Colab) après avoir installé Flower " +"avec l'option `simulation` (`pip install flwr[simulation]`)." 
+ +#: ../../source/ref-changelog.md:729 +msgid "" +"**New Jupyter Notebook code example** " +"([#833](https://github.com/adap/flower/pull/833))" +msgstr "" +"**Nouvel exemple de code Jupyter Notebook** " +"([#833](https://github.com/adap/flower/pull/833))" + +#: ../../source/ref-changelog.md:731 +msgid "" +"A new code example (`quickstart_simulation`) demonstrates Flower " +"simulations using the Virtual Client Engine through Jupyter Notebook " +"(incl. Google Colab)." +msgstr "" +"Un nouvel exemple de code (`quickstart_simulation`) démontre des " +"simulations de Flower en utilisant le moteur de client virtuel via " +"Jupyter Notebook (y compris Google Colab)." + +#: ../../source/ref-changelog.md:733 +msgid "" +"**Client properties (feature preview)** " +"([#795](https://github.com/adap/flower/pull/795))" +msgstr "" +"**Propriétés du client (aperçu des fonctionnalités)** " +"([#795](https://github.com/adap/flower/pull/795))" + +#: ../../source/ref-changelog.md:735 +msgid "" +"Clients can implement a new method `get_properties` to enable server-side" +" strategies to query client properties." +msgstr "" +"Les clients peuvent implémenter une nouvelle méthode `get_properties` " +"pour permettre aux stratégies côté serveur d'interroger les propriétés du" +" client." + +#: ../../source/ref-changelog.md:737 +msgid "" +"**Experimental Android support with TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" +msgstr "" +"**Support expérimental d'Android avec TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" + +#: ../../source/ref-changelog.md:739 +msgid "" +"Android support has finally arrived in `main`! Flower is both client-" +"agnostic and framework-agnostic by design. One can integrate arbitrary " +"client platforms and with this release, using Flower on Android has " +"become a lot easier." +msgstr "" +"La prise en charge d'Android est enfin arrivée dans `main` ! Flower est à" +" la fois agnostique au niveau du client et du cadre de travail. 
On peut " +"intégrer des plates-formes client arbitraires et avec cette version, " +"l'utilisation de Flower sur Android est devenue beaucoup plus facile." + +#: ../../source/ref-changelog.md:741 +msgid "" +"The example uses TFLite on the client side, along with a new " +"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " +"still experimental, but they are a first step towards a fully-fledged " +"Android SDK and a unified `FedAvg` implementation that integrated the new" +" functionality from `FedAvgAndroid`." +msgstr "" +"L'exemple utilise TFLite du côté client, ainsi qu'une nouvelle stratégie " +"`FedAvgAndroid`. Le client Android et `FedAvgAndroid` sont encore " +"expérimentaux, mais ils constituent un premier pas vers un SDK Android à " +"part entière et une implémentation unifiée de `FedAvg` intégrant la " +"nouvelle fonctionnalité de `FedAvgAndroid`." + +#: ../../source/ref-changelog.md:743 +msgid "" +"**Make gRPC keepalive time user-configurable and decrease default " +"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" +msgstr "" +"**Rendre le temps de garde gRPC configurable par l'utilisateur et " +"diminuer le temps de garde par défaut** " +"([#1069](https://github.com/adap/flower/pull/1069))" + +#: ../../source/ref-changelog.md:745 +msgid "" +"The default gRPC keepalive time has been reduced to increase the " +"compatibility of Flower with more cloud environments (for example, " +"Microsoft Azure). Users can configure the keepalive time to customize the" +" gRPC stack based on specific requirements." +msgstr "" +"Le temps de keepalive gRPC par défaut a été réduit pour augmenter la " +"compatibilité de Flower avec davantage d'environnements cloud (par " +"exemple, Microsoft Azure). Les utilisateurs peuvent configurer le temps " +"de keepalive pour personnaliser la pile gRPC en fonction d'exigences " +"spécifiques." 
+
+#: ../../source/ref-changelog.md:747
+msgid ""
+"**New differential privacy example using Opacus and PyTorch** "
+"([#805](https://github.com/adap/flower/pull/805))"
+msgstr ""
+"**Nouvel exemple de confidentialité différentielle utilisant Opacus et "
+"PyTorch** ([#805](https://github.com/adap/flower/pull/805))"
+
+#: ../../source/ref-changelog.md:749
+msgid ""
+"A new code example (`opacus`) demonstrates differentially-private "
+"federated learning with Opacus, PyTorch, and Flower."
+msgstr ""
+"Un nouvel exemple de code (`opacus`) démontre l'apprentissage fédéré "
+"différentiellement privé avec Opacus, PyTorch et Flower."
+
+#: ../../source/ref-changelog.md:751
+msgid ""
+"**New Hugging Face Transformers code example** "
+"([#863](https://github.com/adap/flower/pull/863))"
+msgstr ""
+"**Nouvel exemple de code Hugging Face Transformers** "
+"([#863](https://github.com/adap/flower/pull/863))"
+
+#: ../../source/ref-changelog.md:753
+msgid ""
+"A new code example (`quickstart_huggingface`) demonstrates usage of "
+"Hugging Face Transformers with Flower."
+msgstr ""
+"Un nouvel exemple de code (`quickstart_huggingface`) démontre "
+"l'utilisation des Transformers Hugging Face avec Flower."
+
+#: ../../source/ref-changelog.md:755
+msgid ""
+"**New MLCube code example** "
+"([#779](https://github.com/adap/flower/pull/779), "
+"[#1034](https://github.com/adap/flower/pull/1034), "
+"[#1065](https://github.com/adap/flower/pull/1065), "
+"[#1090](https://github.com/adap/flower/pull/1090))"
+msgstr ""
+"**Nouvel exemple de code MLCube** "
+"([#779](https://github.com/adap/flower/pull/779), "
+"[#1034](https://github.com/adap/flower/pull/1034), "
+"[#1065](https://github.com/adap/flower/pull/1065), "
+"[#1090](https://github.com/adap/flower/pull/1090))"
+
+#: ../../source/ref-changelog.md:757
+msgid ""
+"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube "
+"with Flower."
+msgstr ""
+"Un nouvel exemple de code (`quickstart_mlcube`) démontre l'utilisation de"
+" MLCube avec Flower."
+
+#: ../../source/ref-changelog.md:759
+msgid ""
+"**SSL-enabled server and client** "
+"([#842](https://github.com/adap/flower/pull/842), "
+"[#844](https://github.com/adap/flower/pull/844), "
+"[#845](https://github.com/adap/flower/pull/845), "
+"[#847](https://github.com/adap/flower/pull/847), "
+"[#993](https://github.com/adap/flower/pull/993), "
+"[#994](https://github.com/adap/flower/pull/994))"
+msgstr ""
+"**Serveur et client compatibles SSL** "
+"([#842](https://github.com/adap/flower/pull/842), "
+"[#844](https://github.com/adap/flower/pull/844), "
+"[#845](https://github.com/adap/flower/pull/845), "
+"[#847](https://github.com/adap/flower/pull/847), "
+"[#993](https://github.com/adap/flower/pull/993), "
+"[#994](https://github.com/adap/flower/pull/994))"
+
+#: ../../source/ref-changelog.md:761
+msgid ""
+"SSL enables secure encrypted connections between clients and servers. "
+"This release open-sources the Flower secure gRPC implementation to make "
+"encrypted communication channels accessible to all Flower users."
+msgstr ""
+"SSL permet d'établir des connexions cryptées et sécurisées entre les "
+"clients et les serveurs. Cette version met en open-source "
+"l'implémentation gRPC sécurisée de Flower afin de rendre les canaux de "
+"communication cryptés accessibles à tous les utilisateurs de Flower."
+
+#: ../../source/ref-changelog.md:763
+msgid ""
+"**Updated** `FedAdam` **and** `FedYogi` **strategies** "
+"([#885](https://github.com/adap/flower/pull/885), "
+"[#895](https://github.com/adap/flower/pull/895))"
+msgstr ""
+"**Mise à jour** `FedAdam` **et** `FedYogi` **stratégies** "
+"([#885](https://github.com/adap/flower/pull/885), "
+"[#895](https://github.com/adap/flower/pull/895))"
+
+#: ../../source/ref-changelog.md:765
+msgid ""
+"`FedAdam` and `FedAdam` match the latest version of the Adaptive "
+"Federated Optimization paper."
+msgstr "" +"`FedAdam` et `FedAdam` correspondent à la dernière version de l'article " +"sur l'optimisation fédérée adaptative." + +#: ../../source/ref-changelog.md:767 +msgid "" +"**Initialize** `start_simulation` **with a list of client IDs** " +"([#860](https://github.com/adap/flower/pull/860))" +msgstr "" +"**Initialise** `start_simulation` **avec une liste d'ID de clients** " +"([#860](https://github.com/adap/flower/pull/860))" + +#: ../../source/ref-changelog.md:769 +msgid "" +"`start_simulation` can now be called with a list of client IDs " +"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " +"`client_fn` whenever a client needs to be initialized, which can make it " +"easier to load data partitions that are not accessible through `int` " +"identifiers." +msgstr "" +"`start_simulation` peut maintenant être appelé avec une liste " +"d'identifiants de clients (`clients_ids`, type : `List[str]`). Ces " +"identifiants seront passés à `client_fn` chaque fois qu'un client doit " +"être initialisé, ce qui peut faciliter le chargement de partitions de " +"données qui ne sont pas accessibles par des identifiants `int`." 
+ +#: ../../source/ref-changelog.md:773 +msgid "" +"Update `num_examples` calculation in PyTorch code examples in " +"([#909](https://github.com/adap/flower/pull/909))" +msgstr "" +"Mettre à jour le calcul de `num_examples` dans les exemples de code " +"PyTorch dans ([#909](https://github.com/adap/flower/pull/909))" + +#: ../../source/ref-changelog.md:774 +msgid "" +"Expose Flower version through `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" +msgstr "" +"Exposer la version de Flower à travers `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" + +#: ../../source/ref-changelog.md:775 +msgid "" +"`start_server` in `app.py` now returns a `History` object containing " +"metrics from training ([#974](https://github.com/adap/flower/pull/974))" +msgstr "" +"`start_server` dans `app.py` renvoie maintenant un objet `History` " +"contenant les métriques de l'entraînement " +"([#974](https://github.com/adap/flower/pull/974))" + +#: ../../source/ref-changelog.md:776 +msgid "" +"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" +msgstr "" +"Rendre `max_workers` (utilisé par `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" + +#: ../../source/ref-changelog.md:777 +msgid "" +"Increase sleep time after server start to three seconds in all code " +"examples ([#1086](https://github.com/adap/flower/pull/1086))" +msgstr "" +"Augmente le temps de sommeil après le démarrage du serveur à trois " +"secondes dans tous les exemples de code " +"([#1086](https://github.com/adap/flower/pull/1086))" + +#: ../../source/ref-changelog.md:778 +msgid "" +"Added a new FAQ section to the documentation " +"([#948](https://github.com/adap/flower/pull/948))" +msgstr "" +"Ajout d'une nouvelle section FAQ à la documentation " +"([#948](https://github.com/adap/flower/pull/948))" + +#: ../../source/ref-changelog.md:779 +msgid "" +"And many more 
under-the-hood changes, library updates, documentation " +"changes, and tooling improvements!" +msgstr "" +"Et bien d'autres changements sous le capot, des mises à jour de la " +"bibliothèque, des modifications de la documentation et des améliorations " +"de l'outillage !" + +#: ../../source/ref-changelog.md:783 +msgid "" +"**Removed** `flwr_example` **and** `flwr_experimental` **from release " +"build** ([#869](https://github.com/adap/flower/pull/869))" +msgstr "" +"**Supprimé** `flwr_example` **et** `flwr_experimental` **de la version " +"release build** ([#869](https://github.com/adap/flower/pull/869))" + +#: ../../source/ref-changelog.md:785 +msgid "" +"The packages `flwr_example` and `flwr_experimental` have been deprecated " +"since Flower 0.12.0 and they are not longer included in Flower release " +"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " +"an upcoming release." +msgstr "" +"Les paquets `flwr_example` et `flwr_experimental` ont été dépréciés " +"depuis Flower 0.12.0 et ils ne sont plus inclus dans les builds de " +"Flower. Les extras associés (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) sont maintenant no-op et seront " +"supprimés dans une prochaine version." 
+ +#: ../../source/ref-changelog.md:787 +msgid "v0.17.0 (2021-09-24)" +msgstr "v0.17.0 (2021-09-24)" + +#: ../../source/ref-changelog.md:791 +msgid "" +"**Experimental virtual client engine** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" +msgstr "" +"**Moteur expérimental de client virtuel** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" + +#: ../../source/ref-changelog.md:793 +msgid "" +"One of Flower's goals is to enable research at scale. This release " +"enables a first (experimental) peek at a major new feature, codenamed the" +" virtual client engine. Virtual clients enable simulations that scale to " +"a (very) large number of clients on a single machine or compute cluster. " +"The easiest way to test the new functionality is to look at the two new " +"code examples called `quickstart_simulation` and `simulation_pytorch`." +msgstr "" +"L'un des objectifs de Flower est de permettre la recherche à grande " +"échelle. Cette version donne un premier aperçu (expérimental) d'une " +"nouvelle fonctionnalité majeure, connue sous le nom de code de moteur de " +"client virtuel. Les clients virtuels permettent des simulations qui " +"s'étendent à un (très) grand nombre de clients sur une seule machine ou " +"une grappe de calcul. La façon la plus simple de tester la nouvelle " +"fonctionnalité est de regarder les deux nouveaux exemples de code appelés" +" `quickstart_simulation` et `simulation_pytorch`." + +#: ../../source/ref-changelog.md:795 +msgid "" +"The feature is still experimental, so there's no stability guarantee for " +"the API. It's also not quite ready for prime time and comes with a few " +"known caveats. However, those who are curious are encouraged to try it " +"out and share their thoughts." 
+msgstr "" +"La fonction est encore expérimentale, il n'y a donc aucune garantie de " +"stabilité pour l'API. Elle n'est pas non plus tout à fait prête pour le " +"prime time et s'accompagne de quelques mises en garde connues. Cependant," +" les personnes curieuses sont encouragées à l'essayer et à faire part de " +"leurs réflexions." + +#: ../../source/ref-changelog.md:797 +msgid "" +"**New built-in strategies** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" +msgstr "" +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" + +#: ../../source/ref-changelog.md:799 +msgid "" +"FedYogi - Federated learning strategy using Yogi on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" +msgstr "" +"FedYogi - Stratégie d'apprentissage fédéré utilisant Yogi côté serveur. " +"Mise en oeuvre basée sur https://arxiv.org/abs/2003.00295" + +#: ../../source/ref-changelog.md:800 +msgid "" +"FedAdam - Federated learning strategy using Adam on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" +msgstr "" +"FedAdam - Stratégie d'apprentissage fédéré utilisant Adam côté serveur. 
" +"Mise en œuvre basée sur https://arxiv.org/abs/2003.00295" + +#: ../../source/ref-changelog.md:802 +msgid "" +"**New PyTorch Lightning code example** " +"([#617](https://github.com/adap/flower/pull/617))" +msgstr "" +"**Nouvel exemple de code PyTorch Lightning** " +"([#617](https://github.com/adap/flower/pull/617))" + +#: ../../source/ref-changelog.md:804 +msgid "" +"**New Variational Auto-Encoder code example** " +"([#752](https://github.com/adap/flower/pull/752))" +msgstr "" +"**Nouvel exemple de code d'autocodage variationnel** " +"([#752](https://github.com/adap/flower/pull/752))" + +#: ../../source/ref-changelog.md:806 +msgid "" +"**New scikit-learn code example** " +"([#748](https://github.com/adap/flower/pull/748))" +msgstr "" +"**Nouvel exemple de code scikit-learn** " +"([#748](https://github.com/adap/flower/pull/748))" + +#: ../../source/ref-changelog.md:808 +msgid "" +"**New experimental TensorBoard strategy** " +"([#789](https://github.com/adap/flower/pull/789))" +msgstr "" +"**Nouvelle stratégie expérimentale TensorBoard** " +"([#789](https://github.com/adap/flower/pull/789))" + +#: ../../source/ref-changelog.md:812 +msgid "" +"Improved advanced TensorFlow code example " +"([#769](https://github.com/adap/flower/pull/769))" +msgstr "" +"Amélioration de l'exemple de code TensorFlow avancé " +"([#769](https://github.com/adap/flower/pull/769))" + +#: ../../source/ref-changelog.md:813 +msgid "" +"Warning when `min_available_clients` is misconfigured " +"([#830](https://github.com/adap/flower/pull/830))" +msgstr "" +"Avertissement lorsque `min_available_clients` est mal configuré " +"([#830](https://github.com/adap/flower/pull/830))" + +#: ../../source/ref-changelog.md:814 +msgid "" +"Improved gRPC server docs " +"([#841](https://github.com/adap/flower/pull/841))" +msgstr "" +"Amélioration de la documentation sur le serveur gRPC " +"([#841](https://github.com/adap/flower/pull/841))" + +#: ../../source/ref-changelog.md:815 +msgid "" +"Improved error 
message in `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" +msgstr "" +"Amélioration du message d'erreur dans `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" + +#: ../../source/ref-changelog.md:816 +msgid "" +"Improved PyTorch quickstart code example " +"([#852](https://github.com/adap/flower/pull/852))" +msgstr "" +"Exemple de code de démarrage rapide PyTorch amélioré " +"([#852](https://github.com/adap/flower/pull/852))" + +#: ../../source/ref-changelog.md:820 +msgid "" +"**Disabled final distributed evaluation** " +"([#800](https://github.com/adap/flower/pull/800))" +msgstr "" +"**Désactivé l'évaluation finale distribuée** " +"([#800](https://github.com/adap/flower/pull/800))" + +#: ../../source/ref-changelog.md:822 +msgid "" +"Prior behaviour was to perform a final round of distributed evaluation on" +" all connected clients, which is often not required (e.g., when using " +"server-side evaluation). The prior behaviour can be enabled by passing " +"`force_final_distributed_eval=True` to `start_server`." +msgstr "" +"Le comportement précédent consistait à effectuer un dernier tour " +"d'évaluation distribuée sur tous les clients connectés, ce qui n'est " +"souvent pas nécessaire (par exemple, lors de l'utilisation de " +"l'évaluation côté serveur). Le comportement précédent peut être activé en" +" passant `force_final_distributed_eval=True` à `start_server`." + +#: ../../source/ref-changelog.md:824 +msgid "" +"**Renamed q-FedAvg strategy** " +"([#802](https://github.com/adap/flower/pull/802))" +msgstr "" +"**Renommé stratégie q-FedAvg** " +"([#802](https://github.com/adap/flower/pull/802))" + +#: ../../source/ref-changelog.md:826 +msgid "" +"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " +"the notation given in the original paper (q-FFL is the optimization " +"objective, q-FedAvg is the proposed solver). 
Note the the original (now " +"deprecated) `QffedAvg` class is still available for compatibility reasons" +" (it will be removed in a future release)." +msgstr "" +"La stratégie nommée `QffedAvg` a été renommée en `QFedAvg` pour mieux " +"refléter la notation donnée dans l'article original (q-FFL est l'objectif" +" d'optimisation, q-FedAvg est le solveur proposé). Notez que la classe " +"`QffedAvg` originale (maintenant obsolète) est toujours disponible pour " +"des raisons de compatibilité (elle sera supprimée dans une prochaine " +"version)." + +#: ../../source/ref-changelog.md:828 +msgid "" +"**Deprecated and renamed code example** `simulation_pytorch` **to** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" +msgstr "" +"**Exemple de code déprécié et renommé** `simulation_pytorch` **en** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" + +#: ../../source/ref-changelog.md:830 +msgid "" +"This example has been replaced by a new example. The new example is based" +" on the experimental virtual client engine, which will become the new " +"default way of doing most types of large-scale simulations in Flower. The" +" existing example was kept for reference purposes, but it might be " +"removed in the future." +msgstr "" +"Cet exemple a été remplacé par un nouvel exemple. Le nouvel exemple est " +"basé sur le moteur expérimental du client virtuel, qui deviendra la " +"nouvelle méthode par défaut pour effectuer la plupart des types de " +"simulations à grande échelle dans Flower. L'exemple existant a été " +"conservé à des fins de référence, mais il pourrait être supprimé à " +"l'avenir." 
+ +#: ../../source/ref-changelog.md:832 +msgid "v0.16.0 (2021-05-11)" +msgstr "v0.16.0 (2021-05-11)" + +#: ../../source/ref-changelog.md:836 +msgid "" +"**New built-in strategies** " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "" +"**Nouvelles stratégies intégrées** " +"([#549](https://github.com/adap/flower/pull/549))" + +#: ../../source/ref-changelog.md:838 +msgid "(abstract) FedOpt" +msgstr "(résumé) FedOpt" + +#: ../../source/ref-changelog.md:841 +msgid "" +"**Custom metrics for server and strategies** " +"([#717](https://github.com/adap/flower/pull/717))" +msgstr "" +"**Métriques personnalisées pour le serveur et les stratégies** " +"([#717](https://github.com/adap/flower/pull/717))" + +#: ../../source/ref-changelog.md:843 +msgid "" +"The Flower server is now fully task-agnostic, all remaining instances of " +"task-specific metrics (such as `accuracy`) have been replaced by custom " +"metrics dictionaries. Flower 0.15 introduced the capability to pass a " +"dictionary containing custom metrics from client to server. As of this " +"release, custom metrics replace task-specific metrics on the server." +msgstr "" +"Le serveur Flower est maintenant totalement agnostique, toutes les " +"instances restantes de métriques spécifiques à une tâche (telles que " +"`accuracy`) ont été remplacées par des dictionnaires de métriques " +"personnalisées. Flower 0.15 a introduit la possibilité de passer un " +"dictionnaire contenant des métriques personnalisées du client au serveur." +" À partir de cette version, les métriques personnalisées remplacent les " +"métriques spécifiques à une tâche sur le serveur." + +#: ../../source/ref-changelog.md:845 +msgid "" +"Custom metric dictionaries are now used in two user-facing APIs: they are" +" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " +"they enable evaluation functions passed to build-in strategies (via " +"`eval_fn`) to return more than two evaluation metrics. 
Strategies can " +"even return *aggregated* metrics dictionaries for the server to keep " +"track of." +msgstr "" +"Les dictionnaires de métriques personnalisés sont maintenant utilisés " +"dans deux API orientées vers l'utilisateur : ils sont renvoyés par les " +"méthodes de stratégie `aggregate_fit`/`aggregate_evaluate` et ils " +"permettent aux fonctions d'évaluation passées aux stratégies intégrées " +"(via `eval_fn`) de renvoyer plus de deux métriques d'évaluation. Les " +"stratégies peuvent même renvoyer des dictionnaires de métriques " +"*agrégées* pour que le serveur puisse en garder la trace." + +#: ../../source/ref-changelog.md:847 +msgid "" +"Stratey implementations should migrate their `aggregate_fit` and " +"`aggregate_evaluate` methods to the new return type (e.g., by simply " +"returning an empty `{}`), server-side evaluation functions should migrate" +" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +msgstr "" +"Les implémentations de Strategy doivent migrer leurs méthodes " +"`aggregate_fit` et `aggregate_evaluate` vers le nouveau type de retour " +"(par exemple, en renvoyant simplement un `{}` vide), les fonctions " +"d'évaluation côté serveur doivent migrer de `return loss, accuracy` à " +"`return loss, {\"accuracy\": accuracy}`." + +#: ../../source/ref-changelog.md:849 +msgid "" +"Flower 0.15-style return types are deprecated (but still supported), " +"compatibility will be removed in a future release." +msgstr "" +"Les types de retour du style Flower 0.15 sont dépréciés (mais toujours " +"pris en charge), la compatibilité sera supprimée dans une prochaine " +"version." 
+ +#: ../../source/ref-changelog.md:851 +msgid "" +"**Migration warnings for deprecated functionality** " +"([#690](https://github.com/adap/flower/pull/690))" +msgstr "" +"**Avertissements de migration pour les fonctionnalités obsolètes** " +"([#690](https://github.com/adap/flower/pull/690))" + +#: ../../source/ref-changelog.md:853 +msgid "" +"Earlier versions of Flower were often migrated to new APIs, while " +"maintaining compatibility with legacy APIs. This release introduces " +"detailed warning messages if usage of deprecated APIs is detected. The " +"new warning messages often provide details on how to migrate to more " +"recent APIs, thus easing the transition from one release to another." +msgstr "" +"Les versions antérieures de Flower ont souvent été migrées vers de " +"nouvelles API, tout en maintenant la compatibilité avec les anciennes " +"API. Cette version introduit des messages d'avertissement détaillés si " +"l'utilisation d'API obsolètes est détectée. Les nouveaux messages " +"d'avertissement fournissent souvent des détails sur la façon de migrer " +"vers des API plus récentes, facilitant ainsi la transition d'une version " +"à l'autre." 
+ +#: ../../source/ref-changelog.md:855 +msgid "" +"Improved docs and docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" +msgstr "" +"Amélioration des docs et des docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" + +#: ../../source/ref-changelog.md:857 +msgid "MXNet example and documentation" +msgstr "Exemple et documentation MXNet" + +#: ../../source/ref-changelog.md:859 +msgid "" +"FedBN implementation in example PyTorch: From Centralized To Federated " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" +msgstr "" +"Mise en œuvre de FedBN dans l'exemple PyTorch : De la centralisation à la" +" fédération ([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" + +#: ../../source/ref-changelog.md:863 +msgid "" +"**Serialization-agnostic server** " +"([#721](https://github.com/adap/flower/pull/721))" +msgstr "" +"**Serveur agnostique de sérialisation** " +"([#721](https://github.com/adap/flower/pull/721))" + +#: ../../source/ref-changelog.md:865 +msgid "" +"The Flower server is now fully serialization-agnostic. Prior usage of " +"class `Weights` (which represents parameters as deserialized NumPy " +"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " +"`Parameters` objects are fully serialization-agnostic and represents " +"parameters as byte arrays, the `tensor_type` attributes indicates how " +"these byte arrays should be interpreted (e.g., for " +"serialization/deserialization)." +msgstr "" +"Le serveur Flower est désormais totalement agnostique en matière de " +"sérialisation. 
L'utilisation antérieure de la classe `Weights` (qui " +"représente les paramètres sous forme de tableaux NumPy désérialisés) a " +"été remplacée par la classe `Parameters` (par exemple, dans `Strategy`). " +"Les objets `Parameters` sont totalement agnostiques en matière de " +"sérialisation et représentent les paramètres sous forme de tableaux " +"d'octets, les attributs `tensor_type` indiquent comment ces tableaux " +"d'octets doivent être interprétés (par exemple, pour la " +"sérialisation/désérialisation)." + +#: ../../source/ref-changelog.md:867 +msgid "" +"Built-in strategies implement this approach by handling serialization and" +" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " +"implementations should update to the slighly changed Strategy method " +"definitions. Strategy authors can consult PR " +"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" +" easily migrate to the new format." +msgstr "" +"Les stratégies intégrées mettent en œuvre cette approche en gérant en " +"interne la sérialisation et la désérialisation de `Weights`. Les " +"implémentations de stratégies personnalisées ou tierces doivent être " +"mises à jour avec les définitions de méthodes de stratégie légèrement " +"modifiées. Les auteurs de stratégies peuvent consulter le PR " +"[#721](https://github.com/adap/flower/pull/721) pour voir comment les " +"stratégies peuvent facilement migrer vers le nouveau format." 
+ +#: ../../source/ref-changelog.md:869 +msgid "" +"Deprecated `flwr.server.Server.evaluate`, use " +"`flwr.server.Server.evaluate_round` instead " +"([#717](https://github.com/adap/flower/pull/717))" +msgstr "" +"Déclassé `flwr.server.Server.evaluate`, utiliser " +"`flwr.server.Server.evaluate_round` à la place " +"([#717](https://github.com/adap/flower/pull/717))" + +#: ../../source/ref-changelog.md:871 +msgid "v0.15.0 (2021-03-12)" +msgstr "v0.15.0 (2021-03-12)" + +#: ../../source/ref-changelog.md:875 +msgid "" +"**Server-side parameter initialization** " +"([#658](https://github.com/adap/flower/pull/658))" +msgstr "" +"**Initialisation des paramètres côté serveur** " +"([#658](https://github.com/adap/flower/pull/658))" + +#: ../../source/ref-changelog.md:877 +msgid "" +"Model parameters can now be initialized on the server-side. Server-side " +"parameter initialization works via a new `Strategy` method called " +"`initialize_parameters`." +msgstr "" +"Les paramètres du modèle peuvent maintenant être initialisés côté " +"serveur. L'initialisation des paramètres côté serveur fonctionne via une " +"nouvelle méthode `Strategy` appelée `initialize_parameters`." + +#: ../../source/ref-changelog.md:879 +msgid "" +"Built-in strategies support a new constructor argument called " +"`initial_parameters` to set the initial parameters. Built-in strategies " +"will provide these initial parameters to the server on startup and then " +"delete them to free the memory afterwards." +msgstr "" +"Les stratégies intégrées prennent en charge un nouvel argument du " +"constructeur appelé `initial_parameters` pour définir les paramètres " +"initiaux. Les stratégies intégrées fourniront ces paramètres initiaux au " +"serveur au démarrage et les supprimeront ensuite pour libérer la mémoire." 
+ +#: ../../source/ref-changelog.md:898 +msgid "" +"If no initial parameters are provided to the strategy, the server will " +"continue to use the current behaviour (namely, it will ask one of the " +"connected clients for its parameters and use these as the initial global " +"parameters)." +msgstr "" +"Si aucun paramètre initial n'est fourni à la stratégie, le serveur " +"continuera à utiliser le comportement actuel (à savoir qu'il demandera à " +"l'un des clients connectés ses paramètres et les utilisera comme " +"paramètres globaux initiaux)." + +#: ../../source/ref-changelog.md:900 +msgid "Deprecations" +msgstr "Dépréciations" + +#: ../../source/ref-changelog.md:902 +msgid "" +"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " +"`flwr.server.strategy.FedAvg`, which is equivalent)" +msgstr "" +"Déclasser `flwr.server.strategy.DefaultStrategy` (migrer vers " +"`flwr.server.strategy.FedAvg`, qui est équivalent)" + +#: ../../source/ref-changelog.md:904 +msgid "v0.14.0 (2021-02-18)" +msgstr "v0.14.0 (2021-02-18)" + +#: ../../source/ref-changelog.md:908 +msgid "" +"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" +msgstr "" +"**Généralisé** `Client.fit` **et** `Client.evaluate` **valeurs de " +"retour** ([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" + +#: ../../source/ref-changelog.md:910 +msgid "" +"Clients can now return an additional dictionary mapping `str` keys to " +"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " +"This means one can return almost arbitrary values from `fit`/`evaluate` " +"and make use of them on the server side!" 
+msgstr "" +"Les clients peuvent maintenant renvoyer un dictionnaire supplémentaire " +"associant les clés `str` aux valeurs des types suivants : `bool`, " +"`bytes`, `float`, `int`, `str`. Cela signifie que l'on peut renvoyer des " +"valeurs presque arbitraires de `fit`/`evaluate` et les utiliser du côté " +"du serveur !" + +#: ../../source/ref-changelog.md:912 +msgid "" +"This improvement also allowed for more consistent return types between " +"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " +"dict)` representing the loss, number of examples, and a dictionary " +"holding arbitrary problem-specific values like accuracy." +msgstr "" +"Cette amélioration a également permis de rendre plus cohérents les types " +"de retour entre `fit` et `evaluate` : `evaluate` devrait maintenant " +"retourner un tuple `(float, int, dict)` représentant la perte, le nombre " +"d'exemples, et un dictionnaire contenant des valeurs arbitraires " +"spécifiques au problème comme la précision." + +#: ../../source/ref-changelog.md:914 +msgid "" +"In case you wondered: this feature is compatible with existing projects, " +"the additional dictionary return value is optional. New code should " +"however migrate to the new return types to be compatible with upcoming " +"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " +"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " +"details." +msgstr "" +"Au cas où tu te poserais la question : cette fonctionnalité est " +"compatible avec les projets existants, la valeur de retour supplémentaire" +" du dictionnaire est facultative. Le nouveau code doit cependant migrer " +"vers les nouveaux types de retour pour être compatible avec les " +"prochaines versions de Flower (`fit` : `List[np.ndarray], int, Dict[str, " +"Scalar]`, `evaluate` : `float, int, Dict[str, Scalar]`). Voir l'exemple " +"ci-dessous pour plus de détails." 
+ +#: ../../source/ref-changelog.md:916 +msgid "" +"*Code example:* note the additional dictionary return values in both " +"`FlwrClient.fit` and `FlwrClient.evaluate`:" +msgstr "" +"*Exemple de code:* note les valeurs de retour du dictionnaire " +"supplémentaires dans `FlwrClient.fit` et `FlwrClient.evaluate` :" + +#: ../../source/ref-changelog.md:931 +msgid "" +"**Generalized** `config` **argument in** `Client.fit` **and** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +msgstr "" +"**Généralisé** `config` **argument dans** `Client.fit` **et** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" + +#: ../../source/ref-changelog.md:933 +msgid "" +"The `config` argument used to be of type `Dict[str, str]`, which means " +"that dictionary values were expected to be strings. The new release " +"generalizes this to enable values of the following types: `bool`, " +"`bytes`, `float`, `int`, `str`." +msgstr "" +"L'argument `config` était auparavant de type `Dict[str, str]`, ce qui " +"signifie que les valeurs du dictionnaire devaient être des chaînes. La " +"nouvelle version généralise cela pour permettre les valeurs des types " +"suivants : `bool`, `bytes`, `float`, `int`, `str`." + +#: ../../source/ref-changelog.md:935 +msgid "" +"This means one can now pass almost arbitrary values to `fit`/`evaluate` " +"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" +"side and `int(config[\"epochs\"])` on the client side!" +msgstr "" +"Cela signifie que l'on peut maintenant passer des valeurs presque " +"arbitraires à `fit`/`evaluate` en utilisant le dictionnaire `config`. " +"Yay, plus de `str(epochs)` du côté serveur et `int(config[\"epochs\"])` " +"du côté client !" 
+ +#: ../../source/ref-changelog.md:937 +msgid "" +"*Code example:* note that the `config` dictionary now contains non-`str` " +"values in both `Client.fit` and `Client.evaluate`:" +msgstr "" +"*Exemple de code:* Notez que le dictionnaire `config` contient maintenant" +" des valeurs autres que `str` dans `Client.fit` et `Client.evaluate` :" + +#: ../../source/ref-changelog.md:954 +msgid "v0.13.0 (2021-01-08)" +msgstr "v0.13.0 (2021-01-08)" + +#: ../../source/ref-changelog.md:958 +msgid "" +"New example: PyTorch From Centralized To Federated " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "" +"Nouvel exemple : PyTorch de centralisé à fédéré " +"([#549](https://github.com/adap/flower/pull/549))" + +#: ../../source/ref-changelog.md:959 +msgid "Improved documentation" +msgstr "Amélioration de la documentation" + +#: ../../source/ref-changelog.md:960 +msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" +msgstr "" +"Nouveau thème de documentation " +"([#551](https://github.com/adap/flower/pull/551))" + +#: ../../source/ref-changelog.md:961 +msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" + +#: ../../source/ref-changelog.md:962 +msgid "" +"Updated examples documentation " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "" +"Mise à jour de la documentation des exemples " +"([#549](https://github.com/adap/flower/pull/549))" + +#: ../../source/ref-changelog.md:963 +msgid "" +"Removed obsolete documentation " +"([#548](https://github.com/adap/flower/pull/548))" +msgstr "" +"Suppression de la documentation obsolète " +"([#548](https://github.com/adap/flower/pull/548))" + +#: ../../source/ref-changelog.md:965 +msgid "Bugfix:" +msgstr "Correction de bogues :" + +#: ../../source/ref-changelog.md:967 +msgid "" +"`Server.fit` does not disconnect clients when finished, disconnecting the" +" clients is now handled in 
`flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." +msgstr "" +"`Server.fit` ne déconnecte pas les clients lorsqu'il est terminé, la " +"déconnexion des clients est maintenant gérée dans " +"`flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." + +#: ../../source/ref-changelog.md:969 +msgid "v0.12.0 (2020-12-07)" +msgstr "v0.12.0 (2020-12-07)" + +#: ../../source/ref-changelog.md:971 ../../source/ref-changelog.md:987 +msgid "Important changes:" +msgstr "Changements importants :" + +#: ../../source/ref-changelog.md:973 +msgid "" +"Added an example for embedded devices " +"([#507](https://github.com/adap/flower/pull/507))" +msgstr "" +"Ajout d'un exemple pour les périphériques embarqués " +"([#507](https://github.com/adap/flower/pull/507))" + +#: ../../source/ref-changelog.md:974 +msgid "" +"Added a new NumPyClient (in addition to the existing KerasClient) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" +msgstr "" +"Ajout d'un nouveau NumPyClient (en plus du KerasClient existant) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" + +#: ../../source/ref-changelog.md:975 +msgid "" +"Deprecated `flwr_example` package and started to migrate examples into " +"the top-level `examples` directory " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" +msgstr "" +"Déclassement du paquet `flwr_example` et migration des exemples dans le " +"répertoire de premier niveau `examples` " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" + +#: ../../source/ref-changelog.md:977 +msgid "v0.11.0 (2020-11-30)" +msgstr "v0.11.0 (2020-11-30)" + +#: ../../source/ref-changelog.md:979 +msgid "Incompatible 
changes:" +msgstr "Changements incompatibles :" + +#: ../../source/ref-changelog.md:981 +msgid "" +"Renamed strategy methods " +"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " +"Flower's public APIs. Other public methods/functions (e.g., every method " +"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " +"which is why we're removing it from the four methods in Strategy. To " +"migrate rename the following `Strategy` methods accordingly:" +msgstr "" +"Renommé les méthodes de stratégie " +"([#486](https://github.com/adap/flower/pull/486)) pour unifier le nommage" +" des API publiques de Flower. D'autres méthodes/fonctions publiques (par " +"exemple, toutes les méthodes de `Client`, mais aussi `Strategy.evaluate`)" +" n'utilisent pas le préfixe `on_`, c'est pourquoi nous le supprimons des " +"quatre méthodes de Stratégie. Pour migrer, renommez les méthodes de " +"`Strategy` suivantes en conséquence :" + +#: ../../source/ref-changelog.md:982 +msgid "`on_configure_evaluate` => `configure_evaluate`" +msgstr "`on_configure_evaluate` => `configure_evaluate`" + +#: ../../source/ref-changelog.md:983 +msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" +msgstr "`on_aggregate_evaluate` => `aggregate_evaluate`" + +#: ../../source/ref-changelog.md:984 +msgid "`on_configure_fit` => `configure_fit`" +msgstr "`on_configure_fit` => `configure_fit`" + +#: ../../source/ref-changelog.md:985 +msgid "`on_aggregate_fit` => `aggregate_fit`" +msgstr "`on_aggregate_fit` => `aggregate_fit`" + +#: ../../source/ref-changelog.md:989 +msgid "" +"Deprecated `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). To migrate use " +"`FedAvg` instead." +msgstr "" +"Déclassé `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). Pour migrer, utilisez " +"`FedAvg` à la place." 
+ +#: ../../source/ref-changelog.md:990 +msgid "" +"Simplified examples and baselines " +"([#484](https://github.com/adap/flower/pull/484))." +msgstr "" +"Exemples simplifiés et lignes de base " +"([#484](https://github.com/adap/flower/pull/484))." + +#: ../../source/ref-changelog.md:991 +msgid "" +"Removed presently unused `on_conclude_round` from strategy interface " +"([#483](https://github.com/adap/flower/pull/483))." +msgstr "" +"Suppression de `on_conclude_round` actuellement inutilisé de l'interface " +"de stratégie ([#483](https://github.com/adap/flower/pull/483))." + +#: ../../source/ref-changelog.md:992 +msgid "" +"Set minimal Python version to 3.6.1 instead of 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." +msgstr "" +"Fixe la version minimale de Python à 3.6.1 au lieu de 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." + +#: ../../source/ref-changelog.md:993 +msgid "" +"Improved `Strategy` docstrings " +"([#470](https://github.com/adap/flower/pull/470))." +msgstr "" +"Amélioration des docstrings `Stratégie` " +"([#470](https://github.com/adap/flower/pull/470))." + +#: ../../source/ref-example-projects.rst:2 +#, fuzzy +msgid "Example projects" +msgstr "Exemples de PyTorch" + +#: ../../source/ref-example-projects.rst:4 +msgid "" +"Flower comes with a number of usage examples. The examples demonstrate " +"how Flower can be used to federate different kinds of existing machine " +"learning pipelines, usually leveraging popular machine learning " +"frameworks such as `PyTorch `_ or `TensorFlow " +"`_." +msgstr "" +"Flower est livré avec un certain nombre d'exemples d'utilisation, qui " +"montrent comment Flower peut être utilisé pour fédérer différents types " +"de pipelines d'apprentissage automatique existants, qui s'appuient " +"généralement sur des frameworks d'apprentissage automatique populaires " +"tels que `PyTorch `_ ou `TensorFlow " +"`_." 
+ +#: ../../source/ref-example-projects.rst:11 +msgid "" +"Flower usage examples used to be bundled with Flower in a package called " +"``flwr_example``. We are migrating those examples to standalone projects " +"to make them easier to use. All new examples are based in the directory " +"`examples `_." +msgstr "" +"Les exemples d'utilisation de Flower étaient auparavant regroupés avec " +"Flower dans un paquet appelé ``flwr_example``. Nous migrons ces exemples " +"vers des projets autonomes pour les rendre plus faciles à utiliser. Tous " +"les nouveaux exemples sont basés dans le répertoire ``examples " +"`_." + +#: ../../source/ref-example-projects.rst:16 +msgid "The following examples are available as standalone projects." +msgstr "Les exemples suivants sont disponibles sous forme de projets autonomes." + +#: ../../source/ref-example-projects.rst:20 +msgid "Quickstart TensorFlow/Keras" +msgstr "Démarrage rapide de TensorFlow/Keras" + +#: ../../source/ref-example-projects.rst:22 +msgid "" +"The TensorFlow/Keras quickstart example shows CIFAR-10 image " +"classification with MobileNetV2:" +msgstr "" +"L'exemple de démarrage rapide TensorFlow/Keras montre la classification " +"d'images CIFAR-10 avec MobileNetV2 :" + +#: ../../source/ref-example-projects.rst:25 +#, fuzzy +msgid "" +"`Quickstart TensorFlow (Code) " +"`_" +msgstr "" +"`Quickstart TensorFlow (Code) " +"`_" + +#: ../../source/ref-example-projects.rst:26 +#, fuzzy +msgid "" +"`Quickstart TensorFlow (Tutorial) `_" +msgstr "" +"`Quickstart TensorFlow (Tutorial) `_" + +#: ../../source/ref-example-projects.rst:27 +msgid "" +"`Quickstart TensorFlow (Blog Post) `_" +msgstr "" +"`Quickstart TensorFlow (Blog Post) `_" + +#: ../../source/ref-example-projects.rst:31 +#: ../../source/tutorial-quickstart-pytorch.rst:5 +msgid "Quickstart PyTorch" +msgstr "Démarrage rapide de PyTorch" + +#: ../../source/ref-example-projects.rst:33 +msgid "" +"The PyTorch quickstart example shows CIFAR-10 image classification with a" +" 
simple Convolutional Neural Network:" +msgstr "" +"L'exemple de démarrage rapide PyTorch montre la classification d'images " +"CIFAR-10 avec un simple réseau neuronal convolutif :" + +#: ../../source/ref-example-projects.rst:36 +#, fuzzy +msgid "" +"`Quickstart PyTorch (Code) " +"`_" +msgstr "" +"`Quickstart PyTorch (Code) " +"`_" + +#: ../../source/ref-example-projects.rst:37 +#, fuzzy +msgid "" +"`Quickstart PyTorch (Tutorial) `_" +msgstr "" +"`Quickstart PyTorch (Tutorial) `_" + +#: ../../source/ref-example-projects.rst:41 +msgid "PyTorch: From Centralized To Federated" +msgstr "PyTorch : De la centralisation à la fédération" + +#: ../../source/ref-example-projects.rst:43 +msgid "" +"This example shows how a regular PyTorch project can be federated using " +"Flower:" +msgstr "" +"Cet exemple montre comment un projet PyTorch ordinaire peut être fédéré à" +" l'aide de Flower :" + +#: ../../source/ref-example-projects.rst:45 +#, fuzzy +msgid "" +"`PyTorch: From Centralized To Federated (Code) " +"`_" +msgstr "" +"`PyTorch : De la centralisation à la fédération (Code) " +"`_" + +#: ../../source/ref-example-projects.rst:46 +#, fuzzy +msgid "" +"`PyTorch: From Centralized To Federated (Tutorial) " +"`_" +msgstr "" +"`PyTorch : De la centralisation à la fédération (Tutoriel) " +"`_" + +#: ../../source/ref-example-projects.rst:50 +msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" +msgstr "Apprentissage fédéré sur Raspberry Pi et Nvidia Jetson" + +#: ../../source/ref-example-projects.rst:52 +msgid "" +"This example shows how Flower can be used to build a federated learning " +"system that run across Raspberry Pi and Nvidia Jetson:" +msgstr "" +"Cet exemple montre comment Flower peut être utilisé pour construire un " +"système d'apprentissage fédéré qui fonctionne sur Raspberry Pi et Nvidia " +"Jetson :" + +#: ../../source/ref-example-projects.rst:54 +#, fuzzy +msgid "" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " +"`_" +msgstr "" 
+"`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Code) " +"`_" + +#: ../../source/ref-example-projects.rst:55 +msgid "" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " +"`_" +msgstr "" +"`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Blog Post) " +"`_" + +#: ../../source/ref-example-projects.rst:60 +msgid "Legacy Examples (`flwr_example`)" +msgstr "Exemples hérités (`flwr_example`)" + +#: ../../source/ref-example-projects.rst:63 +msgid "" +"The useage examples in `flwr_example` are deprecated and will be removed " +"in the future. New examples are provided as standalone projects in " +"`examples `_." +msgstr "" +"Les exemples d'utilisation dans `flwr_example` sont obsolètes et seront " +"supprimés à l'avenir. De nouveaux exemples sont fournis en tant que " +"projets autonomes dans `examples " +"`_." + +#: ../../source/ref-example-projects.rst:69 +msgid "Extra Dependencies" +msgstr "Dépendances supplémentaires" + +#: ../../source/ref-example-projects.rst:71 +msgid "" +"The core Flower framework keeps a minimal set of dependencies. The " +"examples demonstrate Flower in the context of different machine learning " +"frameworks, so additional dependencies need to be installed before an " +"example can be run." +msgstr "" +"Le noyau du framework Flower conserve un ensemble minimal de dépendances." +" Les exemples démontrent Flower dans le contexte de différents frameworks" +" d'apprentissage automatique, de sorte que des dépendances " +"supplémentaires doivent être installées avant qu'un exemple puisse être " +"exécuté." 
+ +#: ../../source/ref-example-projects.rst:75 +msgid "For PyTorch examples::" +msgstr "Pour les exemples de PyTorch ::" + +#: ../../source/ref-example-projects.rst:79 +msgid "For TensorFlow examples::" +msgstr "Pour les exemples de TensorFlow ::" + +#: ../../source/ref-example-projects.rst:83 +msgid "For both PyTorch and TensorFlow examples::" +msgstr "Pour les exemples PyTorch et TensorFlow ::" + +#: ../../source/ref-example-projects.rst:87 +msgid "" +"Please consult :code:`pyproject.toml` for a full list of possible extras " +"(section :code:`[tool.poetry.extras]`)." +msgstr "" +"Tu peux consulter :code:`pyproject.toml` pour une liste complète des " +"extras possibles (section :code:`[tool.poetry.extras]`)." + +#: ../../source/ref-example-projects.rst:92 +msgid "PyTorch Examples" +msgstr "Exemples de PyTorch" + +#: ../../source/ref-example-projects.rst:94 +msgid "" +"Our PyTorch examples are based on PyTorch 1.7. They should work with " +"other releases as well. So far, we provide the following examples." +msgstr "" +"Nos exemples PyTorch sont basés sur PyTorch 1.7. Ils devraient " +"fonctionner avec d'autres versions également. Jusqu'à présent, nous " +"fournissons les exemples suivants." + +#: ../../source/ref-example-projects.rst:98 +msgid "CIFAR-10 Image Classification" +msgstr "Classification d'images CIFAR-10" + +#: ../../source/ref-example-projects.rst:100 +msgid "" +"`CIFAR-10 and CIFAR-100 `_ " +"are popular RGB image datasets. The Flower CIFAR-10 example uses PyTorch " +"to train a simple CNN classifier in a federated learning setup with two " +"clients." +msgstr "" +"`CIFAR-10 et CIFAR-100 `_ " +"sont des ensembles de données d'images RVB populaires. L'exemple Flower " +"CIFAR-10 utilise PyTorch pour former un classificateur CNN simple dans " +"une configuration d'apprentissage fédéré avec deux clients." 
+ +#: ../../source/ref-example-projects.rst:104 +#: ../../source/ref-example-projects.rst:121 +#: ../../source/ref-example-projects.rst:146 +msgid "First, start a Flower server:" +msgstr "Tout d'abord, démarre un serveur Flower :" + +#: ../../source/ref-example-projects.rst:106 +msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" + +#: ../../source/ref-example-projects.rst:108 +#: ../../source/ref-example-projects.rst:125 +#: ../../source/ref-example-projects.rst:150 +msgid "Then, start the two clients in a new terminal window:" +msgstr "Ensuite, démarre les deux clients dans une nouvelle fenêtre de terminal :" + +#: ../../source/ref-example-projects.rst:110 +msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" + +#: ../../source/ref-example-projects.rst:112 +msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." +msgstr "Pour plus de détails, voir :code:`src/py/flwr_example/pytorch_cifar`." + +#: ../../source/ref-example-projects.rst:115 +msgid "ImageNet-2012 Image Classification" +msgstr "ImageNet-2012 Classification des images" + +#: ../../source/ref-example-projects.rst:117 +msgid "" +"`ImageNet-2012 `_ is one of the major computer" +" vision datasets. The Flower ImageNet example uses PyTorch to train a " +"ResNet-18 classifier in a federated learning setup with ten clients." +msgstr "" +"`ImageNet-2012 `_ est l'un des principaux " +"ensembles de données de vision par ordinateur. L'exemple Flower ImageNet " +"utilise PyTorch pour entraîner un classificateur ResNet-18 dans une " +"configuration d'apprentissage fédéré avec dix clients." 
+
+#: ../../source/ref-example-projects.rst:123
+msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh"
+msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh"
+
+#: ../../source/ref-example-projects.rst:127
+msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh"
+msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh"
+
+#: ../../source/ref-example-projects.rst:129
+msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`."
+msgstr "Pour plus de détails, voir :code:`src/py/flwr_example/pytorch_imagenet`."
+
+#: ../../source/ref-example-projects.rst:133
+msgid "TensorFlow Examples"
+msgstr "Exemples de TensorFlow"
+
+#: ../../source/ref-example-projects.rst:135
+msgid ""
+"Our TensorFlow examples are based on TensorFlow 2.0 or newer. So far, we "
+"provide the following examples."
+msgstr ""
+"Nos exemples TensorFlow sont basés sur TensorFlow 2.0 ou une version plus"
+" récente. Jusqu'à présent, nous te proposons les exemples suivants."
+
+#: ../../source/ref-example-projects.rst:139
+msgid "Fashion-MNIST Image Classification"
+msgstr "Classification d'images Fashion-MNIST"
+
+#: ../../source/ref-example-projects.rst:141
+msgid ""
+"`Fashion-MNIST `_ is "
+"often used as the \"Hello, world!\" of machine learning. We follow this "
+"tradition and provide an example which samples random local datasets from"
+" Fashion-MNIST and trains a simple image classification model over those "
+"partitions."
+msgstr ""
+"`Fashion-MNIST `_ est "
+"souvent utilisé comme le \"Hello, world!\" de l'apprentissage "
+"automatique. Nous suivons cette tradition et fournissons un exemple qui "
+"échantillonne des ensembles de données locales aléatoires de "
+"Fashion-MNIST et entraîne un modèle simple de classification d'images "
+"sur ces partitions."
+
+#: ../../source/ref-example-projects.rst:148
+msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh"
+msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh"
+
+#: ../../source/ref-example-projects.rst:152
+msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh"
+msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh"
+
+#: ../../source/ref-example-projects.rst:154
+msgid ""
+"For more details, see "
+":code:`src/py/flwr_example/tensorflow_fashion_mnist`."
+msgstr ""
+"Pour plus de détails, voir "
+":code:`src/py/flwr_example/tensorflow_fashion_mnist`."
+
+#: ../../source/ref-faq.rst:4
+msgid ""
+"This page collects answers to commonly asked questions about Federated "
+"Learning with Flower."
+msgstr ""
+"Cette page rassemble les réponses aux questions les plus fréquemment "
+"posées sur l'apprentissage fédéré avec Flower."
+
+#: ../../source/ref-faq.rst
+msgid ":fa:`eye,mr-1` Can Flower run on Juptyter Notebooks / Google Colab?"
+msgstr ""
+":fa:`eye,mr-1` Flower peut-il fonctionner sur les notebooks Jupyter / "
+"Google Colab ?"
+
+#: ../../source/ref-faq.rst:8
+msgid ""
+"Yes, it can! Flower even comes with a few under-the-hood optimizations to"
+" make it work even better on Colab. Here's a quickstart example:"
+msgstr ""
+"Oui, c'est possible ! Flower est même livré avec quelques optimisations "
+"pour qu'il fonctionne encore mieux sur Colab. Voici un exemple de "
+"démarrage rapide :"
+
+#: ../../source/ref-faq.rst:10
+#, fuzzy
+msgid ""
+"`Flower simulation PyTorch "
+"`_"
+msgstr ""
+"`Flower Quickstart (TensorFlow/Keras) "
+"`_"
+
+#: ../../source/ref-faq.rst:11
+#, fuzzy
+msgid ""
+"`Flower simulation TensorFlow/Keras "
+"`_"
+msgstr ""
+"`Flower Quickstart (TensorFlow/Keras) "
+"`_"
+
+#: ../../source/ref-faq.rst
+msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?"
+msgstr "" +":fa:`eye,mr-1` Comment puis-je faire fonctionner l'apprentissage fédéré " +"sur un Raspberry Pi ?" + +#: ../../source/ref-faq.rst:15 +#, fuzzy +msgid "" +"Find the `blog post about federated learning on embedded device here " +"`_" +" and the corresponding `GitHub code example " +"`_." +msgstr "" +"Trouve le `blog post about federated learning on embedded device ici " +"`_" +" et l'exemple de code GitHub correspondant " +"`_." + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +msgstr "" +":fa:`eye,mr-1` Est-ce que Flower prend en charge l'apprentissage fédéré " +"sur les appareils Android ?" + +#: ../../source/ref-faq.rst:19 +#, fuzzy +msgid "" +"Yes, it does. Please take a look at our `blog post " +"`_ or check out the code examples:" +msgstr "" +"Oui. Jetez un coup d'œil à notre `blog post " +"`_ ou consultez l'`exemple de code Android sur GitHub" +" `_." + +#: ../../source/ref-faq.rst:21 +msgid "" +"`Android Kotlin example `_" +msgstr "" + +#: ../../source/ref-faq.rst:22 +msgid "`Android Java example `_" +msgstr "" + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +msgstr "" +":fa:`eye,mr-1` Puis-je combiner l'apprentissage fédéré avec la blockchain" +" ?" + +#: ../../source/ref-faq.rst:26 +msgid "" +"Yes, of course. A list of available examples using Flower within a " +"blockchain environment is available here:" +msgstr "" +"Oui, bien sûr, une liste d'exemples disponibles utilisant Flower dans un " +"environnement blockchain est disponible ici :" + +#: ../../source/ref-faq.rst:28 +msgid "" +"`Flower meets Nevermined GitHub Repository `_." +msgstr "" +"`Flower meets Nevermined GitHub Repository `_." + +#: ../../source/ref-faq.rst:29 +msgid "" +"`Flower meets Nevermined YouTube video " +"`_." +msgstr "" +"`Flower rencontre Nevermined vidéo YouTube " +"`_." + +#: ../../source/ref-faq.rst:30 +msgid "" +"`Flower meets KOSMoS `_." 
+msgstr "" +"`Flower rencontre KOSMoS `_." + +#: ../../source/ref-faq.rst:31 +msgid "" +"`Flower meets Talan blog post `_ ." +msgstr "" +"`Flower meets Talan blog post `_ ." + +#: ../../source/ref-faq.rst:32 +msgid "" +"`Flower meets Talan GitHub Repository " +"`_ ." +msgstr "" +"`Flower rencontre Talan Dépôt GitHub " +"`_ ." + +#: ../../source/ref-telemetry.md:1 +msgid "Telemetry" +msgstr "Télémétrie" + +#: ../../source/ref-telemetry.md:3 +msgid "" +"The Flower open-source project collects **anonymous** usage metrics to " +"make well-informed decisions to improve Flower. Doing this enables the " +"Flower team to understand how Flower is used and what challenges users " +"might face." +msgstr "" +"Le projet open-source Flower recueille des mesures d'utilisation " +"**anonymes** afin de prendre des décisions éclairées pour améliorer " +"Flower. Cela permet à l'équipe de Flower de comprendre comment Flower est" +" utilisé et quels sont les défis auxquels les utilisateurs peuvent être " +"confrontés." + +#: ../../source/ref-telemetry.md:5 +msgid "" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users that do not want to share anonymous usage metrics." +msgstr "" +"**Flower est un cadre convivial pour l'IA collaborative et la science des" +" données.** En restant fidèle à cette déclaration, Flower permet de " +"désactiver facilement la télémétrie pour les utilisateurs qui ne " +"souhaitent pas partager des mesures d'utilisation anonymes." 
+ +#: ../../source/ref-telemetry.md:7 +msgid "Principles" +msgstr "Principes" + +#: ../../source/ref-telemetry.md:9 +msgid "We follow strong principles guarding anonymous usage metrics collection:" +msgstr "" +"Nous suivons des principes stricts concernant la collecte de données " +"anonymes sur l'utilisation :" + +#: ../../source/ref-telemetry.md:11 +msgid "" +"**Optional:** You will always be able to disable telemetry; read on to " +"learn “[How to opt-out](#how-to-opt-out)”." +msgstr "" +"**Optionnel:** Tu pourras toujours désactiver la télémétrie ; lis la " +"suite pour apprendre \"[Comment se désengager](#how-to-opt-out)\"." + +#: ../../source/ref-telemetry.md:12 +msgid "" +"**Anonymous:** The reported usage metrics are anonymous and do not " +"contain any personally identifiable information (PII). See “[Collected " +"metrics](#collected-metrics)” to understand what metrics are being " +"reported." +msgstr "" +"**Anonyme:** Les mesures d'utilisation rapportées sont anonymes et ne " +"contiennent aucune information personnelle identifiable (PII). Voir " +"\"[Collected metrics](#collected-metrics)\" pour comprendre quelles " +"mesures sont rapportées." + +#: ../../source/ref-telemetry.md:13 +msgid "" +"**Transparent:** You can easily inspect what anonymous metrics are being " +"reported; see the section “[How to inspect what is being reported](#how-" +"to-inspect-what-is-being-reported)”" +msgstr "" +"**Transparent:** Tu peux facilement inspecter les métriques anonymes qui " +"sont rapportées ; voir la section \"[Comment inspecter ce qui est " +"rapporté](#how-to-inspect-what-is-being-reported)\"" + +#: ../../source/ref-telemetry.md:14 +#, fuzzy +msgid "" +"**Open for feedback:** You can always reach out to us if you have " +"feedback; see the section “[How to contact us](#how-to-contact-us)” for " +"details." 
+msgstr ""
+"**Ouvert pour les commentaires:** Tu peux toujours nous contacter si tu "
+"as des commentaires ; voir la section \"[Comment nous contacter](#how-"
+"to-contact-us)\" pour plus de détails."
+
+#: ../../source/ref-telemetry.md:16
+msgid "How to opt-out"
+msgstr "Comment se désinscrire"
+
+#: ../../source/ref-telemetry.md:18
+msgid ""
+"When Flower starts, it will check for an environment variable called "
+"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting "
+"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or "
+"client, simply do so by prepending your command as in:"
+msgstr ""
+"Lorsque Flower démarre, il vérifie la présence d'une variable "
+"d'environnement appelée `FLWR_TELEMETRY_ENABLED`. La télémétrie peut "
+"facilement être désactivée en réglant `FLWR_TELEMETRY_ENABLED=0`. En "
+"supposant que tu démarres un serveur ou un client Flower, fais-le "
+"simplement en faisant précéder ta commande de la façon suivante :"
+
+#: ../../source/ref-telemetry.md:24
+msgid ""
+"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example,"
+" `.bashrc` (or whatever configuration file applies to your environment) "
+"to disable Flower telemetry permanently."
+msgstr ""
+"Tu peux aussi exporter `FLWR_TELEMETRY_ENABLED=0` dans, par exemple, "
+"`.bashrc` (ou tout autre fichier de configuration qui s'applique à ton "
+"environnement) pour désactiver la télémétrie de Flower de façon "
+"permanente."
+
+#: ../../source/ref-telemetry.md:26
+msgid "Collected metrics"
+msgstr "Mesures collectées"
+
+#: ../../source/ref-telemetry.md:28
+msgid "Flower telemetry collects the following metrics:"
+msgstr "La télémétrie de Flower recueille les métriques suivantes :"
+
+#: ../../source/ref-telemetry.md:30
+msgid ""
+"**Flower version.** Understand which versions of Flower are currently "
+"being used. This helps us to decide whether we should invest effort into "
+"releasing a patch version for an older version of Flower or instead use "
+"the bandwidth to build new features."
+msgstr ""
+"**Version de Flower.** Comprendre quelles versions de Flower sont "
+"actuellement utilisées. Cela nous aide à décider si nous devons investir "
+"des efforts dans la publication d'une version corrective pour une "
+"version plus ancienne de Flower ou si nous devons plutôt utiliser la "
+"bande passante pour développer de nouvelles fonctionnalités."
+
+#: ../../source/ref-telemetry.md:32
+msgid ""
+"**Operating system.** Enables us to answer questions such as: *Should we "
+"create more guides for Linux, macOS, or Windows?*"
+msgstr ""
+"**Système d'exploitation.** Nous permet de répondre à des questions "
+"telles que : *Faudrait-il créer plus de guides pour Linux, macOS ou "
+"Windows ?*"
+
+#: ../../source/ref-telemetry.md:34
+msgid ""
+"**Python version.** Knowing the Python version helps us, for example, to "
+"decide whether we should invest effort into supporting old versions of "
+"Python or stop supporting them and start taking advantage of new Python "
+"features."
+msgstr ""
+"**Version de Python.** Connaître la version de Python nous aide, par "
+"exemple, à décider si nous devons investir des efforts dans la prise en "
+"charge des anciennes versions de Python ou cesser de les prendre en "
+"charge et commencer à tirer parti des nouvelles fonctionnalités de "
+"Python."
+
+#: ../../source/ref-telemetry.md:36
+msgid ""
+"**Hardware properties.** Understanding the hardware environment that "
+"Flower is being used in helps to decide whether we should, for example, "
+"put more effort into supporting low-resource environments."
+msgstr ""
+"**Propriétés matérielles.** Comprendre l'environnement matériel dans "
+"lequel Flower est utilisé permet de décider si nous devrions, par "
+"exemple, faire plus d'efforts pour prendre en charge les environnements "
+"à faibles ressources."
+ +#: ../../source/ref-telemetry.md:38 +msgid "" +"**Execution mode.** Knowing what execution mode Flower starts in enables " +"us to understand how heavily certain features are being used and better " +"prioritize based on that." +msgstr "" +"**Mode d'exécution** Connaître le mode d'exécution dans lequel Flower " +"démarre nous permet de comprendre à quel point certaines fonctionnalités " +"sont utilisées et de mieux établir les priorités en fonction de cela." + +#: ../../source/ref-telemetry.md:40 +msgid "" +"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " +"time a Flower workload starts. This allows us to understand which device " +"types not only start Flower workloads but also successfully complete " +"them." +msgstr "" +"**Cluster.** La télémétrie Flower attribue un ID de cluster en mémoire " +"aléatoire à chaque fois qu'une charge de travail Flower démarre. Cela " +"nous permet de comprendre quels types d'appareils non seulement démarrent" +" les charges de travail Flower, mais aussi les terminent avec succès." + +#: ../../source/ref-telemetry.md:42 +msgid "" +"**Source.** Flower telemetry tries to store a random source ID in " +"`~/.flwr/source` the first time a telemetry event is generated. The " +"source ID is important to identify whether an issue is recurring or " +"whether an issue is triggered by multiple clusters running concurrently " +"(which often happens in simulation). For example, if a device runs " +"multiple workloads at the same time, and this results in an issue, then, " +"in order to reproduce the issue, multiple workloads must be started at " +"the same time." +msgstr "" +"**Source.** La télémétrie de Flower essaie de stocker un ID de source " +"aléatoire dans `~/.flwr/source` la première fois qu'un événement de " +"télémétrie est généré. 
L'ID de source est important pour identifier si un"
+" problème est récurrent ou si un problème est déclenché par plusieurs "
+"clusters fonctionnant simultanément (ce qui arrive souvent en "
+"simulation). Par exemple, si un périphérique exécute plusieurs charges de"
+" travail en même temps, et que cela entraîne un problème, alors, afin de "
+"reproduire le problème, plusieurs charges de travail doivent être "
+"démarrées en même temps."
+
+#: ../../source/ref-telemetry.md:44
+msgid ""
+"You may delete the source ID at any time. If you wish for all events "
+"logged under a specific source ID to be deleted, you can send a deletion "
+"request mentioning the source ID to `telemetry@flower.ai`. All events "
+"related to that source ID will then be permanently deleted."
+msgstr ""
+"Tu peux supprimer l'identifiant de la source à tout moment. Si tu "
+"souhaites que tous les événements enregistrés sous un identifiant de "
+"source spécifique soient supprimés, tu peux envoyer une demande de "
+"suppression mentionnant l'identifiant de source à `telemetry@flower.ai`."
+" Tous les événements liés à cet identifiant de source seront alors "
+"définitivement supprimés."
+
+#: ../../source/ref-telemetry.md:46
+msgid ""
+"We will not collect any personally identifiable information. If you think"
+" any of the metrics collected could be misused in any way, please [get in"
+" touch with us](#how-to-contact-us). We will update this page to reflect "
+"any changes to the metrics collected and publish changes in the "
+"changelog."
+msgstr ""
+"Nous ne collecterons aucune information personnelle identifiable. Si tu "
+"penses que l'une des métriques collectées pourrait être utilisée à "
+"mauvais escient de quelque manière que ce soit, merci de [nous "
+"contacter](#how-to-contact-us). Nous mettrons à jour cette page pour "
+"refléter toute modification des métriques collectées et nous publierons "
+"les changements dans le journal des modifications (changelog)."
+ +#: ../../source/ref-telemetry.md:48 +msgid "" +"If you think other metrics would be helpful for us to better guide our " +"decisions, please let us know! We will carefully review them; if we are " +"confident that they do not compromise user privacy, we may add them." +msgstr "" +"Si tu penses que d'autres mesures nous seraient utiles pour mieux " +"orienter nos décisions, fais-le nous savoir ! Nous les examinerons " +"attentivement ; si nous sommes convaincus qu'elles ne compromettent pas " +"la vie privée des utilisateurs, nous pourrons les ajouter." + +#: ../../source/ref-telemetry.md:50 +msgid "How to inspect what is being reported" +msgstr "Comment inspecter ce qui est rapporté" + +#: ../../source/ref-telemetry.md:52 +msgid "" +"We wanted to make it very easy for you to inspect what anonymous usage " +"metrics are reported. You can view all the reported telemetry information" +" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " +"is disabled by default. You may use logging independently from " +"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " +"without sending any metrics." +msgstr "" +"Nous avons voulu qu'il soit très facile pour toi d'inspecter les mesures " +"d'utilisation anonymes qui sont rapportées. Tu peux voir toutes les " +"informations de télémétrie rapportées en définissant la variable " +"d'environnement `FLWR_TELEMETRY_LOGGING=1`. La journalisation est " +"désactivée par défaut. Tu peux utiliser la journalisation indépendamment " +"de `FLWR_TELEMETRY_ENABLED` afin d'inspecter la fonction de télémétrie " +"sans envoyer de mesures." 
+
+#: ../../source/ref-telemetry.md:58
+msgid ""
+"The inspect Flower telemetry without sending any anonymous usage metrics,"
+" use both environment variables:"
+msgstr ""
+"Pour inspecter la télémétrie de Flower sans envoyer de métriques "
+"d'utilisation anonymes, utilise les deux variables d'environnement :"
+
+#: ../../source/ref-telemetry.md:64
+msgid "How to contact us"
+msgstr "Comment nous contacter"
+
+#: ../../source/ref-telemetry.md:66
+msgid ""
+"We want to hear from you. If you have any feedback or ideas on how to "
+"improve the way we handle anonymous usage metrics, reach out to us via "
+"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email "
+"(`telemetry@flower.ai`)."
+msgstr ""
+"Nous voulons connaître ton avis. Si tu as des commentaires ou des idées "
+"pour améliorer la façon dont nous traitons les mesures d'utilisation "
+"anonymes, contacte-nous via [Slack](https://flower.ai/join-slack/) "
+"(canal `#telemetry`) ou par courriel (`telemetry@flower.ai`)."
+
+#: ../../source/tutorial-quickstart-android.rst:-1
+msgid ""
+"Read this Federated Learning quickstart tutorial for creating an Android "
+"app using Flower."
+msgstr ""
+
+#: ../../source/tutorial-quickstart-android.rst:5
+msgid "Quickstart Android"
+msgstr "Démarrage rapide Android"
+
+#: ../../source/tutorial-quickstart-android.rst:10
+msgid ""
+"Let's build a federated learning system using TFLite and Flower on "
+"Android!"
+msgstr ""
+"Construisons un système d'apprentissage fédéré en utilisant TFLite et "
+"Flower sur Android !"
+
+#: ../../source/tutorial-quickstart-android.rst:12
+#, fuzzy
+msgid ""
+"Please refer to the `full code example "
+"`_ to learn "
+"more."
+msgstr ""
+"Réfère-toi à l'exemple de code complet "
+"`_ "
+"pour en savoir plus."
+
+#: ../../source/tutorial-quickstart-fastai.rst:-1
+msgid ""
+"Check out this Federated Learning quickstart tutorial for using Flower "
+"with FastAI to train a vision model on CIFAR-10."
+msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:5 +msgid "Quickstart fastai" +msgstr "Démarrage rapide fastai" + +#: ../../source/tutorial-quickstart-fastai.rst:10 +msgid "Let's build a federated learning system using fastai and Flower!" +msgstr "" +"Construisons un système d'apprentissage fédéré en utilisant fastai et " +"Flower !" + +#: ../../source/tutorial-quickstart-fastai.rst:12 +#, fuzzy +msgid "" +"Please refer to the `full code example " +"`_ " +"to learn more." +msgstr "" +"Réfère-toi à l'exemple de code complet " +"`_ " +"pour en savoir plus." + +#: ../../source/tutorial-quickstart-huggingface.rst:-1 +msgid "" +"Check out this Federating Learning quickstart tutorial for using Flower " +"with HuggingFace Transformers in order to fine-tune an LLM." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:5 +msgid "Quickstart 🤗 Transformers" +msgstr "Démarrage rapide 🤗 Transformateurs" + +#: ../../source/tutorial-quickstart-huggingface.rst:10 +msgid "" +"Let's build a federated learning system using Hugging Face Transformers " +"and Flower!" +msgstr "" +"Construisons un système d'apprentissage fédéré à l'aide des " +"transformateurs Hugging Face et de Flower !" + +#: ../../source/tutorial-quickstart-huggingface.rst:12 +msgid "" +"We will leverage Hugging Face to federate the training of language models" +" over multiple clients using Flower. More specifically, we will fine-tune" +" a pre-trained Transformer model (distilBERT) for sequence classification" +" over a dataset of IMDB ratings. The end goal is to detect if a movie " +"rating is positive or negative." +msgstr "" +"Nous nous appuierons sur Hugging Face pour fédérer l'entraînement de " +"modèles de langage sur plusieurs clients à l'aide de Flower. Plus " +"précisément, nous mettrons au point un modèle Transformer pré-entraîné " +"(distilBERT) pour la classification de séquences sur un ensemble de " +"données d'évaluations IMDB. 
L'objectif final est de détecter si "
+"l'évaluation d'un film est positive ou négative."
+
+#: ../../source/tutorial-quickstart-huggingface.rst:18
+msgid "Dependencies"
+msgstr "Dépendances"
+
+#: ../../source/tutorial-quickstart-huggingface.rst:20
+msgid ""
+"To follow along this tutorial you will need to install the following "
+"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, "
+":code:`torch`, and :code:`transformers`. This can be done using "
+":code:`pip`:"
+msgstr ""
+"Pour suivre ce tutoriel, tu devras installer les paquets suivants : "
+":code:`datasets`, :code:`evaluate`, :code:`flwr`, :code:`torch`, et "
+":code:`transformers`. Cela peut être fait en utilisant :code:`pip` :"
+
+#: ../../source/tutorial-quickstart-huggingface.rst:30
+msgid "Standard Hugging Face workflow"
+msgstr "Flux de travail standard de Hugging Face"
+
+#: ../../source/tutorial-quickstart-huggingface.rst:33
+msgid "Handling the data"
+msgstr "Traitement des données"
+
+#: ../../source/tutorial-quickstart-huggingface.rst:35
+msgid ""
+"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` "
+"library. We then need to tokenize the data and create :code:`PyTorch` "
+"dataloaders, this is all done in the :code:`load_data` function:"
+msgstr ""
+"Pour récupérer le jeu de données IMDB, nous utiliserons la bibliothèque "
+":code:`datasets` de Hugging Face. Nous devons ensuite tokeniser les "
+"données et créer des :code:`PyTorch` dataloaders, ce qui est fait dans la"
+" fonction :code:`load_data` :"
+
+#: ../../source/tutorial-quickstart-huggingface.rst:81
+msgid "Training and testing the model"
+msgstr "Former et tester le modèle"
+
+#: ../../source/tutorial-quickstart-huggingface.rst:83
+msgid ""
+"Once we have a way of creating our trainloader and testloader, we can "
+"take care of the training and testing. 
This is very similar to any "
+":code:`PyTorch` training or testing loop:"
+msgstr ""
+"Une fois que nous avons trouvé un moyen de créer notre trainloader et "
+"notre testloader, nous pouvons nous occuper de l'entraînement et du test."
+" C'est très similaire à n'importe quelle boucle d'entraînement ou de test"
+" :code:`PyTorch` :"
+
+#: ../../source/tutorial-quickstart-huggingface.rst:121
+msgid "Creating the model itself"
+msgstr "Créer le modèle lui-même"
+
+#: ../../source/tutorial-quickstart-huggingface.rst:123
+msgid ""
+"To create the model itself, we will just load the pre-trained distillBERT"
+" model using Hugging Face’s :code:`AutoModelForSequenceClassification` :"
+msgstr ""
+"Pour créer le modèle lui-même, nous allons simplement charger le modèle "
+"distillBERT pré-entraîné en utilisant le "
+":code:`AutoModelForSequenceClassification` de Hugging Face :"
+
+#: ../../source/tutorial-quickstart-huggingface.rst:136
+msgid "Federating the example"
+msgstr "Fédérer l'exemple"
+
+#: ../../source/tutorial-quickstart-huggingface.rst:139
+msgid "Creating the IMDBClient"
+msgstr "Création de l'IMDBClient"
+
+#: ../../source/tutorial-quickstart-huggingface.rst:141
+msgid ""
+"To federate our example to multiple clients, we first need to write our "
+"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). "
+"This is very easy, as our model is a standard :code:`PyTorch` model:"
+msgstr ""
+"Pour fédérer notre exemple à plusieurs clients, nous devons d'abord "
+"écrire notre classe de client Flower (héritant de "
+":code:`flwr.client.NumPyClient`). C'est très facile, car notre modèle est"
+" un modèle :code:`PyTorch` standard :"
+
+#: ../../source/tutorial-quickstart-huggingface.rst:169
+msgid ""
+"The :code:`get_parameters` function lets the server get the client's "
+"parameters. Inversely, the :code:`set_parameters` function allows the "
+"server to send its parameters to the client. 
Finally, the :code:`fit` " +"function trains the model locally for the client, and the " +":code:`evaluate` function tests the model locally and returns the " +"relevant metrics." +msgstr "" +"La fonction :code:`get_parameters` permet au serveur d'obtenir les " +"paramètres du client. Inversement, la fonction :code:`set_parameters` " +"permet au serveur d'envoyer ses paramètres au client. Enfin, la fonction " +":code:`fit` forme le modèle localement pour le client, et la fonction " +":code:`evaluate` teste le modèle localement et renvoie les mesures " +"correspondantes." + +#: ../../source/tutorial-quickstart-huggingface.rst:175 +msgid "Starting the server" +msgstr "Démarrer le serveur" + +#: ../../source/tutorial-quickstart-huggingface.rst:177 +msgid "" +"Now that we have a way to instantiate clients, we need to create our " +"server in order to aggregate the results. Using Flower, this can be done " +"very easily by first choosing a strategy (here, we are using " +":code:`FedAvg`, which will define the global weights as the average of " +"all the clients' weights at each round) and then using the " +":code:`flwr.server.start_server` function:" +msgstr "" +"Maintenant que nous avons un moyen d'instancier les clients, nous devons " +"créer notre serveur afin d'agréger les résultats. Avec Flower, cela peut " +"être fait très facilement en choisissant d'abord une stratégie (ici, nous" +" utilisons :code:`FedAvg`, qui définira les poids globaux comme la " +"moyenne des poids de tous les clients à chaque tour) et en utilisant " +"ensuite la fonction :code:`flwr.server.start_server` :" + +#: ../../source/tutorial-quickstart-huggingface.rst:205 +msgid "" +"The :code:`weighted_average` function is there to provide a way to " +"aggregate the metrics distributed amongst the clients (basically this " +"allows us to display a nice average accuracy and loss for every round)." 
+msgstr "" +"La fonction :code:`weighted_average` est là pour fournir un moyen " +"d'agréger les mesures réparties entre les clients (en gros, cela nous " +"permet d'afficher une belle moyenne de précision et de perte pour chaque " +"tour)." + +#: ../../source/tutorial-quickstart-huggingface.rst:209 +msgid "Putting everything together" +msgstr "Tout assembler" + +#: ../../source/tutorial-quickstart-huggingface.rst:211 +msgid "We can now start client instances using:" +msgstr "Nous pouvons maintenant démarrer des instances de clients en utilisant :" + +#: ../../source/tutorial-quickstart-huggingface.rst:221 +msgid "" +"And they will be able to connect to the server and start the federated " +"training." +msgstr "Et ils pourront se connecter au serveur et démarrer la formation fédérée." + +#: ../../source/tutorial-quickstart-huggingface.rst:223 +#, fuzzy +msgid "" +"If you want to check out everything put together, you should check out " +"the full code example: [https://github.com/adap/flower/tree/main/examples" +"/quickstart-" +"huggingface](https://github.com/adap/flower/tree/main/examples" +"/quickstart-huggingface)." +msgstr "" +"Si tu veux voir tout ce qui est mis ensemble, tu devrais consulter " +"l'exemple de code complet : " +"[https://github.com/adap/flower/tree/main/examples/quickstart-" +"huggingface](https://github.com/adap/flower/tree/main/examples" +"/quickstart-huggingface)." + +#: ../../source/tutorial-quickstart-huggingface.rst:227 +msgid "" +"Of course, this is a very basic example, and a lot can be added or " +"modified, it was just to showcase how simply we could federate a Hugging " +"Face workflow using Flower." +msgstr "" +"Bien sûr, c'est un exemple très basique, et beaucoup de choses peuvent " +"être ajoutées ou modifiées, il s'agissait juste de montrer avec quelle " +"simplicité on pouvait fédérer un flux de travail Hugging Face à l'aide de" +" Flower." 
+ +#: ../../source/tutorial-quickstart-huggingface.rst:230 +msgid "" +"Note that in this example we used :code:`PyTorch`, but we could have very" +" well used :code:`TensorFlow`." +msgstr "" +"Notez que dans cet exemple, nous avons utilisé :code:`PyTorch`, mais nous" +" aurions très bien pu utiliser :code:`TensorFlow`." + +#: ../../source/tutorial-quickstart-ios.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an iOS app " +"using Flower to train a neural network on MNIST." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:5 +#, fuzzy +msgid "Quickstart iOS" +msgstr "Démarrage rapide XGBoost" + +#: ../../source/tutorial-quickstart-ios.rst:10 +#, fuzzy +msgid "" +"In this tutorial we will learn how to train a Neural Network on MNIST " +"using Flower and CoreML on iOS devices." +msgstr "" +"Dans ce tutoriel, nous allons apprendre, comment former un réseau " +"neuronal convolutif sur MNIST en utilisant Flower et PyTorch." + +#: ../../source/tutorial-quickstart-ios.rst:12 +#, fuzzy +msgid "" +"First of all, for running the Flower Python server, it is recommended to " +"create a virtual environment and run everything within a `virtualenv " +"`_. For the Flower " +"client implementation in iOS, it is recommended to use Xcode as our IDE." +msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." + +#: ../../source/tutorial-quickstart-ios.rst:15 +#, fuzzy +msgid "" +"Our example consists of one Python *server* and two iPhone *clients* that" +" all have the same model." +msgstr "" +"Notre exemple consiste en un *serveur* et deux *clients* ayant tous le " +"même modèle." + +#: ../../source/tutorial-quickstart-ios.rst:17 +#, fuzzy +msgid "" +"*Clients* are responsible for generating individual weight updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. 
" +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." +msgstr "" +"*Les clients* sont chargés de générer des mises à jour de poids " +"individuelles pour le modèle en fonction de leurs ensembles de données " +"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " +"agrège pour produire un meilleur modèle. Enfin, le *serveur* renvoie " +"cette version améliorée du modèle à chaque *client*. Un cycle complet de " +"mises à jour de poids s'appelle un *round*." + +#: ../../source/tutorial-quickstart-ios.rst:21 +#, fuzzy +msgid "" +"Now that we have a rough idea of what is going on, let's get started to " +"setup our Flower server environment. We first need to install Flower. You" +" can do this by using pip:" +msgstr "" +"Maintenant que nous avons une idée générale de ce qui se passe, " +"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " +"exécutant :" + +#: ../../source/tutorial-quickstart-ios.rst:27 +msgid "Or Poetry:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:36 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training using CoreML as our local training pipeline and " +"MNIST as our dataset. For simplicity reasons we will use the complete " +"Flower client with CoreML, that has been implemented and stored inside " +"the Swift SDK. The client implementation can be seen below:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:72 +msgid "" +"Let's create a new application project in Xcode and add :code:`flwr` as a" +" dependency in your project. For our application, we will store the logic" +" of our app in :code:`FLiOSModel.swift` and the UI elements in " +":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" +" in this quickstart. Please refer to the `full code example " +"`_ to learn more " +"about the app." 
+msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:75 +msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:83 +msgid "" +"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " +"will be bundled inside the application during deployment to your iOS " +"device. We need to pass the url to access mlmodel and run CoreML machine " +"learning processes, it can be retrieved by calling the function " +":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " +"into :code:`MLBatchProvider` object. The preprocessing is done inside " +":code:`DataLoader.swift`." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:99 +msgid "" +"Since CoreML does not allow the model parameters to be seen before " +"training, and accessing the model parameters during or after the training" +" can only be done by specifying the layer name, we need to know this " +"informations beforehand, through looking at the model specification, " +"which are written as proto files. The implementation can be seen in " +":code:`MLModelInspect`." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:102 +msgid "" +"After we have all of the necessary informations, let's create our Flower " +"client." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:117 +msgid "" +"Then start the Flower gRPC client and start communicating to the server " +"by passing our Flower client to the function :code:`startFlwrGRPC`." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:124 +msgid "" +"That's it for the client. We only have to implement :code:`Client` or " +"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " +"The attribute :code:`hostname` and :code:`port` tells the client which " +"server to connect to. 
This can be done by entering the hostname and port " +"in the application before clicking the start button to start the " +"federated learning process." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:131 +#: ../../source/tutorial-quickstart-mxnet.rst:228 +#: ../../source/tutorial-quickstart-pytorch.rst:205 +#: ../../source/tutorial-quickstart-tensorflow.rst:100 +msgid "" +"For simple workloads we can start a Flower server and leave all the " +"configuration possibilities at their default values. In a file named " +":code:`server.py`, import Flower and start the server:" +msgstr "" +"Pour les charges de travail simples, nous pouvons démarrer un serveur " +"Flower et laisser toutes les possibilités de configuration à leurs " +"valeurs par défaut. Dans un fichier nommé :code:`server.py`, importe " +"Flower et démarre le serveur :" + +#: ../../source/tutorial-quickstart-ios.rst:142 +#: ../../source/tutorial-quickstart-mxnet.rst:239 +#: ../../source/tutorial-quickstart-pytorch.rst:216 +#: ../../source/tutorial-quickstart-scikitlearn.rst:215 +#: ../../source/tutorial-quickstart-tensorflow.rst:112 +msgid "Train the model, federated!" +msgstr "Entraîne le modèle, fédéré !" + +#: ../../source/tutorial-quickstart-ios.rst:144 +#: ../../source/tutorial-quickstart-pytorch.rst:218 +#: ../../source/tutorial-quickstart-tensorflow.rst:114 +#: ../../source/tutorial-quickstart-xgboost.rst:525 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. FL systems usually have a server and " +"multiple clients. We therefore have to start the server first:" +msgstr "" +"Le client et le serveur étant prêts, nous pouvons maintenant tout " +"exécuter et voir l'apprentissage fédéré en action. Les systèmes FL ont " +"généralement un serveur et plusieurs clients. 
Nous devons donc commencer " +"par démarrer le serveur :" + +#: ../../source/tutorial-quickstart-ios.rst:152 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Build and run the client through your Xcode, one through Xcode" +" Simulator and the other by deploying it to your iPhone. To see more " +"about how to deploy your app to iPhone or Simulator visit `here " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:156 +#, fuzzy +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system in your ios device. The full `source code " +"`_ for this " +"example can be found in :code:`examples/ios`." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-jax.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Jax to train a linear regression model on a scikit-learn dataset." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:5 +msgid "Quickstart JAX" +msgstr "Démarrage rapide de JAX" + +#: ../../source/tutorial-quickstart-mxnet.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with MXNet to train a Sequential model on MNIST." +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:5 +msgid "Quickstart MXNet" +msgstr "Démarrage rapide de MXNet" + +#: ../../source/tutorial-quickstart-mxnet.rst:7 +msgid "" +"MXNet is no longer maintained and has been moved into `Attic " +"`_. As a result, we would " +"encourage you to use other ML frameworks alongise Flower, for example, " +"PyTorch. This tutorial might be removed in future versions of Flower." 
+msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:12 +msgid "" +"In this tutorial, we will learn how to train a :code:`Sequential` model " +"on MNIST using Flower and MXNet." +msgstr "" +"Dans ce tutoriel, nous allons apprendre à former un modèle " +":code:`Sequential` sur MNIST à l'aide de Flower et de MXNet." + +#: ../../source/tutorial-quickstart-mxnet.rst:14 +#: ../../source/tutorial-quickstart-scikitlearn.rst:12 +msgid "" +"It is recommended to create a virtual environment and run everything " +"within this `virtualenv `_." +msgstr "" +"Il est recommandé de créer un environnement virtuel et de tout exécuter " +"dans ce `virtualenv `_." + +#: ../../source/tutorial-quickstart-mxnet.rst:18 +#: ../../source/tutorial-quickstart-scikitlearn.rst:16 +msgid "" +"*Clients* are responsible for generating individual model parameter " +"updates for the model based on their local datasets. These updates are " +"then sent to the *server* which will aggregate them to produce an updated" +" global model. Finally, the *server* sends this improved version of the " +"model back to each *client*. A complete cycle of parameters updates is " +"called a *round*." +msgstr "" +"*Les clients* sont chargés de générer des mises à jour individuelles des " +"paramètres du modèle en fonction de leurs ensembles de données locales. " +"Ces mises à jour sont ensuite envoyées au *serveur* qui les agrège pour " +"produire un modèle global mis à jour. Enfin, le *serveur* renvoie cette " +"version améliorée du modèle à chaque *client*. Un cycle complet de mises " +"à jour des paramètres s'appelle un *round*." + +#: ../../source/tutorial-quickstart-mxnet.rst:22 +#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +msgid "" +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. You can do this by running:" +msgstr "" +"Maintenant que nous avons une idée approximative de ce qui se passe, " +"commençons. 
Nous devons d'abord installer Flower. Tu peux le faire en " +"lançant :" + +#: ../../source/tutorial-quickstart-mxnet.rst:28 +msgid "Since we want to use MXNet, let's go ahead and install it:" +msgstr "Puisque nous voulons utiliser MXNet, allons-y et installons-le :" + +#: ../../source/tutorial-quickstart-mxnet.rst:38 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. Our training " +"procedure and network architecture are based on MXNet´s `Hand-written " +"Digit Recognition tutorial " +"`_." +msgstr "" +"Maintenant que toutes nos dépendances sont installées, lançons une " +"formation distribuée simple avec deux clients et un serveur. Notre " +"procédure de formation et l'architecture du réseau sont basées sur le " +"tutoriel de reconnaissance de chiffres écrits à la main du MXNet " +"`_." + +#: ../../source/tutorial-quickstart-mxnet.rst:40 +msgid "" +"In a file called :code:`client.py`, import Flower and MXNet related " +"packages:" +msgstr "" +"Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " +"liés au MXNet :" + +#: ../../source/tutorial-quickstart-mxnet.rst:55 +msgid "In addition, define the device allocation in MXNet with:" +msgstr "En outre, définis l'attribution de l'appareil dans MXNet avec :" + +#: ../../source/tutorial-quickstart-mxnet.rst:61 +msgid "" +"We use MXNet to load MNIST, a popular image classification dataset of " +"handwritten digits for machine learning. The MXNet utility " +":code:`mx.test_utils.get_mnist()` downloads the training and test data." +msgstr "" +"Nous utilisons MXNet pour charger MNIST, un ensemble de données de " +"classification d'images populaire de chiffres manuscrits pour " +"l'apprentissage automatique. L'utilitaire MXNet " +":code:`mx.test_utils.get_mnist()` télécharge les données d'entraînement " +"et de test." 
+ +#: ../../source/tutorial-quickstart-mxnet.rst:75 +msgid "" +"Define the training and loss with MXNet. We train the model by looping " +"over the dataset, measure the corresponding loss, and optimize it." +msgstr "" +"Définis l'entraînement et la perte avec MXNet. Nous entraînons le modèle " +"en parcourant en boucle l'ensemble des données, nous mesurons la perte " +"correspondante et nous l'optimisons." + +#: ../../source/tutorial-quickstart-mxnet.rst:113 +msgid "" +"Next, we define the validation of our machine learning model. We loop " +"over the test set and measure both loss and accuracy on the test set." +msgstr "" +"Ensuite, nous définissons la validation de notre modèle d'apprentissage " +"automatique. Nous effectuons une boucle sur l'ensemble de test et " +"mesurons à la fois la perte et la précision sur l'ensemble de test." + +#: ../../source/tutorial-quickstart-mxnet.rst:137 +msgid "" +"After defining the training and testing of a MXNet machine learning " +"model, we use these functions to implement a Flower client." +msgstr "" +"Après avoir défini la formation et le test d'un modèle d'apprentissage " +"automatique MXNet, nous utilisons ces fonctions pour mettre en œuvre un " +"client Flower." + +#: ../../source/tutorial-quickstart-mxnet.rst:139 +msgid "Our Flower clients will use a simple :code:`Sequential` model:" +msgstr "Nos clients Flower utiliseront un modèle simple :code:`Sequential` :" + +#: ../../source/tutorial-quickstart-mxnet.rst:158 +msgid "" +"After loading the dataset with :code:`load_data()` we perform one forward" +" propagation to initialize the model and model parameters with " +":code:`model(init)`. Next, we implement a Flower client." +msgstr "" +"Après avoir chargé l'ensemble de données avec :code:`load_data()`, nous " +"effectuons une propagation vers l'avant pour initialiser le modèle et les" +" paramètres du modèle avec :code:`model(init)`. Ensuite, nous " +"implémentons un client Flower." 
+ +#: ../../source/tutorial-quickstart-mxnet.rst:160 +#: ../../source/tutorial-quickstart-pytorch.rst:144 +#: ../../source/tutorial-quickstart-tensorflow.rst:54 +msgid "" +"The Flower server interacts with clients through an interface called " +":code:`Client`. When the server selects a particular client for training," +" it sends training instructions over the network. The client receives " +"those instructions and calls one of the :code:`Client` methods to run " +"your code (i.e., to train the neural network we defined earlier)." +msgstr "" +"Le serveur Flower interagit avec les clients par le biais d'une interface" +" appelée :code:`Client`. Lorsque le serveur sélectionne un client " +"particulier pour la formation, il envoie des instructions de formation " +"sur le réseau. Le client reçoit ces instructions et appelle l'une des " +"méthodes :code:`Client` pour exécuter ton code (c'est-à-dire pour former " +"le réseau neuronal que nous avons défini plus tôt)." + +#: ../../source/tutorial-quickstart-mxnet.rst:166 +msgid "" +"Flower provides a convenience class called :code:`NumPyClient` which " +"makes it easier to implement the :code:`Client` interface when your " +"workload uses MXNet. Implementing :code:`NumPyClient` usually means " +"defining the following methods (:code:`set_parameters` is optional " +"though):" +msgstr "" +"Flower fournit une classe de commodité appelée :code:`NumPyClient` qui " +"facilite l'implémentation de l'interface :code:`Client` lorsque ta charge" +" de travail utilise MXNet. 
L'implémentation de :code:`NumPyClient` " +"signifie généralement la définition des méthodes suivantes " +"(:code:`set_parameters` est cependant facultatif) :" + +#: ../../source/tutorial-quickstart-mxnet.rst:172 +#: ../../source/tutorial-quickstart-pytorch.rst:156 +#: ../../source/tutorial-quickstart-scikitlearn.rst:109 +msgid "return the model weight as a list of NumPy ndarrays" +msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" + +#: ../../source/tutorial-quickstart-mxnet.rst:173 +#: ../../source/tutorial-quickstart-pytorch.rst:157 +#: ../../source/tutorial-quickstart-scikitlearn.rst:111 +msgid ":code:`set_parameters` (optional)" +msgstr ":code:`set_parameters` (optionnel)" + +#: ../../source/tutorial-quickstart-mxnet.rst:174 +#: ../../source/tutorial-quickstart-pytorch.rst:158 +#: ../../source/tutorial-quickstart-scikitlearn.rst:111 +msgid "" +"update the local model weights with the parameters received from the " +"server" +msgstr "" +"mettre à jour les poids du modèle local avec les paramètres reçus du " +"serveur" + +#: ../../source/tutorial-quickstart-mxnet.rst:176 +#: ../../source/tutorial-quickstart-pytorch.rst:160 +#: ../../source/tutorial-quickstart-scikitlearn.rst:114 +msgid "set the local model weights" +msgstr "fixe les poids du modèle local" + +#: ../../source/tutorial-quickstart-mxnet.rst:177 +#: ../../source/tutorial-quickstart-pytorch.rst:161 +#: ../../source/tutorial-quickstart-scikitlearn.rst:115 +msgid "train the local model" +msgstr "entraîne le modèle local" + +#: ../../source/tutorial-quickstart-mxnet.rst:178 +#: ../../source/tutorial-quickstart-pytorch.rst:162 +#: ../../source/tutorial-quickstart-scikitlearn.rst:116 +msgid "receive the updated local model weights" +msgstr "recevoir les poids du modèle local mis à jour" + +#: ../../source/tutorial-quickstart-mxnet.rst:180 +#: ../../source/tutorial-quickstart-pytorch.rst:164 +#: ../../source/tutorial-quickstart-scikitlearn.rst:118 +msgid "test the local model" 
+msgstr "teste le modèle local"
+
+#: ../../source/tutorial-quickstart-mxnet.rst:182
+msgid "They can be implemented in the following way:"
+msgstr "Ils peuvent être mis en œuvre de la manière suivante :"
+
+#: ../../source/tutorial-quickstart-mxnet.rst:212
+msgid ""
+"We can now create an instance of our class :code:`MNISTClient` and add "
+"one line to actually run this client:"
+msgstr ""
+"Nous pouvons maintenant créer une instance de notre classe "
+":code:`MNISTClient` et ajouter une ligne pour exécuter ce client :"
+
+#: ../../source/tutorial-quickstart-mxnet.rst:219
+#, fuzzy
+msgid ""
+"That's it for the client. We only have to implement :code:`Client` or "
+":code:`NumPyClient` and call :code:`fl.client.start_client()` or "
+":code:`fl.client.start_numpy_client()`. The string "
+":code:`\"0.0.0.0:8080\"` tells the client which server to connect to. In "
+"our case we can run the server and the client on the same machine, "
+"therefore we use :code:`\"0.0.0.0:8080\"`. If we run a truly federated "
+"workload with the server and clients running on different machines, all "
+"that needs to change is the :code:`server_address` we pass to the client."
+msgstr ""
+"C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou"
+" :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()`. La "
+"chaîne :code:`\"0.0.0.0:8080\"` indique au client à quel serveur se "
+"connecter. Dans notre cas, nous pouvons exécuter le serveur et le client "
+"sur la même machine, c'est pourquoi nous utilisons "
+":code:`\"0.0.0.0:8080\"`. Si nous exécutons une charge de travail "
+"véritablement fédérée avec le serveur et les clients s'exécutant sur des "
+"machines différentes, tout ce qui doit changer est :code:`server_address`"
+" que nous transmettons au client."
+
+#: ../../source/tutorial-quickstart-mxnet.rst:241
+msgid ""
+"With both client and server ready, we can now run everything and see "
+"federated learning in action. 
Federated learning systems usually have a " +"server and multiple clients. We therefore have to start the server first:" +msgstr "" +"Le client et le serveur étant prêts, nous pouvons maintenant tout " +"exécuter et voir l'apprentissage fédéré en action. Les systèmes " +"d'apprentissage fédéré ont généralement un serveur et plusieurs clients. " +"Nous devons donc commencer par démarrer le serveur :" + +#: ../../source/tutorial-quickstart-mxnet.rst:249 +#: ../../source/tutorial-quickstart-pytorch.rst:226 +#: ../../source/tutorial-quickstart-scikitlearn.rst:224 +#: ../../source/tutorial-quickstart-tensorflow.rst:122 +#: ../../source/tutorial-quickstart-xgboost.rst:533 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Open a new terminal and start the first client:" +msgstr "" +"Une fois que le serveur fonctionne, nous pouvons démarrer les clients " +"dans différents terminaux. Ouvre un nouveau terminal et démarre le " +"premier client :" + +#: ../../source/tutorial-quickstart-mxnet.rst:256 +#: ../../source/tutorial-quickstart-pytorch.rst:233 +#: ../../source/tutorial-quickstart-scikitlearn.rst:231 +#: ../../source/tutorial-quickstart-tensorflow.rst:129 +#: ../../source/tutorial-quickstart-xgboost.rst:540 +msgid "Open another terminal and start the second client:" +msgstr "Ouvre un autre terminal et démarre le deuxième client :" + +#: ../../source/tutorial-quickstart-mxnet.rst:262 +#: ../../source/tutorial-quickstart-pytorch.rst:239 +#: ../../source/tutorial-quickstart-scikitlearn.rst:237 +#: ../../source/tutorial-quickstart-xgboost.rst:546 +msgid "" +"Each client will have its own dataset. You should now see how the " +"training does in the very first terminal (the one that started the " +"server):" +msgstr "" +"Chaque client aura son propre ensemble de données. 
Tu devrais maintenant " +"voir comment la formation se déroule dans le tout premier terminal (celui" +" qui a démarré le serveur) :" + +#: ../../source/tutorial-quickstart-mxnet.rst:294 +#, fuzzy +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in :code:`examples" +"/quickstart-mxnet`." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-pandas.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Pandas to perform Federated Analytics." +msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:5 +msgid "Quickstart Pandas" +msgstr "Démarrage rapide des Pandas" + +#: ../../source/tutorial-quickstart-pandas.rst:10 +msgid "Let's build a federated analytics system using Pandas and Flower!" +msgstr "Construisons un système d'analyse fédéré à l'aide de Pandas et de Flower !" + +#: ../../source/tutorial-quickstart-pandas.rst:12 +#, fuzzy +msgid "" +"Please refer to the `full code example " +"`_ " +"to learn more." +msgstr "" +"Réfère-toi à l'exemple de code complet " +"`_ " +"pour en savoir plus." + +#: ../../source/tutorial-quickstart-pytorch.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch to train a CNN model on MNIST." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:13 +msgid "" +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR10 using Flower and PyTorch." +msgstr "" +"Dans ce tutoriel, nous allons apprendre à entraîner un réseau neuronal " +"convolutif sur CIFAR10 à l'aide de Flower et PyTorch." 
+ +#: ../../source/tutorial-quickstart-pytorch.rst:15 +#: ../../source/tutorial-quickstart-xgboost.rst:39 +msgid "" +"First of all, it is recommended to create a virtual environment and run " +"everything within a `virtualenv `_." +msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." + +#: ../../source/tutorial-quickstart-pytorch.rst:29 +msgid "" +"Since we want to use PyTorch to solve a computer vision task, let's go " +"ahead and install PyTorch and the **torchvision** library:" +msgstr "" +"Puisque nous voulons utiliser PyTorch pour résoudre une tâche de vision " +"par ordinateur, allons-y et installons PyTorch et la bibliothèque " +"**torchvision** :" + +#: ../../source/tutorial-quickstart-pytorch.rst:39 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. Our training " +"procedure and network architecture are based on PyTorch's `Deep Learning " +"with PyTorch " +"`_." +msgstr "" +"Maintenant que nous avons installé toutes nos dépendances, lançons une " +"formation distribuée simple avec deux clients et un serveur. Notre " +"procédure de formation et l'architecture de notre réseau sont basées sur " +"`Deep Learning with PyTorch " +"`_ de" +" PyTorch." + +#: ../../source/tutorial-quickstart-pytorch.rst:41 +msgid "" +"In a file called :code:`client.py`, import Flower and PyTorch related " +"packages:" +msgstr "" +"Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " +"liés à PyTorch :" + +#: ../../source/tutorial-quickstart-pytorch.rst:56 +msgid "In addition, we define the device allocation in PyTorch with:" +msgstr "En outre, nous définissons l'attribution des appareils dans PyTorch avec :" + +#: ../../source/tutorial-quickstart-pytorch.rst:62 +msgid "" +"We use PyTorch to load CIFAR10, a popular colored image classification " +"dataset for machine learning. 
The PyTorch :code:`DataLoader()` downloads "
+"the training and test data that are then normalized."
+msgstr ""
+"Nous utilisons PyTorch pour charger CIFAR10, un ensemble de données de "
+"classification d'images colorées populaire pour l'apprentissage "
+"automatique. Le :code:`DataLoader()` de PyTorch télécharge les données "
+"d'entraînement et de test qui sont ensuite normalisées."
+
+#: ../../source/tutorial-quickstart-pytorch.rst:78
+msgid ""
+"Define the loss and optimizer with PyTorch. The training of the dataset "
+"is done by looping over the dataset, measure the corresponding loss and "
+"optimize it."
+msgstr ""
+"Définis la perte et l'optimiseur avec PyTorch. L'entraînement de "
+"l'ensemble de données se fait en bouclant sur l'ensemble de données, en "
+"mesurant la perte correspondante et en l'optimisant."
+
+#: ../../source/tutorial-quickstart-pytorch.rst:94
+msgid ""
+"Define then the validation of the machine learning network. We loop over"
+" the test set and measure the loss and accuracy of the test set."
+msgstr ""
+"Définis ensuite la validation du réseau d'apprentissage automatique. Nous"
+" passons en boucle sur l'ensemble de test et mesurons la perte et la "
+"précision de l'ensemble de test."
+
+#: ../../source/tutorial-quickstart-pytorch.rst:113
+msgid ""
+"After defining the training and testing of a PyTorch machine learning "
+"model, we use the functions for the Flower clients."
+msgstr ""
+"Après avoir défini l'entraînement et le test d'un modèle d'apprentissage "
+"automatique PyTorch, nous utilisons les fonctions pour les clients "
+"Flower."
+ +#: ../../source/tutorial-quickstart-pytorch.rst:115 +msgid "" +"The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 " +"Minute Blitz':" +msgstr "" +"Les clients de Flower utiliseront un CNN simple adapté de \"PyTorch : A " +"60 Minute Blitz\" :" + +#: ../../source/tutorial-quickstart-pytorch.rst:142 +msgid "" +"After loading the data set with :code:`load_data()` we define the Flower " +"interface." +msgstr "" +"Après avoir chargé l'ensemble des données avec :code:`load_data()`, nous " +"définissons l'interface Flower." + +#: ../../source/tutorial-quickstart-pytorch.rst:150 +msgid "" +"Flower provides a convenience class called :code:`NumPyClient` which " +"makes it easier to implement the :code:`Client` interface when your " +"workload uses PyTorch. Implementing :code:`NumPyClient` usually means " +"defining the following methods (:code:`set_parameters` is optional " +"though):" +msgstr "" +"Flower fournit une classe de commodité appelée :code:`NumPyClient` qui " +"facilite la mise en œuvre de l'interface :code:`Client` lorsque ta charge" +" de travail utilise PyTorch. Mettre en œuvre :code:`NumPyClient` signifie" +" généralement définir les méthodes suivantes (:code:`set_parameters` est " +"cependant facultatif) :" + +#: ../../source/tutorial-quickstart-pytorch.rst:166 +msgid "which can be implemented in the following way:" +msgstr "qui peut être mis en œuvre de la manière suivante :" + +#: ../../source/tutorial-quickstart-pytorch.rst:189 +#: ../../source/tutorial-quickstart-tensorflow.rst:82 +msgid "" +"We can now create an instance of our class :code:`CifarClient` and add " +"one line to actually run this client:" +msgstr "" +"Nous pouvons maintenant créer une instance de notre classe " +":code:`CifarClient` et ajouter une ligne pour exécuter ce client :" + +#: ../../source/tutorial-quickstart-pytorch.rst:196 +#: ../../source/tutorial-quickstart-tensorflow.rst:90 +#, fuzzy +msgid "" +"That's it for the client. 
We only have to implement :code:`Client` or "
+":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you "
+"implement a client of type :code:`NumPyClient` you'll need to first call "
+"its :code:`to_client()` method. The string :code:`\"[::]:8080\"` tells "
+"the client which server to connect to. In our case we can run the server "
+"and the client on the same machine, therefore we use "
+":code:`\"[::]:8080\"`. If we run a truly federated workload with the "
+"server and clients running on different machines, all that needs to "
+"change is the :code:`server_address` we point the client at."
+msgstr ""
+"C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou"
+" :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()`. La "
+"chaîne :code:`\"[::]:8080\"` indique au client à quel serveur se "
+"connecter. Dans notre cas, nous pouvons exécuter le serveur et le client "
+"sur la même machine, c'est pourquoi nous utilisons "
+":code:`\"[::]:8080\"`. Si nous exécutons une charge de travail "
+"véritablement fédérée avec le serveur et les clients fonctionnant sur "
+"des machines différentes, tout ce qui doit changer est l'adresse "
+":code:`server_address` vers laquelle nous dirigeons le client."
+
+#: ../../source/tutorial-quickstart-pytorch.rst:271
+#, fuzzy
+msgid ""
+"Congratulations! You've successfully built and run your first federated "
+"learning system. The full `source code "
+"`_ for this example can be found in :code:`examples"
+"/quickstart-pytorch`."
+msgstr ""
+"Félicitations ! Tu as réussi à construire et à faire fonctionner ton "
+"premier système d'apprentissage fédéré. Le code source complet "
+"`_ de cet exemple se trouve dans :code:`examples"
+"/quickstart-pytorch`."
+
+#: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1
+msgid ""
+"Check out this Federated Learning quickstart tutorial for using Flower "
+"with PyTorch Lightning to train an Auto Encoder model on MNIST."
+msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 +msgid "Quickstart PyTorch Lightning" +msgstr "Démarrage rapide de PyTorch Lightning" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 +#, fuzzy +msgid "" +"Let's build a horizontal federated learning system using PyTorch " +"Lightning and Flower!" +msgstr "" +"Construisons un système d'apprentissage fédéré en utilisant PyTorch " +"Lightning et Flower !" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +#, fuzzy +msgid "" +"Please refer to the `full code example " +"`_ to learn more." +msgstr "" +"Réfère-toi à l'exemple de code complet " +"`_ pour en savoir plus." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with scikit-learn to train a linear regression model." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:5 +msgid "Quickstart scikit-learn" +msgstr "Démarrage rapide de scikit-learn" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:10 +msgid "" +"In this tutorial, we will learn how to train a :code:`Logistic " +"Regression` model on MNIST using Flower and scikit-learn." +msgstr "" +"Dans ce tutoriel, nous allons apprendre à former un :code:`modèle de " +"régression logistique` sur MNIST en utilisant Flower et scikit-learn." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:26 +msgid "Since we want to use scikt-learn, let's go ahead and install it:" +msgstr "Puisque nous voulons utiliser scikt-learn, allons-y et installons-le :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:32 +msgid "Or simply install all dependencies using Poetry:" +msgstr "Ou installe simplement toutes les dépendances à l'aide de Poetry :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:42 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. 
However, before " +"setting up the client and server, we will define all functionalities that" +" we need for our federated learning setup within :code:`utils.py`. The " +":code:`utils.py` contains different functions defining all the machine " +"learning basics:" +msgstr "" +"Maintenant que toutes nos dépendances sont installées, exécutons une " +"formation distribuée simple avec deux clients et un serveur. Cependant, " +"avant de configurer le client et le serveur, nous allons définir toutes " +"les fonctionnalités dont nous avons besoin pour notre configuration " +"d'apprentissage fédéré dans :code:`utils.py`. Le :code:`utils.py` " +"contient différentes fonctions définissant toutes les bases de " +"l'apprentissage automatique :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:45 +msgid ":code:`get_model_parameters()`" +msgstr ":code:`get_model_parameters()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:46 +msgid "Returns the paramters of a :code:`sklearn` LogisticRegression model" +msgstr "" +"Renvoie les paramètres d'un modèle de régression logistique " +":code:`sklearn`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:47 +msgid ":code:`set_model_params()`" +msgstr ":code:`set_model_params()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:48 +msgid "Sets the parameters of a :code:`sklean` LogisticRegression model" +msgstr "Définit les paramètres d'un modèle de régression logistique :code:`sklean`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:49 +msgid ":code:`set_initial_params()`" +msgstr ":code:`set_initial_params()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:50 +msgid "Initializes the model parameters that the Flower server will ask for" +msgstr "Initialise les paramètres du modèle que le serveur de Flower demandera" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:51 +msgid ":code:`load_mnist()`" +msgstr ":code:`load_mnist()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +msgid 
"Loads the MNIST dataset using OpenML" +msgstr "Charge l'ensemble de données MNIST à l'aide d'OpenML" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:53 +msgid ":code:`shuffle()`" +msgstr ":code:`shuffle()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:54 +msgid "Shuffles data and its label" +msgstr "Mélange les données et leur étiquette" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +msgid ":code:`partition()`" +msgstr ":code:`partition()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +msgid "Splits datasets into a number of partitions" +msgstr "Divise les ensembles de données en un certain nombre de partitions" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:58 +msgid "" +"Please check out :code:`utils.py` `here " +"`_ for more details. The pre-defined functions are used in" +" the :code:`client.py` and imported. The :code:`client.py` also requires " +"to import several packages such as Flower and scikit-learn:" +msgstr "" +"Tu peux consulter :code:`utils.py` `ici " +"`_ pour plus de détails. Les fonctions prédéfinies sont " +"utilisées dans :code:`client.py` et importées. :code:`client.py` " +"nécessite également d'importer plusieurs paquets tels que Flower et " +"scikit-learn :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:73 +msgid "" +"We load the MNIST dataset from `OpenML `_, " +"a popular image classification dataset of handwritten digits for machine " +"learning. The utility :code:`utils.load_mnist()` downloads the training " +"and test data. The training set is split afterwards into 10 partitions " +"with :code:`utils.partition()`." +msgstr "" +"Nous chargeons l'ensemble de données MNIST de `OpenML " +"`_, un ensemble de données de " +"classification d'images populaires de chiffres manuscrits pour " +"l'apprentissage automatique. L'utilitaire :code:`utils.load_mnist()` " +"télécharge les données d'entraînement et de test. 
L'ensemble " +"d'entraînement est ensuite divisé en 10 partitions avec " +":code:`utils.partition()`." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:85 +msgid "" +"Next, the logistic regression model is defined and initialized with " +":code:`utils.set_initial_params()`." +msgstr "" +"Ensuite, le modèle de régression logistique est défini et initialisé avec" +" :code:`utils.set_initial_params()`." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:97 +msgid "" +"The Flower server interacts with clients through an interface called " +":code:`Client`. When the server selects a particular client for training," +" it sends training instructions over the network. The client receives " +"those instructions and calls one of the :code:`Client` methods to run " +"your code (i.e., to fit the logistic regression we defined earlier)." +msgstr "" +"Le serveur Flower interagit avec les clients par le biais d'une interface" +" appelée :code:`Client`. Lorsque le serveur sélectionne un client " +"particulier pour la formation, il envoie des instructions de formation " +"sur le réseau. Le client reçoit ces instructions et appelle l'une des " +"méthodes :code:`Client` pour exécuter ton code (c'est-à-dire pour ajuster" +" la régression logistique que nous avons définie plus tôt)." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:103 +msgid "" +"Flower provides a convenience class called :code:`NumPyClient` which " +"makes it easier to implement the :code:`Client` interface when your " +"workload uses scikit-learn. Implementing :code:`NumPyClient` usually " +"means defining the following methods (:code:`set_parameters` is optional " +"though):" +msgstr "" +"Flower fournit une classe de commodité appelée :code:`NumPyClient` qui " +"facilite la mise en œuvre de l'interface :code:`Client` lorsque ta charge" +" de travail utilise scikit-learn. 
Mettre en œuvre :code:`NumPyClient` "
+"signifie généralement définir les méthodes suivantes "
+"(:code:`set_parameters` est cependant facultatif) :"
+
+#: ../../source/tutorial-quickstart-scikitlearn.rst:112
+msgid "is directly imported with :code:`utils.set_model_params()`"
+msgstr "est directement importé avec :code:`utils.set_model_params()`"
+
+#: ../../source/tutorial-quickstart-scikitlearn.rst:120
+msgid "The methods can be implemented in the following way:"
+msgstr "Les méthodes peuvent être mises en œuvre de la manière suivante :"
+
+#: ../../source/tutorial-quickstart-scikitlearn.rst:143
+msgid ""
+"We can now create an instance of our class :code:`MnistClient` and add "
+"one line to actually run this client:"
+msgstr ""
+"Nous pouvons maintenant créer une instance de notre classe "
+":code:`MnistClient` et ajouter une ligne pour exécuter ce client :"
+
+#: ../../source/tutorial-quickstart-scikitlearn.rst:150
+#, fuzzy
+msgid ""
+"That's it for the client. We only have to implement :code:`Client` or "
+":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you "
+"implement a client of type :code:`NumPyClient` you'll need to first call "
+"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells"
+" the client which server to connect to. In our case we can run the server"
+" and the client on the same machine, therefore we use "
+":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the "
+"server and clients running on different machines, all that needs to "
+"change is the :code:`server_address` we pass to the client." 
+msgstr ""
+"C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou"
+" :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()`. Si tu"
+" implémentes un client de type :code:`NumPyClient`, tu devras d'abord "
+"appeler sa méthode :code:`to_client()`. La "
+"chaîne :code:`\"0.0.0.0:8080\"` indique au client à quel serveur se "
+"connecter. Dans notre cas, nous pouvons exécuter le serveur et le client "
+"sur la même machine, c'est pourquoi nous utilisons "
+":code:`\"0.0.0.0:8080\"`. 
Si nous exécutons une charge de travail " +"véritablement fédérée avec le serveur et les clients s'exécutant sur des " +"machines différentes, tout ce qui doit changer est :code:`server_address`" +" que nous transmettons au client." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:159 +msgid "" +"The following Flower server is a little bit more advanced and returns an " +"evaluation function for the server-side evaluation. First, we import " +"again all required libraries such as Flower and scikit-learn." +msgstr "" +"Le serveur Flower suivant est un peu plus avancé et renvoie une fonction " +"d'évaluation pour l'évaluation côté serveur. Tout d'abord, nous importons" +" à nouveau toutes les bibliothèques requises telles que Flower et scikit-" +"learn." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:162 +msgid ":code:`server.py`, import Flower and start the server:" +msgstr ":code:`server.py`, importe Flower et démarre le serveur :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:173 +msgid "" +"The number of federated learning rounds is set in :code:`fit_round()` and" +" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " +"function is called after each federated learning round and gives you " +"information about loss and accuracy." +msgstr "" +"Le nombre de tours d'apprentissage fédéré est défini dans " +":code:`fit_round()` et l'évaluation est définie dans " +":code:`get_evaluate_fn()`. La fonction d'évaluation est appelée après " +"chaque tour d'apprentissage fédéré et te donne des informations sur la " +"perte et la précision." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:198 +msgid "" +"The :code:`main` contains the server-side parameter initialization " +":code:`utils.set_initial_params()` as well as the aggregation strategy " +":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " +"federated averaging (or FedAvg), with two clients and evaluation after " +"each federated learning round. 
The server can be started with the command" +" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." +msgstr "" +"Le :code:`main` contient l'initialisation des paramètres côté serveur " +":code:`utils.set_initial_params()` ainsi que la stratégie d'agrégation " +":code:`fl.server.strategy:FedAvg()`. La stratégie est celle par défaut, " +"la moyenne fédérée (ou FedAvg), avec deux clients et une évaluation après" +" chaque tour d'apprentissage fédéré. Le serveur peut être démarré avec la" +" commande :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:217 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. Federated learning systems usually have a " +"server and multiple clients. We, therefore, have to start the server " +"first:" +msgstr "" +"Le client et le serveur étant prêts, nous pouvons maintenant tout lancer " +"et voir l'apprentissage fédéré en action. Les systèmes d'apprentissage " +"fédéré ont généralement un serveur et plusieurs clients. Nous devons donc" +" commencer par lancer le serveur :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:271 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in :code:`examples/sklearn-logreg-" +"mnist`." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples/sklearn-logreg-" +"mnist`." + +#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with TensorFlow to train a MobilNetV2 model on CIFAR-10." 
+msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:5 +msgid "Quickstart TensorFlow" +msgstr "Démarrage rapide de TensorFlow" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:497 +#: ../../source/tutorial-quickstart-tensorflow.rst:13 +msgid "Let's build a federated learning system in less than 20 lines of code!" +msgstr "" +"Construisons un système d'apprentissage fédéré en moins de 20 lignes de " +"code !" + +#: ../../source/tutorial-quickstart-tensorflow.rst:15 +msgid "Before Flower can be imported we have to install it:" +msgstr "Avant de pouvoir importer une fleur, nous devons l'installer :" + +#: ../../source/tutorial-quickstart-tensorflow.rst:21 msgid "" -"``fit``: Receive model parameters from the server, train the model " -"parameters on the local data, and return the (updated) model parameters " -"to the server" +"Since we want to use the Keras API of TensorFlow (TF), we have to install" +" TF as well:" msgstr "" -"``fit`` : reçoit les paramètres du modèle du serveur, entraîne les " -"paramètres du modèle sur les données locales et renvoie les paramètres du" -" modèle (mis à jour) au serveur" +"Comme nous voulons utiliser l'API Keras de TensorFlow (TF), nous devons " +"également installer TF :" + +#: ../../source/tutorial-quickstart-tensorflow.rst:31 +msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +msgstr "" +"Ensuite, dans un fichier appelé :code:`client.py`, importe Flower et " +"TensorFlow :" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:498 +#: ../../source/tutorial-quickstart-tensorflow.rst:38 msgid "" -"``evaluate``: Receive model parameters from the server, evaluate the " -"model parameters on the local data, and return the evaluation result to " -"the server" +"We use the Keras utilities of TF to load CIFAR10, a popular colored image" +" classification dataset for machine learning. 
The call to " +":code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches " +"it locally, and then returns the entire training and test set as NumPy " +"ndarrays." msgstr "" -"``evaluate`` : reçoit les paramètres du modèle du serveur, évalue les " -"paramètres du modèle sur les données locales et renvoie le résultat de " -"l'évaluation au serveur" +"Nous utilisons les utilitaires Keras de TF pour charger CIFAR10, un " +"ensemble de données de classification d'images colorées populaire pour " +"l'apprentissage automatique. L'appel à " +":code:`tf.keras.datasets.cifar10.load_data()` télécharge CIFAR10, le met " +"en cache localement, puis renvoie l'ensemble d'entraînement et de test " +"sous forme de NumPy ndarrays." -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:500 +#: ../../source/tutorial-quickstart-tensorflow.rst:47 msgid "" -"We mentioned that our clients will use the previously defined PyTorch " -"components for model training and evaluation. Let's see a simple Flower " -"client implementation that brings everything together:" +"Next, we need a model. For the purpose of this tutorial, we use " +"MobilNetV2 with 10 output classes:" msgstr "" -"Nous avons mentionné que nos clients utiliseront les composants PyTorch " -"définis précédemment pour la formation et l'évaluation des modèles. " -"Voyons une simple mise en œuvre du client Flower qui réunit tout cela :" +"Ensuite, nous avons besoin d'un modèle. Pour les besoins de ce tutoriel, " +"nous utilisons MobilNetV2 avec 10 classes de sortie :" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:537 +#: ../../source/tutorial-quickstart-tensorflow.rst:60 msgid "" -"Our class ``FlowerClient`` defines how local training/evaluation will be " -"performed and allows Flower to call the local training/evaluation through" -" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" -" *single client* in our federated learning system. 
Federated learning " -"systems have multiple clients (otherwise, there's not much to federate), " -"so each client will be represented by its own instance of " -"``FlowerClient``. If we have, for example, three clients in our workload," -" then we'd have three instances of ``FlowerClient``. Flower calls " -"``FlowerClient.fit`` on the respective instance when the server selects a" -" particular client for training (and ``FlowerClient.evaluate`` for " -"evaluation)." +"Flower provides a convenience class called :code:`NumPyClient` which " +"makes it easier to implement the :code:`Client` interface when your " +"workload uses Keras. The :code:`NumPyClient` interface defines three " +"methods which can be implemented in the following way:" msgstr "" -"Our class ``FlowerClient`` defines how local training/evaluation will be " -"performed and allows Flower to call the local training/evaluation through" -" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" -" *single client* in our federated learning system. Federated learning " -"systems have multiple clients (otherwise, there's not much to federate), " -"so each client will be represented by its own instance of " -"``FlowerClient``. If we have, for example, three clients in our workload," -" then we'd have three instances of ``FlowerClient``. Flower calls " -"``FlowerClient.fit`` on the respective instance when the server selects a" -" particular client for training (and ``FlowerClient.evaluate`` for " -"evaluation)." +"Flower fournit une classe de commodité appelée :code:`NumPyClient` qui " +"facilite la mise en œuvre de l'interface :code:`Client` lorsque ta charge" +" de travail utilise Keras. 
L'interface :code:`NumPyClient` définit trois " +"méthodes qui peuvent être mises en œuvre de la manière suivante :" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:541 -msgid "Using the Virtual Client Engine" -msgstr "Utilisation du moteur du client virtuel" +#: ../../source/tutorial-quickstart-tensorflow.rst:135 +msgid "Each client will have its own dataset." +msgstr "Chaque client aura son propre ensemble de données." -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:543 +#: ../../source/tutorial-quickstart-tensorflow.rst:137 msgid "" -"In this notebook, we want to simulate a federated learning system with 10" -" clients on a single machine. This means that the server and all 10 " -"clients will live on a single machine and share resources such as CPU, " -"GPU, and memory. Having 10 clients would mean having 10 instances of " -"``FlowerClient`` in memory. Doing this on a single machine can quickly " -"exhaust the available memory resources, even if only a subset of these " -"clients participates in a single round of federated learning." +"You should now see how the training does in the very first terminal (the " +"one that started the server):" msgstr "" -"Dans ce carnet, nous voulons simuler un système d'apprentissage fédéré " -"avec 10 clients sur une seule machine. Cela signifie que le serveur et " -"les 10 clients vivront sur une seule machine et partageront des " -"ressources telles que le CPU, le GPU et la mémoire. Avoir 10 clients " -"signifierait avoir 10 instances de ``FlowerClient`` en mémoire. Faire " -"cela sur une seule machine peut rapidement épuiser les ressources mémoire" -" disponibles, même si seulement un sous-ensemble de ces clients participe" -" à un seul tour d'apprentissage fédéré." 
+"Tu devrais maintenant voir comment la formation se déroule dans le tout " +"premier terminal (celui qui a démarré le serveur) :" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:545 +#: ../../source/tutorial-quickstart-tensorflow.rst:169 +#, fuzzy msgid "" -"In addition to the regular capabilities where server and clients run on " -"multiple machines, Flower, therefore, provides special simulation " -"capabilities that create ``FlowerClient`` instances only when they are " -"actually necessary for training or evaluation. To enable the Flower " -"framework to create clients when necessary, we need to implement a " -"function called ``client_fn`` that creates a ``FlowerClient`` instance on" -" demand. Flower calls ``client_fn`` whenever it needs an instance of one " -"particular client to call ``fit`` or ``evaluate`` (those instances are " -"usually discarded after use, so they should not keep any local state). " -"Clients are identified by a client ID, or short ``cid``. The ``cid`` can " -"be used, for example, to load different local data partitions for " -"different clients, as can be seen below:" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this can be found in :code:`examples" +"/quickstart-tensorflow/client.py`." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le `code source complet " +"`_ pour cela se trouve dans :code:`examples" +"/quickstart-tensorflow/client.py`." + +#: ../../source/tutorial-quickstart-xgboost.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with XGBoost to train classification models on trees." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:5 +msgid "Quickstart XGBoost" +msgstr "Démarrage rapide XGBoost" + +#: ../../source/tutorial-quickstart-xgboost.rst:14 +#, fuzzy +msgid "Federated XGBoost" +msgstr "Formation fédérée" + +#: ../../source/tutorial-quickstart-xgboost.rst:16 +msgid "" +"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " +"implementation of gradient-boosted decision tree (**GBDT**), that " +"maximises the computational boundaries for boosted tree methods. It's " +"primarily designed to enhance both the performance and computational " +"speed of machine learning models. In XGBoost, trees are constructed " +"concurrently, unlike the sequential approach taken by GBDT." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:20 +msgid "" +"Often, for tabular data on medium-sized datasets with fewer than 10k " +"training examples, XGBoost surpasses the results of deep learning " +"techniques." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:23 +#, fuzzy +msgid "Why federated XGBoost?" +msgstr "Qu'est-ce que l'apprentissage fédéré ?" + +#: ../../source/tutorial-quickstart-xgboost.rst:25 +msgid "" +"Indeed, as the demand for data privacy and decentralized learning grows, " +"there's an increasing requirement to implement federated XGBoost systems " +"for specialised applications, like survival analysis and financial fraud " +"detection." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:27 +msgid "" +"Federated learning ensures that raw data remains on the local device, " +"making it an attractive approach for sensitive domains where data " +"security and privacy are paramount. Given the robustness and efficiency " +"of XGBoost, combining it with federated learning offers a promising " +"solution for these specific challenges." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:30 +msgid "" +"In this tutorial we will learn how to train a federated XGBoost model on " +"HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " +"example (`full code xgboost-quickstart " +"`_)" +" with two *clients* and one *server* to demonstrate how federated XGBoost" +" works, and then we dive into a more complex example (`full code xgboost-" +"comprehensive `_) to run various experiments." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:37 +msgid "Environment Setup" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:41 +msgid "" +"We first need to install Flower and Flower Datasets. You can do this by " +"running :" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:47 +#, fuzzy +msgid "" +"Since we want to use :code:`xgboost` package to build up XGBoost trees, " +"let's go ahead and install :code:`xgboost`:" +msgstr "Puisque nous voulons utiliser scikt-learn, allons-y et installons-le :" + +#: ../../source/tutorial-quickstart-xgboost.rst:57 +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. Now that we have all our " +"dependencies installed, let's run a simple distributed training with two " +"clients and one server." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:60 +#, fuzzy +msgid "" +"In a file called :code:`client.py`, import xgboost, Flower, Flower " +"Datasets and other related functions:" +msgstr "" +"Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " +"liés à PyTorch :" + +#: ../../source/tutorial-quickstart-xgboost.rst:87 +msgid "Dataset partition and hyper-parameter selection" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:89 +msgid "" +"Prior to local training, we require loading the HIGGS dataset from Flower" +" Datasets and conduct data partitioning for FL:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:102 +msgid "" +"In this example, we split the dataset into two partitions with uniform " +"distribution (:code:`IidPartitioner(num_partitions=2)`). Then, we load " +"the partition for the given client based on :code:`node_id`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:121 +msgid "" +"After that, we do train/test splitting on the given partition (client's " +"local data), and transform data format for :code:`xgboost` package." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:134 +msgid "" +"The functions of :code:`train_test_split` and " +":code:`transform_dataset_to_dmatrix` are defined as below:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:158 +msgid "Finally, we define the hyper-parameters used for XGBoost training." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:174 +msgid "" +"The :code:`num_local_round` represents the number of iterations for local" +" tree boost. We use CPU for the training in default. One can shift it to " +"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " +"evaluation metric." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:181 +msgid "Flower client definition for XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:183 +msgid "" +"After loading the dataset we define the Flower client. We follow the " +"general rule to define :code:`XgbClient` class inherited from " +":code:`fl.client.Client`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:193 +msgid "" +"The :code:`self.bst` is used to keep the Booster objects that remain " +"consistent across rounds, allowing them to store predictions from trees " +"integrated in earlier rounds and maintain other essential data structures" +" for training." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:196 +msgid "" +"Then, we override :code:`get_parameters`, :code:`fit` and " +":code:`evaluate` methods insides :code:`XgbClient` class as follows." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:210 +msgid "" +"Unlike neural network training, XGBoost trees are not started from a " +"specified random weights. In this case, we do not use " +":code:`get_parameters` and :code:`set_parameters` to initialise model " +"parameters for XGBoost. As a result, let's return an empty tensor in " +":code:`get_parameters` when it is called by the server at the first " +"round." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:251 +msgid "" +"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " +"up the first set of trees. the returned Booster object and config are " +"stored in :code:`self.bst` and :code:`self.config`, respectively. From " +"the second round, we load the global model sent from server to " +":code:`self.bst`, and then update model weights on local training data " +"with function :code:`local_boost` as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:269 +msgid "" +"Given :code:`num_local_round`, we update trees by calling " +":code:`self.bst.update` method. 
After training, the last "
+":code:`N=num_local_round` trees will be extracted to send to the server."
+msgstr ""
+
+#: ../../source/tutorial-quickstart-xgboost.rst:291
+msgid ""
+"In :code:`evaluate`, we call :code:`self.bst.eval_set` function to "
+"conduct evaluation on valid set. The AUC value will be returned."
+msgstr ""
+
+#: ../../source/tutorial-quickstart-xgboost.rst:294
+#, fuzzy
+msgid ""
+"Now, we can create an instance of our class :code:`XgbClient` and add one"
+" line to actually run this client:"
+msgstr ""
+"Nous pouvons maintenant créer une instance de notre classe "
+":code:`XgbClient` et ajouter une ligne pour exécuter ce client :"
+
+#: ../../source/tutorial-quickstart-xgboost.rst:300
+#, fuzzy
+msgid ""
+"That's it for the client. We only have to implement :code:`Client`and "
+"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` "
+"tells the client which server to connect to. In our case we can run the "
+"server and the client on the same machine, therefore we use "
+":code:`\"[::]:8080\"`. If we run a truly federated workload with the "
+"server and clients running on different machines, all that needs to "
+"change is the :code:`server_address` we point the client at."
+msgstr ""
+"C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` et"
+" d'appeler :code:`fl.client.start_client()`. La chaîne "
+":code:`\"[::]:8080\"` indique au client à quel serveur se connecter. Dans"
+" notre cas, nous pouvons exécuter le serveur et le client sur la même "
+"machine, c'est pourquoi nous utilisons :code:`\"[::]:8080\"`. Si nous "
+"exécutons une charge de travail véritablement fédérée avec le serveur et "
+"les clients fonctionnant sur des machines différentes, tout ce qui doit "
+"changer est l'adresse :code:`server_address` vers laquelle nous dirigeons"
+" le client." 
+ +#: ../../source/tutorial-quickstart-xgboost.rst:311 +#, fuzzy +msgid "" +"These updates are then sent to the *server* which will aggregate them to " +"produce a better model. Finally, the *server* sends this improved version" +" of the model back to each *client* to finish a complete FL round." +msgstr "" +"*Les clients* sont chargés de générer des mises à jour de poids " +"individuelles pour le modèle en fonction de leurs ensembles de données " +"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " +"agrège pour produire un meilleur modèle. Enfin, le *serveur* renvoie " +"cette version améliorée du modèle à chaque *client*. Un cycle complet de " +"mises à jour de poids s'appelle un *round*." + +#: ../../source/tutorial-quickstart-xgboost.rst:314 +#, fuzzy +msgid "" +"In a file named :code:`server.py`, import Flower and FedXgbBagging from " +":code:`flwr.server.strategy`." +msgstr "" +"Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " +"liés au MXNet :" + +#: ../../source/tutorial-quickstart-xgboost.rst:316 +msgid "We first define a strategy for XGBoost bagging aggregation." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:339 +msgid "" +"We use two clients for this example. An " +":code:`evaluate_metrics_aggregation` function is defined to collect and " +"wighted average the AUC values from clients." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:342 +#, fuzzy +msgid "Then, we start the server:" +msgstr "Démarrer le serveur" + +#: ../../source/tutorial-quickstart-xgboost.rst:354 +msgid "Tree-based bagging aggregation" msgstr "" -"In addition to the regular capabilities where server and clients run on " -"multiple machines, Flower, therefore, provides special simulation " -"capabilities that create ``FlowerClient`` instances only when they are " -"actually necessary for training or evaluation. 
To enable the Flower " -"framework to create clients when necessary, we need to implement a " -"function called ``client_fn`` that creates a ``FlowerClient`` instance on" -" demand. Flower calls ``client_fn`` whenever it needs an instance of one " -"particular client to call ``fit`` or ``evaluate`` (those instances are " -"usually discarded after use, so they should not keep any local state). " -"Clients are identified by a client ID, or short ``cid``. The ``cid`` can " -"be used, for example, to load different local data partitions for " -"different clients, as can be seen below:" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:580 -msgid "Starting the training" -msgstr "Commencer la formation" +#: ../../source/tutorial-quickstart-xgboost.rst:356 +msgid "" +"You must be curious about how bagging aggregation works. Let's look into " +"the details." +msgstr "" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:582 +#: ../../source/tutorial-quickstart-xgboost.rst:358 msgid "" -"We now have the class ``FlowerClient`` which defines client-side " -"training/evaluation and ``client_fn`` which allows Flower to create " -"``FlowerClient`` instances whenever it needs to call ``fit`` or " -"``evaluate`` on one particular client. The last step is to start the " -"actual simulation using ``flwr.simulation.start_simulation``." +"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " +":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." +" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " +"and :code:`evaluate` methods as follows:" msgstr "" -"Nous avons maintenant la classe ``FlowerClient`` qui définit " -"l'entraînement/évaluation côté client et ``client_fn`` qui permet à " -"Flower de créer des instances de ``FlowerClient`` chaque fois qu'il a " -"besoin d'appeler ``fit`` ou ``evaluate`` sur un client particulier. 
La " -"dernière étape consiste à démarrer la simulation réelle en utilisant " -"``flwr.simulation.start_simulation``." -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:584 +#: ../../source/tutorial-quickstart-xgboost.rst:454 msgid "" -"The function ``start_simulation`` accepts a number of arguments, amongst " -"them the ``client_fn`` used to create ``FlowerClient`` instances, the " -"number of clients to simulate (``num_clients``), the number of federated " -"learning rounds (``num_rounds``), and the strategy. The strategy " -"encapsulates the federated learning approach/algorithm, for example, " -"*Federated Averaging* (FedAvg)." +"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " +"trees by calling :code:`aggregate()` function:" msgstr "" -"La fonction ``start_simulation`` accepte un certain nombre d'arguments, " -"parmi lesquels le ``client_fn`` utilisé pour créer les instances " -"``FlowerClient``, le nombre de clients à simuler (``num_clients``), le " -"nombre de tours d'apprentissage fédéré (``num_rounds``), et la stratégie." -" La stratégie encapsule l'approche/algorithme d'apprentissage fédéré, par" -" exemple, *Federated Averaging* (FedAvg)." -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:586 +#: ../../source/tutorial-quickstart-xgboost.rst:513 msgid "" -"Flower has a number of built-in strategies, but we can also use our own " -"strategy implementations to customize nearly all aspects of the federated" -" learning approach. For this example, we use the built-in ``FedAvg`` " -"implementation and customize it using a few basic parameters. The last " -"step is the actual call to ``start_simulation`` which - you guessed it - " -"starts the simulation:" +"In this function, we first fetch the number of trees and the number of " +"parallel trees for the current and previous model by calling " +":code:`_get_tree_nums`. Then, the fetched information will be aggregated." 
+" After that, the trees (containing model weights) are aggregated to " +"generate a new tree model." msgstr "" -"Flower dispose d'un certain nombre de stratégies intégrées, mais nous " -"pouvons également utiliser nos propres implémentations de stratégies pour" -" personnaliser presque tous les aspects de l'approche de l'apprentissage " -"fédéré. Pour cet exemple, nous utilisons l'implémentation intégrée " -"``FedAvg`` et nous la personnalisons en utilisant quelques paramètres de " -"base. La dernière étape est l'appel à ``start_simulation`` qui - tu l'as " -"deviné - démarre la simulation :" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:628 -msgid "Behind the scenes" -msgstr "Dans les coulisses" +#: ../../source/tutorial-quickstart-xgboost.rst:518 +msgid "" +"After traversal of all clients' models, a new global model is generated, " +"followed by the serialisation, and sending back to each client." +msgstr "" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:630 -msgid "So how does this work? How does Flower execute this simulation?" +#: ../../source/tutorial-quickstart-xgboost.rst:523 +msgid "Launch Federated XGBoost!" msgstr "" -"Alors, comment cela fonctionne-t-il ? Comment Flower exécute-t-il cette " -"simulation ?" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:632 -#, python-format +#: ../../source/tutorial-quickstart-xgboost.rst:585 msgid "" -"When we call ``start_simulation``, we tell Flower that there are 10 " -"clients (``num_clients=10``). Flower then goes ahead an asks the " -"``FedAvg`` strategy to select clients. ``FedAvg`` knows that it should " -"select 100% of the available clients (``fraction_fit=1.0``), so it goes " -"ahead and selects 10 random clients (i.e., 100% of 10)." +"Congratulations! You've successfully built and run your first federated " +"XGBoost system. The AUC values can be checked in " +":code:`metrics_distributed`. One can see that the average AUC increases " +"over FL rounds." 
msgstr "" -"Lorsque nous appelons ``start_simulation``, nous disons à Flower qu'il y " -"a 10 clients (``num_clients=10``). Flower demande alors à la stratégie " -"``FedAvg`` de sélectionner des clients. ``FedAvg` sait qu'il doit " -"sélectionner 100% des clients disponibles (``fraction_fit=1.0``), alors " -"il choisit 10 clients au hasard (c'est à dire 100% de 10)." -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:634 +#: ../../source/tutorial-quickstart-xgboost.rst:590 +#, fuzzy msgid "" -"Flower then asks the selected 10 clients to train the model. When the " -"server receives the model parameter updates from the clients, it hands " -"those updates over to the strategy (*FedAvg*) for aggregation. The " -"strategy aggregates those updates and returns the new global model, which" -" then gets used in the next round of federated learning." +"The full `source code `_ for this example can be found in :code:`examples" +"/xgboost-quickstart`." msgstr "" -"Flower demande ensuite aux 10 clients sélectionnés d'entraîner le modèle." -" Lorsque le serveur reçoit les mises à jour des paramètres du modèle de " -"la part des clients, il les transmet à la stratégie (*FedAvg*) pour " -"qu'elle les agrège. La stratégie agrège ces mises à jour et renvoie le " -"nouveau modèle global, qui est ensuite utilisé dans le prochain cycle " -"d'apprentissage fédéré." +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:646 -msgid "Where's the accuracy?" -msgstr "Où est la précision ?" 
+#: ../../source/tutorial-quickstart-xgboost.rst:594
+msgid "Comprehensive Federated XGBoost"
+msgstr ""

-#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:648
+#: ../../source/tutorial-quickstart-xgboost.rst:596
 msgid ""
-"You may have noticed that all metrics except for ``losses_distributed`` "
-"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?"
+"Now that you have seen how federated XGBoost works with Flower, it's time"
+" to run some more comprehensive experiments by customising the "
+"experimental settings. In the xgboost-comprehensive example (`full code "
+"`_), we provide more options to define various experimental"
+" setups, including aggregation strategies, data partitioning and "
+"centralised/distributed evaluation. We also support `Flower simulation "
+"`_ making "
+"it easy to simulate large client cohorts in a resource-aware manner. "
+"Let's take a look!"
 msgstr ""
-"Tu as peut-être remarqué que toutes les mesures, à l'exception de "
-"``pertes_distribuées``, sont vides. Où est passée la ``{\"précision\" : "
-"float(précision)}`` ?"

-#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:650
+#: ../../source/tutorial-quickstart-xgboost.rst:603
+#, fuzzy
+msgid "Cyclic training"
+msgstr "Formation centralisée"
+
+#: ../../source/tutorial-quickstart-xgboost.rst:605
 msgid ""
-"Flower can automatically aggregate losses returned by individual clients,"
-" but it cannot do the same for metrics in the generic metrics dictionary "
-"(the one with the ``accuracy`` key). Metrics dictionaries can contain "
-"very different kinds of metrics and even key/value pairs that are not "
-"metrics at all, so the framework does not (and can not) know how to "
-"handle these automatically."
+"In addition to bagging aggregation, we offer a cyclic training scheme, "
+"which performs FL in a client-by-client fashion. 
Instead of aggregating " +"multiple clients, there is only one single client participating in the " +"training per round in the cyclic training scenario. The trained local " +"XGBoost trees will be passed to the next client as an initialised model " +"for next round's boosting." msgstr "" -"Flower peut automatiquement agréger les pertes renvoyées par les clients " -"individuels, mais il ne peut pas faire la même chose pour les mesures " -"dans le dictionnaire de mesures générique (celui avec la clé " -"``accuracy``). Les dictionnaires de mesures peuvent contenir des types de" -" mesures très différents et même des paires clé/valeur qui ne sont pas " -"des mesures du tout, donc le cadre ne sait pas (et ne peut pas) savoir " -"comment les gérer automatiquement." -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:652 +#: ../../source/tutorial-quickstart-xgboost.rst:609 msgid "" -"As users, we need to tell the framework how to handle/aggregate these " -"custom metrics, and we do so by passing metric aggregation functions to " -"the strategy. The strategy will then call these functions whenever it " -"receives fit or evaluate metrics from clients. The two possible functions" -" are ``fit_metrics_aggregation_fn`` and " -"``evaluate_metrics_aggregation_fn``." +"To do this, we first customise a :code:`ClientManager` in " +":code:`server_utils.py`:" msgstr "" -"En tant qu'utilisateurs, nous devons indiquer au framework comment " -"gérer/agréger ces métriques personnalisées, et nous le faisons en passant" -" des fonctions d'agrégation de métriques à la stratégie. La stratégie " -"appellera alors ces fonctions chaque fois qu'elle recevra des métriques " -"d'ajustement ou d'évaluation de la part des clients. Les deux fonctions " -"possibles sont ``fit_metrics_aggregation_fn`` et " -"``evaluate_metrics_aggregation_fn``." 
-#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:654 +#: ../../source/tutorial-quickstart-xgboost.rst:649 msgid "" -"Let's create a simple weighted averaging function to aggregate the " -"``accuracy`` metric we return from ``evaluate``:" +"The customised :code:`ClientManager` samples all available clients in " +"each FL round based on the order of connection to the server. Then, we " +"define a new strategy :code:`FedXgbCyclic` in " +":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " +"select only one client in given round and pass the received model to next" +" client." msgstr "" -"Créons une simple fonction de calcul de la moyenne pondérée pour agréger " -"la mesure de \"précision\" que nous renvoie ``evaluate`` :" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:680 +#: ../../source/tutorial-quickstart-xgboost.rst:690 msgid "" -"The only thing left to do is to tell the strategy to call this function " -"whenever it receives evaluation metric dictionaries from the clients:" +"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " +"Instead, we just make a copy of the received client model as global model" +" by overriding :code:`aggregate_fit`." msgstr "" -"La seule chose qui reste à faire est d'indiquer à la stratégie d'appeler " -"cette fonction chaque fois qu'elle reçoit des dictionnaires de métriques " -"d'évaluation de la part des clients :" -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:717 +#: ../../source/tutorial-quickstart-xgboost.rst:693 msgid "" -"We now have a full system that performs federated training and federated " -"evaluation. It uses the ``weighted_average`` function to aggregate custom" -" evaluation metrics and calculates a single ``accuracy`` metric across " -"all clients on the server side." 
+"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`"
+" methods ensure that the clients are sequentially selected in a given FL round:"
+msgstr ""
+
+#: ../../source/tutorial-quickstart-xgboost.rst:757
+msgid "Customised data partitioning"
 msgstr ""
-"Nous avons maintenant un système complet qui effectue la formation "
-"fédérée et l'évaluation fédérée. Il utilise la fonction ``moyenne "
-"pondérée`` pour agréger les mesures d'évaluation personnalisées et "
-"calcule une seule mesure de ``précision`` pour tous les clients du côté "
-"du serveur."

-#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:719
+#: ../../source/tutorial-quickstart-xgboost.rst:759
 msgid ""
-"The other two categories of metrics (``losses_centralized`` and "
-"``metrics_centralized``) are still empty because they only apply when "
-"centralized evaluation is being used. Part two of the Flower tutorial "
-"will cover centralized evaluation."
+"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`"
+" to instantiate the data partitioner based on the given "
+":code:`num_partitions` and :code:`partitioner_type`. Currently, we "
+"provide four supported partitioner types to simulate the uniformity/non-"
+"uniformity in data quantity (uniform, linear, square, exponential)."
 msgstr ""
-"Les deux autres catégories de mesures (``pertes_centralisées`` et "
-"``métriques_centralisées``) sont toujours vides car elles ne s'appliquent"
-" que lorsque l'évaluation centralisée est utilisée. La deuxième partie du"
-" tutoriel sur les fleurs couvrira l'évaluation centralisée."
-#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:731
-#: ../../source/tutorial-what-is-federated-learning.ipynb:351
-msgid "Final remarks"
-msgstr "Remarques finales"

+#: ../../source/tutorial-quickstart-xgboost.rst:790
+#, fuzzy
+msgid "Customised centralised/distributed evaluation"
+msgstr "Évaluation centralisée"

-#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:733
+#: ../../source/tutorial-quickstart-xgboost.rst:792
 msgid ""
-"Congratulations, you just trained a convolutional neural network, "
-"federated over 10 clients! With that, you understand the basics of "
-"federated learning with Flower. The same approach you've seen can be used"
-" with other machine learning frameworks (not just PyTorch) and tasks (not"
-" just CIFAR-10 images classification), for example NLP with Hugging Face "
-"Transformers or speech with SpeechBrain."
+"To facilitate centralised evaluation, we define a function in "
+":code:`server_utils.py`:"
 msgstr ""
-"Félicitations, tu viens d'entraîner un réseau neuronal convolutif, fédéré"
-" sur 10 clients ! Avec ça, tu comprends les bases de l'apprentissage "
-"fédéré avec Flower. La même approche que tu as vue peut être utilisée "
-"avec d'autres cadres d'apprentissage automatique (pas seulement PyTorch) "
-"et d'autres tâches (pas seulement la classification des images CIFAR-10),"
-" par exemple le NLP avec Hugging Face Transformers ou la parole avec "
-"SpeechBrain."

-#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:735
+#: ../../source/tutorial-quickstart-xgboost.rst:824
 msgid ""
-"In the next notebook, we're going to cover some more advanced concepts. "
-"Want to customize your strategy? Initialize parameters on the server "
-"side? Or evaluate the aggregated model on the server side? We'll cover "
-"all this and more in the next tutorial."
+"This function returns an evaluation function which instantiates a "
+":code:`Booster` object and loads the global model weights to it. 
The " +"evaluation is conducted by calling :code:`eval_set()` method, and the " +"tested AUC value is reported." msgstr "" -"Dans le prochain cahier, nous allons aborder des concepts plus avancés. " -"Tu veux personnaliser ta stratégie ? Initialiser des paramètres côté " -"serveur ? Ou évaluer le modèle agrégé côté serveur ? Nous aborderons tout" -" cela et bien plus encore dans le prochain tutoriel." -#: ../../source/tutorial-get-started-with-flower-pytorch.ipynb:753 -#, fuzzy +#: ../../source/tutorial-quickstart-xgboost.rst:827 msgid "" -"The `Flower Federated Learning Tutorial - Part 2 " -"`__ goes into more depth about strategies and all " -"the advanced things you can build with them." +"As for distributed evaluation on the clients, it's same as the quick-" +"start example by overriding the :code:`evaluate()` method insides the " +":code:`XgbClient` class in :code:`client_utils.py`." msgstr "" -"Le `Tutoriel d'apprentissage fédéré Flower - Partie 2 " -"`__ va plus en profondeur sur les stratégies et toutes les " -"choses avancées que tu peux construire avec elles." -#: ../../source/tutorial-quickstart-android.rst:5 +#: ../../source/tutorial-quickstart-xgboost.rst:831 #, fuzzy -msgid "Quickstart Android" -msgstr "Démarrage rapide des Pandas" +msgid "Flower simulation" +msgstr "Simulation de moniteur" -#: ../../source/tutorial-quickstart-android.rst:7 -#, fuzzy +#: ../../source/tutorial-quickstart-xgboost.rst:832 msgid "" -"Let's build a federated learning system using TFLite and Flower on " -"Android!" +"We also provide an example code (:code:`sim.py`) to use the simulation " +"capabilities of Flower to simulate federated XGBoost training on either a" +" single machine or a cluster of machines." msgstr "" -"Construisons un système d'apprentissage fédéré en utilisant fastai et " -"Flower !" 
-#: ../../source/tutorial-quickstart-android.rst:9 -#, fuzzy +#: ../../source/tutorial-quickstart-xgboost.rst:866 msgid "" -"Please refer to the `full code example " -"`_ to learn " -"more." +"After importing all required packages, we define a :code:`main()` " +"function to perform the simulation process:" msgstr "" -"Réfère-toi à l'exemple de code complet " -"`_ " -"pour en savoir plus." -#: ../../source/tutorial-quickstart-fastai.rst:5 -msgid "Quickstart fastai" -msgstr "Démarrage rapide fastai" +#: ../../source/tutorial-quickstart-xgboost.rst:921 +msgid "" +"We first load the dataset and perform data partitioning, and the pre-" +"processed data is stored in a :code:`list`. After the simulation begins, " +"the clients won't need to pre-process their partitions again." +msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:7 -msgid "Let's build a federated learning system using fastai and Flower!" +#: ../../source/tutorial-quickstart-xgboost.rst:924 +msgid "Then, we define the strategies and other hyper-parameters:" msgstr "" -"Construisons un système d'apprentissage fédéré en utilisant fastai et " -"Flower !" -#: ../../source/tutorial-quickstart-fastai.rst:9 -#, fuzzy +#: ../../source/tutorial-quickstart-xgboost.rst:975 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"After that, we start the simulation by calling " +":code:`fl.simulation.start_simulation`:" msgstr "" -"Réfère-toi à l'exemple de code complet " -"`_ " -"pour en savoir plus." -#: ../../source/tutorial-quickstart-huggingface.rst:5 -msgid "Quickstart 🤗 Transformers" -msgstr "Démarrage rapide 🤗 Transformateurs" +#: ../../source/tutorial-quickstart-xgboost.rst:995 +msgid "" +"One of key parameters for :code:`start_simulation` is :code:`client_fn` " +"which returns a function to construct a client. 
We define it as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1038 +msgid "Arguments parser" +msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:7 +#: ../../source/tutorial-quickstart-xgboost.rst:1040 msgid "" -"Let's build a federated learning system using Hugging Face Transformers " -"and Flower!" +"In :code:`utils.py`, we define the arguments parsers for clients, server " +"and simulation, allowing users to specify different experimental " +"settings. Let's first see the sever side:" msgstr "" -"Construisons un système d'apprentissage fédéré à l'aide des " -"transformateurs Hugging Face et de Flower !" -#: ../../source/tutorial-quickstart-huggingface.rst:9 +#: ../../source/tutorial-quickstart-xgboost.rst:1086 msgid "" -"We will leverage Hugging Face to federate the training of language models" -" over multiple clients using Flower. More specifically, we will fine-tune" -" a pre-trained Transformer model (distilBERT) for sequence classification" -" over a dataset of IMDB ratings. The end goal is to detect if a movie " -"rating is positive or negative." +"This allows user to specify training strategies / the number of total " +"clients / FL rounds / participating clients / clients for evaluation, and" +" evaluation fashion. Note that with :code:`--centralised-eval`, the sever" +" will do centralised evaluation and all functionalities for client " +"evaluation will be disabled." msgstr "" -"Nous nous appuierons sur Hugging Face pour fédérer l'entraînement de " -"modèles de langage sur plusieurs clients à l'aide de Flower. Plus " -"précisément, nous mettrons au point un modèle Transformer pré-entraîné " -"(distilBERT) pour la classification de séquences sur un ensemble de " -"données d'évaluations IMDB. L'objectif final est de détecter si " -"l'évaluation d'un film est positive ou négative." 
-#: ../../source/tutorial-quickstart-huggingface.rst:15 -msgid "Dependencies" -msgstr "Dépendances" +#: ../../source/tutorial-quickstart-xgboost.rst:1090 +msgid "Then, the argument parser on client side:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1144 +msgid "" +"This defines various options for client data partitioning. Besides, " +"clients also have an option to conduct evaluation on centralised test set" +" by setting :code:`--centralised-eval`, as well as an option to perform " +"scaled learning rate based on the number of clients by setting :code" +":`--scaled-lr`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1148 +msgid "We also have an argument parser for simulation:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1226 +msgid "This integrates all arguments for both client and server sides." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1229 +#, fuzzy +msgid "Example commands" +msgstr "Exemples de PyTorch" + +#: ../../source/tutorial-quickstart-xgboost.rst:1231 +msgid "" +"To run a centralised evaluated experiment with bagging strategy on 5 " +"clients with exponential distribution for 50 rounds, we first start the " +"server as below:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1238 +#, fuzzy +msgid "Then, on each client terminal, we start the clients:" +msgstr "Ouvre un autre terminal et démarre le deuxième client :" + +#: ../../source/tutorial-quickstart-xgboost.rst:1244 +msgid "To run the same experiment with Flower simulation:" +msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:17 +#: ../../source/tutorial-quickstart-xgboost.rst:1250 +#, fuzzy msgid "" -"To follow along this tutorial you will need to install the following " -"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, " -":code:`torch`, and :code:`transformers`. 
This can be done using " -":code:`pip`:" +"The full `code `_ for this comprehensive example can be found in" +" :code:`examples/xgboost-comprehensive`." msgstr "" -"Pour suivre ce tutoriel, tu devras installer les paquets suivants : " -":code:`datasets`, :code:`evaluate`, :code:`flwr`, :code:`torch`, et " -":code:`transformers`. Cela peut être fait en utilisant :code:`pip` :" - -#: ../../source/tutorial-quickstart-huggingface.rst:27 -msgid "Standard Hugging Face workflow" -msgstr "Flux de travail standard pour le visage" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." -#: ../../source/tutorial-quickstart-huggingface.rst:30 -msgid "Handling the data" -msgstr "Traitement des données" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 +#, fuzzy +msgid "Build a strategy from scratch" +msgstr "Élaborer une stratégie à partir de zéro" -#: ../../source/tutorial-quickstart-huggingface.rst:32 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +#, fuzzy msgid "" -"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` " -"library. We then need to tokenize the data and create :code:`PyTorch` " -"dataloaders, this is all done in the :code:`load_data` function:" +"Welcome to the third part of the Flower federated learning tutorial. In " +"previous parts of this tutorial, we introduced federated learning with " +"PyTorch and Flower (`part 1 `__) and we learned how strategies " +"can be used to customize the execution on both the server and the clients" +" (`part 2 `__)." msgstr "" -"Pour récupérer le jeu de données IMDB, nous utiliserons la bibliothèque " -":code:`datasets` de Hugging Face. 
Nous devons ensuite tokeniser les " -"données et créer des :code:`PyTorch` dataloaders, ce qui est fait dans la" -" fonction :code:`load_data` :" +"Bienvenue dans la troisième partie du tutoriel sur l'apprentissage fédéré" +" Flower. Dans les parties précédentes de ce tutoriel, nous avons présenté" +" l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " +"`__) " +"et nous avons appris comment les stratégies peuvent être utilisées pour " +"personnaliser l'exécution à la fois sur le serveur et sur les clients " +"(`partie 2 `__)." -#: ../../source/tutorial-quickstart-huggingface.rst:78 -msgid "Training and testing the model" -msgstr "Former et tester le modèle" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll continue to customize the federated learning " +"system we built previously by creating a custom version of FedAvg (again," +" using `Flower `__ and `PyTorch " +"`__)." +msgstr "" +"Dans ce carnet, nous allons continuer à personnaliser le système " +"d'apprentissage fédéré que nous avons construit précédemment en créant " +"une version personnalisée de FedAvg (encore une fois, en utilisant " +"`Flower `__ et `PyTorch `__)." -#: ../../source/tutorial-quickstart-huggingface.rst:80 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 msgid "" -"Once we have a way of creating our trainloader and testloader, we can " -"take care of the training and testing. This is very similar to any " -":code:`PyTorch` training or testing loop:" +"`Star Flower on GitHub `__ ⭐️ and join " +"the Flower community on Slack to connect, ask questions, and get help: " +"`Join Slack `__ 🌼 We'd love to hear from " +"you in the ``#introductions`` channel! 
And if anything is unclear, head " +"over to the ``#questions`` channel." msgstr "" -"Une fois que nous avons trouvé un moyen de créer notre trainloader et " -"notre testloader, nous pouvons nous occuper de l'entraînement et du test." -" C'est très similaire à n'importe quelle boucle d'entraînement ou de test" -" :code:`PyTorch` :" +"`Star Flower on GitHub `__ ⭐️ et " +"rejoignez la communauté Flower sur Slack pour vous connecter, poser des " +"questions et obtenir de l'aide : `Join Slack `__ 🌼 Nous serions ravis d'avoir de vos nouvelles dans le canal " +"``#introductions`` ! Et si quelque chose n'est pas clair, rendez-vous sur" +" le canal ``#questions``." -#: ../../source/tutorial-quickstart-huggingface.rst:118 -msgid "Creating the model itself" -msgstr "Créer le modèle lui-même" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:17 +msgid "Let's build a new ``Strategy`` from scratch!" +msgstr "Construisons une nouvelle ``Stratégie`` à partir de zéro !" -#: ../../source/tutorial-quickstart-huggingface.rst:120 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:29 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:29 +msgid "Preparation" +msgstr "Préparation" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:31 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:32 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:31 msgid "" -"To create the model itself, we will just load the pre-trained distillBERT" -" model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" +"Before we begin with the actual code, let's make sure that we have " +"everything we need." 
msgstr "" -"Pour créer le modèle lui-même, nous allons simplement charger le modèle " -"distillBERT pré-entraîné en utilisant le " -":code:`AutoModelForSequenceClassification` de Hugging Face :" +"Avant de commencer le code proprement dit, assurons-nous que nous " +"disposons de tout ce dont nous avons besoin." -#: ../../source/tutorial-quickstart-huggingface.rst:133 -msgid "Federating the example" -msgstr "Fédérer l'exemple" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:43 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:44 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:43 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:43 +msgid "Installing dependencies" +msgstr "Installation des dépendances" -#: ../../source/tutorial-quickstart-huggingface.rst:136 -msgid "Creating the IMDBClient" -msgstr "Création du client IMDBC" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:45 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:46 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:45 +msgid "First, we install the necessary packages:" +msgstr "Tout d'abord, nous installons les paquets nécessaires :" -#: ../../source/tutorial-quickstart-huggingface.rst:138 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:65 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:66 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:65 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:65 msgid "" -"To federate our example to multiple clients, we first need to write our " -"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). 
" -"This is very easy, as our model is a standard :code:`PyTorch` model:" +"Now that we have all dependencies installed, we can import everything we " +"need for this tutorial:" msgstr "" -"Pour fédérer notre exemple à plusieurs clients, nous devons d'abord " -"écrire notre classe de client Flower (héritant de " -":code:`flwr.client.NumPyClient`). C'est très facile, car notre modèle est" -" un modèle :code:`PyTorch` standard :" +"Maintenant que toutes les dépendances sont installées, nous pouvons " +"importer tout ce dont nous avons besoin pour ce tutoriel :" -#: ../../source/tutorial-quickstart-huggingface.rst:166 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:101 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:102 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:101 msgid "" -"The :code:`get_parameters` function lets the server get the client's " -"parameters. Inversely, the :code:`set_parameters` function allows the " -"server to send its parameters to the client. Finally, the :code:`fit` " -"function trains the model locally for the client, and the " -":code:`evaluate` function tests the model locally and returns the " -"relevant metrics." +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " +"GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." msgstr "" -"La fonction :code:`get_parameters` permet au serveur d'obtenir les " -"paramètres du client. 
Inversement, la fonction :code:`set_parameters` " -"permet au serveur d'envoyer ses paramètres au client. Enfin, la fonction " -":code:`fit` forme le modèle localement pour le client, et la fonction " -":code:`evaluate` teste le modèle localement et renvoie les mesures " -"correspondantes." +"Il est possible de passer à un runtime dont l'accélération GPU est " +"activée (sur Google Colab : ``Runtime > Change runtime type > Hardware " +"acclerator : GPU > Save``). Note cependant que Google Colab n'est pas " +"toujours en mesure de proposer l'accélération GPU. Si tu vois une erreur " +"liée à la disponibilité du GPU dans l'une des sections suivantes, " +"envisage de repasser à une exécution basée sur le CPU en définissant " +"``DEVICE = torch.device(\"cpu\")``. Si le runtime a activé l'accélération" +" GPU, tu devrais voir apparaître le résultat ``Training on cuda``, sinon " +"il dira ``Training on cpu``." -#: ../../source/tutorial-quickstart-huggingface.rst:172 -msgid "Starting the server" -msgstr "Démarrer le serveur" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:114 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:115 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:114 +msgid "Data loading" +msgstr "Chargement des données" -#: ../../source/tutorial-quickstart-huggingface.rst:174 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:116 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:116 msgid "" -"Now that we have a way to instantiate clients, we need to create our " -"server in order to aggregate the results. 
Using Flower, this can be done " -"very easily by first choosing a strategy (here, we are using " -":code:`FedAvg`, which will define the global weights as the average of " -"all the clients' weights at each round) and then using the " -":code:`flwr.server.start_server` function:" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``. We introduce a new parameter" +" ``num_clients`` which allows us to call ``load_datasets`` with different" +" numbers of clients." msgstr "" -"Maintenant que nous avons un moyen d'instancier les clients, nous devons " -"créer notre serveur afin d'agréger les résultats. Avec Flower, cela peut " -"être fait très facilement en choisissant d'abord une stratégie (ici, nous" -" utilisons :code:`FedAvg`, qui définira les poids globaux comme la " -"moyenne des poids de tous les clients à chaque tour) et en utilisant " -"ensuite la fonction :code:`flwr.server.start_server` :" +"Chargeons maintenant les ensembles d'entraînement et de test CIFAR-10, " +"divisons-les en dix ensembles de données plus petits (chacun divisé en " +"ensemble d'entraînement et de validation), et enveloppons le tout dans " +"leur propre ``DataLoader``. Nous introduisons un nouveau paramètre " +"``num_clients`` qui nous permet d'appeler ``load_datasets`` avec " +"différents nombres de clients." 
-#: ../../source/tutorial-quickstart-huggingface.rst:202 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:167 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:168 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:167 +msgid "Model training/evaluation" +msgstr "Formation/évaluation du modèle" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:169 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:170 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 msgid "" -"The :code:`weighted_average` function is there to provide a way to " -"aggregate the metrics distributed amongst the clients (basically this " -"allows us to display a nice average accuracy and loss for every round)." +"Let's continue with the usual model definition (including " +"``set_parameters`` and ``get_parameters``), training and test functions:" msgstr "" -"La fonction :code:`weighted_average` est là pour fournir un moyen " -"d'agréger les mesures réparties entre les clients (en gros, cela nous " -"permet d'afficher une belle moyenne de précision et de perte pour chaque " -"tour)." 
+"Continuons avec la définition habituelle du modèle (y compris " +"``set_parameters`` et ``get_parameters``), les fonctions d'entraînement " +"et de test :" -#: ../../source/tutorial-quickstart-huggingface.rst:206 -msgid "Putting everything together" -msgstr "Tout assembler" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:258 +msgid "Flower client" +msgstr "Client de Flower" -#: ../../source/tutorial-quickstart-huggingface.rst:208 -msgid "We can now start client instances using:" -msgstr "Nous pouvons maintenant démarrer des instances de clients en utilisant :" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:260 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:260 +msgid "" +"To implement the Flower client, we (again) create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " +"``cid`` to the client and use it log additional details:" +msgstr "" +"Pour mettre en œuvre le client Flower, nous créons (à nouveau) une sous-" +"classe de ``flwr.client.NumPyClient`` et mettons en œuvre les trois " +"méthodes ``get_parameters``, ``fit`` et ``evaluate``. 
Ici, nous " +"transmettons également le ``cid`` au client et l'utilisons pour consigner" +" des détails supplémentaires :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:308 +msgid "Let's test what we have so far before we continue:" +msgstr "Testons ce que nous avons jusqu'à présent avant de continuer :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:339 +msgid "Build a Strategy from scratch" +msgstr "Élaborer une stratégie à partir de zéro" -#: ../../source/tutorial-quickstart-huggingface.rst:218 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:341 msgid "" -"And they will be able to connect to the server and start the federated " -"training." -msgstr "Et ils pourront se connecter au serveur et démarrer la formation fédérée." +"Let’s overwrite the ``configure_fit`` method such that it passes a higher" +" learning rate (potentially also other hyperparameters) to the optimizer " +"of a fraction of the clients. We will keep the sampling of the clients as" +" it is in ``FedAvg`` and then change the configuration dictionary (one of" +" the ``FitIns`` attributes)." +msgstr "" +"Remplaçons la méthode ``configure_fit`` de façon à ce qu'elle transmette " +"un taux d'apprentissage plus élevé (potentiellement aussi d'autres " +"hyperparamètres) à l'optimiseur d'une fraction des clients. Nous " +"garderons l'échantillonnage des clients tel qu'il est dans ``FedAvg`` et " +"changerons ensuite le dictionnaire de configuration (l'un des attributs " +"``FitIns``)." -#: ../../source/tutorial-quickstart-huggingface.rst:220 -#, fuzzy +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:507 msgid "" -"If you want to check out everything put together, you should check out " -"the full code example: [https://github.com/adap/flower/tree/main/examples" -"/quickstart-" -"huggingface](https://github.com/adap/flower/tree/main/examples" -"/quickstart-huggingface)." 
+"The only thing left is to use the newly created custom Strategy " +"``FedCustom`` when starting the experiment:" msgstr "" -"Si tu veux voir tout ce qui est mis ensemble, tu devrais consulter " -"l'exemple de code complet : " -"[https://github.com/adap/flower/tree/main/examples/quickstart-" -"huggingface](https://github.com/adap/flower/tree/main/examples" -"/quickstart-huggingface)." +"Il ne reste plus qu'à utiliser la stratégie personnalisée nouvellement " +"créée ``FedCustom`` lors du démarrage de l'expérience :" -#: ../../source/tutorial-quickstart-huggingface.rst:224 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:534 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:932 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:697 +msgid "Recap" +msgstr "Récapitulation" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:536 msgid "" -"Of course, this is a very basic example, and a lot can be added or " -"modified, it was just to showcase how simply we could federate a Hugging " -"Face workflow using Flower." +"In this notebook, we’ve seen how to implement a custom strategy. A custom" +" strategy enables granular control over client node configuration, result" +" aggregation, and more. To define a custom strategy, you only have to " +"overwrite the abstract methods of the (abstract) base class ``Strategy``." +" To make custom strategies even more powerful, you can pass custom " +"functions to the constructor of your new class (``__init__``) and then " +"call these functions whenever needed." msgstr "" -"Bien sûr, c'est un exemple très basique, et beaucoup de choses peuvent " -"être ajoutées ou modifiées, il s'agissait juste de montrer avec quelle " -"simplicité on pouvait fédérer un flux de travail Hugging Face à l'aide de" -" Flower." +"Dans ce carnet, nous avons vu comment mettre en place une stratégie " +"personnalisée. 
Une stratégie personnalisée permet un contrôle granulaire " +"sur la configuration des nœuds clients, l'agrégation des résultats, et " +"bien plus encore. Pour définir une stratégie personnalisée, il te suffit " +"d'écraser les méthodes abstraites de la classe de base (abstraite) " +"``Strategy``. Pour rendre les stratégies personnalisées encore plus " +"puissantes, tu peux passer des fonctions personnalisées au constructeur " +"de ta nouvelle classe (``__init__``) et appeler ensuite ces fonctions à " +"chaque fois que c'est nécessaire." -#: ../../source/tutorial-quickstart-huggingface.rst:227 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:550 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:948 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:729 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:715 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 msgid "" -"Note that in this example we used :code:`PyTorch`, but we could have very" -" well used :code:`TensorFlow`." +"Before you continue, make sure to join the Flower community on Slack: " +"`Join Slack `__" msgstr "" -"Notez que dans cet exemple, nous avons utilisé :code:`PyTorch`, mais nous" -" aurions très bien pu utiliser :code:`TensorFlow`." 
- -#: ../../source/tutorial-quickstart-ios.rst:5 -#, fuzzy -msgid "Quickstart iOS" -msgstr "Démarrage rapide XGBoost" +"Avant de continuer, n'oublie pas de rejoindre la communauté Flower sur " +"Slack : `Join Slack `__" -#: ../../source/tutorial-quickstart-ios.rst:7 -#, fuzzy +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:552 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:950 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:731 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:717 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 msgid "" -"In this tutorial we will learn how to train a Neural Network on MNIST " -"using Flower and CoreML on iOS devices." +"There's a dedicated ``#questions`` channel if you need help, but we'd " +"also love to hear who you are in ``#introductions``!" msgstr "" -"Dans ce tutoriel, nous allons apprendre, comment former un réseau " -"neuronal convolutif sur MNIST en utilisant Flower et PyTorch." +"Il existe un canal dédié aux ``questions`` si vous avez besoin d'aide, " +"mais nous aimerions aussi savoir qui vous êtes dans ``#introductions`` !" -#: ../../source/tutorial-quickstart-ios.rst:9 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:554 #, fuzzy msgid "" -"First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a `virtualenv " -"`_. For the Flower " -"client implementation in iOS, it is recommended to use Xcode as our IDE." +"The `Flower Federated Learning Tutorial - Part 4 " +"`__ introduces ``Client``, the flexible API underlying " +"``NumPyClient``." msgstr "" -"Tout d'abord, il est recommandé de créer un environnement virtuel et de " -"tout exécuter au sein d'un `virtualenv `_." 
+"Le `Tutoriel d'apprentissage fédéré Flower - Partie 4 " +"`__ présente ``Client``, l'API flexible qui sous-tend " +"``NumPyClient``." -#: ../../source/tutorial-quickstart-ios.rst:12 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 #, fuzzy -msgid "" -"Our example consists of one Python *server* and two iPhone *clients* that" -" all have the same model." -msgstr "" -"Notre exemple consiste en un *serveur* et deux *clients* ayant tous le " -"même modèle." +msgid "Customize the client" +msgstr "Création du client IMDBC" -#: ../../source/tutorial-quickstart-ios.rst:14 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 #, fuzzy msgid "" -"*Clients* are responsible for generating individual weight updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." +"Welcome to the fourth part of the Flower federated learning tutorial. In " +"the previous parts of this tutorial, we introduced federated learning " +"with PyTorch and Flower (`part 1 `__), we learned how " +"strategies can be used to customize the execution on both the server and " +"the clients (`part 2 `__), and we built our own " +"custom strategy from scratch (`part 3 `__)." msgstr "" -"*Les clients* sont chargés de générer des mises à jour de poids " -"individuelles pour le modèle en fonction de leurs ensembles de données " -"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " -"agrège pour produire un meilleur modèle. Enfin, le *serveur* renvoie " -"cette version améliorée du modèle à chaque *client*. Un cycle complet de " -"mises à jour de poids s'appelle un *round*." +"Bienvenue dans la quatrième partie du tutoriel sur l'apprentissage fédéré" +" Flower. 
Dans les parties précédentes de ce tutoriel, nous avons présenté" +" l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " +"`__)," +" nous avons appris comment les stratégies peuvent être utilisées pour " +"personnaliser l'exécution à la fois sur le serveur et les clients " +"(`partie 2 `__), et nous avons construit notre propre stratégie " +"personnalisée à partir de zéro (`partie 3 - WIP " +"`__)." -#: ../../source/tutorial-quickstart-ios.rst:18 -#, fuzzy +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 msgid "" -"Now that we have a rough idea of what is going on, let's get started to " -"setup our Flower server environment. We first need to install Flower. You" -" can do this by using pip:" -msgstr "" -"Maintenant que nous avons une idée générale de ce qui se passe, " -"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " -"exécutant :" - -#: ../../source/tutorial-quickstart-ios.rst:24 -msgid "Or Poetry:" +"In this notebook, we revisit ``NumPyClient`` and introduce a new " +"baseclass for building clients, simply named ``Client``. In previous " +"parts of this tutorial, we've based our client on ``NumPyClient``, a " +"convenience class which makes it easy to work with machine learning " +"libraries that have good NumPy interoperability. With ``Client``, we gain" +" a lot of flexibility that we didn't have before, but we'll also have to " +"do a few things the we didn't have to do before." msgstr "" +"Dans ce carnet, nous revisitons `NumPyClient`` et introduisons une " +"nouvelle classe de base pour construire des clients, simplement appelée " +"`Client``. Dans les parties précédentes de ce tutoriel, nous avons basé " +"notre client sur ``NumPyClient``, une classe de commodité qui facilite le" +" travail avec les bibliothèques d'apprentissage automatique qui ont une " +"bonne interopérabilité NumPy. 
Avec ``Client``, nous gagnons beaucoup de " +"flexibilité que nous n'avions pas auparavant, mais nous devrons également" +" faire quelques choses que nous n'avions pas à faire auparavant." -#: ../../source/tutorial-quickstart-ios.rst:33 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:18 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training using CoreML as our local training pipeline and " -"MNIST as our dataset. For simplicity reasons we will use the complete " -"Flower client with CoreML, that has been implemented and stored inside " -"the Swift SDK. The client implementation can be seen below:" +"Let's go deeper and see what it takes to move from ``NumPyClient`` to " +"``Client``!" msgstr "" +"Allons plus loin et voyons ce qu'il faut faire pour passer de " +"``NumPyClient`` à ``Client`` !" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:30 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:29 +msgid "Step 0: Preparation" +msgstr "Étape 0 : Préparation" -#: ../../source/tutorial-quickstart-ios.rst:69 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:117 msgid "" -"Let's create a new application project in Xcode and add :code:`flwr` as a" -" dependency in your project. For our application, we will store the logic" -" of our app in :code:`FLiOSModel.swift` and the UI elements in " -":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" -" in this quickstart. Please refer to the `full code example " -"`_ to learn more " -"about the app." +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``." 
msgstr "" +"Chargeons maintenant les ensembles d'entraînement et de test CIFAR-10, " +"divisons-les en dix ensembles de données plus petits (chacun divisé en " +"ensemble d'entraînement et de validation) et enveloppons le tout dans " +"leur propre ``DataLoader``." -#: ../../source/tutorial-quickstart-ios.rst:72 -msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" -msgstr "" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:259 +msgid "Step 1: Revisiting NumPyClient" +msgstr "Étape 1 : Revoir NumPyClient" -#: ../../source/tutorial-quickstart-ios.rst:80 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:261 msgid "" -"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " -"will be bundled inside the application during deployment to your iOS " -"device. We need to pass the url to access mlmodel and run CoreML machine " -"learning processes, it can be retrieved by calling the function " -":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " -"into :code:`MLBatchProvider` object. The preprocessing is done inside " -":code:`DataLoader.swift`." +"So far, we've implemented our client by subclassing " +"``flwr.client.NumPyClient``. The three methods we implemented are " +"``get_parameters``, ``fit``, and ``evaluate``. Finally, we wrap the " +"creation of instances of this class in a function called ``client_fn``:" msgstr "" +"Jusqu'à présent, nous avons implémenté notre client en sous-classant " +"``flwr.client.NumPyClient``. Les trois méthodes que nous avons " +"implémentées sont ``get_parameters``, ``fit`` et ``evaluate``. 
Enfin, " +"nous enveloppons la création d'instances de cette classe dans une " +"fonction appelée ``client_fn`` :" -#: ../../source/tutorial-quickstart-ios.rst:96 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:309 msgid "" -"Since CoreML does not allow the model parameters to be seen before " -"training, and accessing the model parameters during or after the training" -" can only be done by specifying the layer name, we need to know this " -"informations beforehand, through looking at the model specification, " -"which are written as proto files. The implementation can be seen in " -":code:`MLModelInspect`." +"We've seen this before, there's nothing new so far. The only *tiny* " +"difference compared to the previous notebook is naming, we've changed " +"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " +"``numpyclient_fn``. Let's run it to see the output we get:" msgstr "" +"Nous avons déjà vu cela auparavant, il n'y a rien de nouveau jusqu'à " +"présent. La seule *petite* différence par rapport au carnet précédent est" +" le nommage, nous avons changé ``FlowerClient`` en ``FlowerNumPyClient`` " +"et ``client_fn`` en ``numpyclient_fn``. Exécutons-le pour voir la sortie " +"que nous obtenons :" -#: ../../source/tutorial-quickstart-ios.rst:99 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:339 msgid "" -"After we have all of the necessary informations, let's create our Flower " -"client." +"This works as expected, two clients are training for three rounds of " +"federated learning." msgstr "" +"Cela fonctionne comme prévu, deux clients s'entraînent pour trois tours " +"d'apprentissage fédéré." -#: ../../source/tutorial-quickstart-ios.rst:114 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:341 msgid "" -"Then start the Flower gRPC client and start communicating to the server " -"by passing our Flower client to the function :code:`startFlwrGRPC`." 
+"Let's dive a little bit deeper and discuss how Flower executes this " +"simulation. Whenever a client is selected to do some work, " +"``start_simulation`` calls the function ``numpyclient_fn`` to create an " +"instance of our ``FlowerNumPyClient`` (along with loading the model and " +"the data)." msgstr "" +"Plongeons un peu plus profondément et discutons de la façon dont Flower " +"exécute cette simulation. Chaque fois qu'un client est sélectionné pour " +"effectuer un travail, ``start_simulation`` appelle la fonction " +"``numpyclient_fn`` pour créer une instance de notre ``FlowerNumPyClient``" +" (en même temps qu'il charge le modèle et les données)." -#: ../../source/tutorial-quickstart-ios.rst:121 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:343 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " -"The attribute :code:`hostname` and :code:`port` tells the client which " -"server to connect to. This can be done by entering the hostname and port " -"in the application before clicking the start button to start the " -"federated learning process." +"But here's the perhaps surprising part: Flower doesn't actually use the " +"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " +"makes it look like a subclass of ``flwr.client.Client``, not " +"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " +"know how to handle ``NumPyClient``'s, it only knows how to handle " +"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " +"top of ``Client``." msgstr "" +"Mais voici la partie la plus surprenante : Flower n'utilise pas " +"directement l'objet `FlowerNumPyClient`. Au lieu de cela, il enveloppe " +"l'objet pour le faire ressembler à une sous-classe de " +"`flwr.client.Client`, et non de `flwr.client.NumPyClient`. 
En fait, le " +"noyau de Flower ne sait pas comment gérer les `NumPyClient`, il sait " +"seulement comment gérer les `Client`. `NumPyClient` est juste une " +"abstraction de commodité construite au dessus de `Client`." -#: ../../source/tutorial-quickstart-ios.rst:128 -#: ../../source/tutorial-quickstart-mxnet.rst:223 -#: ../../source/tutorial-quickstart-pytorch.rst:202 -#: ../../source/tutorial-quickstart-tensorflow.rst:97 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:345 msgid "" -"For simple workloads we can start a Flower server and leave all the " -"configuration possibilities at their default values. In a file named " -":code:`server.py`, import Flower and start the server:" +"Instead of building on top of ``NumPyClient``, we can directly build on " +"top of ``Client``." msgstr "" -"Pour les charges de travail simples, nous pouvons démarrer un serveur " -"Flower et laisser toutes les possibilités de configuration à leurs " -"valeurs par défaut. Dans un fichier nommé :code:`server.py`, importe " -"Flower et démarre le serveur :" - -#: ../../source/tutorial-quickstart-ios.rst:139 -#: ../../source/tutorial-quickstart-mxnet.rst:234 -#: ../../source/tutorial-quickstart-pytorch.rst:213 -#: ../../source/tutorial-quickstart-scikitlearn.rst:212 -#: ../../source/tutorial-quickstart-tensorflow.rst:109 -msgid "Train the model, federated!" -msgstr "Entraîne le modèle, fédéré !" +"Au lieu de construire par-dessus `NumPyClient``, nous pouvons construire " +"directement par-dessus `Client``." -#: ../../source/tutorial-quickstart-ios.rst:141 -#: ../../source/tutorial-quickstart-pytorch.rst:215 -#: ../../source/tutorial-quickstart-tensorflow.rst:111 -msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. FL systems usually have a server and " -"multiple clients. 
We therefore have to start the server first:" -msgstr "" -"Le client et le serveur étant prêts, nous pouvons maintenant tout " -"exécuter et voir l'apprentissage fédéré en action. Les systèmes FL ont " -"généralement un serveur et plusieurs clients. Nous devons donc commencer " -"par démarrer le serveur :" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:357 +msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" +msgstr "Étape 2 : Passer de ``NumPyClient`` à ``Client``" -#: ../../source/tutorial-quickstart-ios.rst:149 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:359 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Build and run the client through your Xcode, one through Xcode" -" Simulator and the other by deploying it to your iPhone. To see more " -"about how to deploy your app to iPhone or Simulator visit `here " -"`_." +"Let's try to do the same thing using ``Client`` instead of " +"``NumPyClient``." msgstr "" +"Essayons de faire la même chose en utilisant ``Client`` au lieu de " +"``NumPyClient``." -#: ../../source/tutorial-quickstart-ios.rst:153 -#, fuzzy +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:465 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system in your ios device. The full `source code " -"`_ for this " -"example can be found in :code:`examples/ios`." +"Before we discuss the code in more detail, let's try to run it! Gotta " +"make sure our new ``Client``-based client works, right?" msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples" -"/quickstart-mxnet`." 
- -#: ../../source/tutorial-quickstart-jax.rst:5 -msgid "Quickstart JAX" -msgstr "Démarrage rapide de JAX" - -#: ../../source/tutorial-quickstart-mxnet.rst:5 -msgid "Quickstart MXNet" -msgstr "Démarrage rapide de MXNet" +"Avant de discuter du code plus en détail, essayons de l'exécuter ! Nous " +"devons nous assurer que notre nouveau client basé sur le ``Client`` " +"fonctionne, n'est-ce pas ?" -#: ../../source/tutorial-quickstart-mxnet.rst:7 -msgid "" -"In this tutorial, we will learn how to train a :code:`Sequential` model " -"on MNIST using Flower and MXNet." +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:490 +msgid "" +"That's it, we're now using ``Client``. It probably looks similar to what " +"we've done with ``NumPyClient``. So what's the difference?" msgstr "" -"Dans ce tutoriel, nous allons apprendre à former un modèle " -":code:`Sequential` sur MNIST à l'aide de Flower et de MXNet." +"Voilà, nous utilisons maintenant ``Client``. Cela ressemble probablement " +"à ce que nous avons fait avec ``NumPyClient``. Alors quelle est la " +"différence ?" -#: ../../source/tutorial-quickstart-mxnet.rst:9 -#: ../../source/tutorial-quickstart-scikitlearn.rst:9 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:492 msgid "" -"It is recommended to create a virtual environment and run everything " -"within this `virtualenv `_." +"First of all, it's more code. But why? The difference comes from the fact" +" that ``Client`` expects us to take care of parameter serialization and " +"deserialization. For Flower to be able to send parameters over the " +"network, it eventually needs to turn these parameters into ``bytes``. " +"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " +"serialization. Turning raw bytes into something more useful (like NumPy " +"``ndarray``'s) is called deserialization. 
Flower needs to do both: it " +"needs to serialize parameters on the server-side and send them to the " +"client, the client needs to deserialize them to use them for local " +"training, and then serialize the updated parameters again to send them " +"back to the server, which (finally!) deserializes them again in order to " +"aggregate them with the updates received from other clients." msgstr "" -"Il est recommandé de créer un environnement virtuel et de tout exécuter " -"dans ce `virtualenv `_." +"First of all, it's more code. But why? The difference comes from the fact" +" that ``Client`` expects us to take care of parameter serialization and " +"deserialization. For Flower to be able to send parameters over the " +"network, it eventually needs to turn these parameters into ``bytes``. " +"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " +"serialization. Turning raw bytes into something more useful (like NumPy " +"``ndarray``'s) is called deserialization. Flower needs to do both: it " +"needs to serialize parameters on the server-side and send them to the " +"client, the client needs to deserialize them to use them for local " +"training, and then serialize the updated parameters again to send them " +"back to the server, which (finally!) deserializes them again in order to " +"aggregate them with the updates received from other clients." -#: ../../source/tutorial-quickstart-mxnet.rst:13 -#: ../../source/tutorial-quickstart-scikitlearn.rst:13 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:495 msgid "" -"*Clients* are responsible for generating individual model parameter " -"updates for the model based on their local datasets. These updates are " -"then sent to the *server* which will aggregate them to produce an updated" -" global model. Finally, the *server* sends this improved version of the " -"model back to each *client*. A complete cycle of parameters updates is " -"called a *round*." 
+"The only *real* difference between Client and NumPyClient is that " +"NumPyClient takes care of serialization and deserialization for you. It " +"can do so because it expects you to return parameters as NumPy ndarray's," +" and it knows how to handle these. This makes working with machine " +"learning libraries that have good NumPy support (most of them) a breeze." msgstr "" -"*Les clients* sont chargés de générer des mises à jour individuelles des " -"paramètres du modèle en fonction de leurs ensembles de données locales. " -"Ces mises à jour sont ensuite envoyées au *serveur* qui les agrège pour " -"produire un modèle global mis à jour. Enfin, le *serveur* renvoie cette " -"version améliorée du modèle à chaque *client*. Un cycle complet de mises " -"à jour des paramètres s'appelle un *round*." +"La seule *vraie* différence entre Client et NumPyClient est que " +"NumPyClient s'occupe de la sérialisation et de la désérialisation pour " +"toi. Il peut le faire parce qu'il s'attend à ce que tu renvoies des " +"paramètres sous forme de NumPy ndarray, et il sait comment les gérer. " +"Cela permet de travailler avec des bibliothèques d'apprentissage " +"automatique qui ont une bonne prise en charge de NumPy (la plupart " +"d'entre elles) en un clin d'œil." -#: ../../source/tutorial-quickstart-mxnet.rst:17 -#: ../../source/tutorial-quickstart-scikitlearn.rst:17 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:497 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running:" +"In terms of API, there's one major difference: all methods in Client take" +" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " +"exactly one value (e.g., ``FitRes`` in ``Client.fit``). 
The methods in " +"``NumPyClient`` on the other hand have multiple arguments (e.g., " +"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" +" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " +"``NumPyClient.fit``) if there are multiple things to handle. These " +"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " +"values you're used to from ``NumPyClient``." msgstr "" -"Maintenant que nous avons une idée approximative de ce qui se passe, " -"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " -"lançant :" +"In terms of API, there's one major difference: all methods in Client take" +" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " +"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " +"``NumPyClient`` on the other hand have multiple arguments (e.g., " +"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" +" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " +"``NumPyClient.fit``) if there are multiple things to handle. These " +"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " +"values you're used to from ``NumPyClient``." -#: ../../source/tutorial-quickstart-mxnet.rst:23 -msgid "Since we want to use MXNet, let's go ahead and install it:" -msgstr "Puisque nous voulons utiliser MXNet, allons-y et installons-le :" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:510 +msgid "Step 3: Custom serialization" +msgstr "Étape 3 : Sérialisation personnalisée" -#: ../../source/tutorial-quickstart-mxnet.rst:33 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:512 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. Our training " -"procedure and network architecture are based on MXNet´s `Hand-written " -"Digit Recognition tutorial " -"`_." 
+"Here we will explore how to implement custom serialization with a simple " +"example." msgstr "" -"Maintenant que toutes nos dépendances sont installées, lançons une " -"formation distribuée simple avec deux clients et un serveur. Notre " -"procédure de formation et l'architecture du réseau sont basées sur le " -"tutoriel de reconnaissance de chiffres écrits à la main du MXNet " -"`_." +"Nous allons ici explorer comment mettre en œuvre une sérialisation " +"personnalisée à l'aide d'un exemple simple." -#: ../../source/tutorial-quickstart-mxnet.rst:35 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:514 msgid "" -"In a file called :code:`client.py`, import Flower and MXNet related " -"packages:" +"But first what is serialization? Serialization is just the process of " +"converting an object into raw bytes, and equally as important, " +"deserialization is the process of converting raw bytes back into an " +"object. This is very useful for network communication. Indeed, without " +"serialization, you could not just a Python object through the internet." msgstr "" -"Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " -"liés au MXNet :" - -#: ../../source/tutorial-quickstart-mxnet.rst:50 -msgid "In addition, define the device allocation in MXNet with:" -msgstr "En outre, définis l'attribution de l'appareil dans MXNet avec :" +"Mais d'abord, qu'est-ce que la sérialisation ? La sérialisation est " +"simplement le processus de conversion d'un objet en octets bruts, et tout" +" aussi important, la désérialisation est le processus de reconversion des" +" octets bruts en objet. Ceci est très utile pour la communication réseau." +" En effet, sans la sérialisation, tu ne pourrais pas faire passer un " +"objet Python par Internet." 
-#: ../../source/tutorial-quickstart-mxnet.rst:56 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:516 msgid "" -"We use MXNet to load MNIST, a popular image classification dataset of " -"handwritten digits for machine learning. The MXNet utility " -":code:`mx.test_utils.get_mnist()` downloads the training and test data." +"Federated Learning relies heavily on internet communication for training " +"by sending Python objects back and forth between the clients and the " +"server. This means that serialization is an essential part of Federated " +"Learning." msgstr "" -"Nous utilisons MXNet pour charger MNIST, un ensemble de données de " -"classification d'images populaire de chiffres manuscrits pour " -"l'apprentissage automatique. L'utilitaire MXNet " -":code:`mx.test_utils.get_mnist()` télécharge les données d'entraînement " -"et de test." +"L'apprentissage fédéré s'appuie fortement sur la communication Internet " +"pour la formation en envoyant des objets Python dans les deux sens entre " +"les clients et le serveur, ce qui signifie que la sérialisation est un " +"élément essentiel de l'apprentissage fédéré." -#: ../../source/tutorial-quickstart-mxnet.rst:70 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:518 msgid "" -"Define the training and loss with MXNet. We train the model by looping " -"over the dataset, measure the corresponding loss, and optimize it." +"In the following section, we will write a basic example where instead of " +"sending a serialized version of our ``ndarray``\\ s containing our " +"parameters, we will first convert the ``ndarray`` into sparse matrices, " +"before sending them. This technique can be used to save bandwidth, as in " +"certain cases where the weights of a model are sparse (containing many 0 " +"entries), converting them to a sparse matrix can greatly improve their " +"bytesize." msgstr "" -"Définis l'entraînement et la perte avec MXNet. 
Nous entraînons le modèle " -"en parcourant en boucle l'ensemble des données, nous mesurons la perte " -"correspondante et nous l'optimisons." +"Dans la section suivante, nous allons écrire un exemple de base où, au " +"lieu d'envoyer une version sérialisée de nos ``ndarray`` contenant nos " +"paramètres, nous allons d'abord convertir les ``ndarray`` en matrices " +"éparses, avant de les envoyer. Cette technique peut être utilisée pour " +"économiser de la bande passante, car dans certains cas où les poids d'un " +"modèle sont épars (contenant de nombreuses entrées 0), les convertir en " +"une matrice éparse peut grandement améliorer leur taille en octets." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:521 +msgid "Our custom serialization/deserialization functions" +msgstr "Nos fonctions de sérialisation/désérialisation personnalisées" -#: ../../source/tutorial-quickstart-mxnet.rst:108 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:523 msgid "" -"Next, we define the validation of our machine learning model. We loop " -"over the test set and measure both loss and accuracy on the test set." +"This is where the real serialization/deserialization will happen, " +"especially in ``ndarray_to_sparse_bytes`` for serialization and " +"``sparse_bytes_to_ndarray`` for deserialization." msgstr "" -"Ensuite, nous définissons la validation de notre modèle d'apprentissage " -"automatique. Nous effectuons une boucle sur l'ensemble de test et " -"mesurons à la fois la perte et la précision sur l'ensemble de test." +"C'est là que la véritable sérialisation/désérialisation se produira, en " +"particulier dans ``ndarray_to_sparse_bytes`` pour la sérialisation et " +"``sparse_bytes_to_ndarray`` pour la désérialisation." 
-#: ../../source/tutorial-quickstart-mxnet.rst:132 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:525 msgid "" -"After defining the training and testing of a MXNet machine learning " -"model, we use these functions to implement a Flower client." +"Note that we imported the ``scipy.sparse`` library in order to convert " +"our arrays." msgstr "" -"Après avoir défini la formation et le test d'un modèle d'apprentissage " -"automatique MXNet, nous utilisons ces fonctions pour mettre en œuvre un " -"client Flower." +"Notez que nous avons importé la bibliothèque ``scipy.sparse`` afin de " +"convertir nos tableaux." -#: ../../source/tutorial-quickstart-mxnet.rst:134 -msgid "Our Flower clients will use a simple :code:`Sequential` model:" -msgstr "Nos clients Flower utiliseront un modèle simple :code:`Sequential` :" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:613 +msgid "Client-side" +msgstr "Côté client" -#: ../../source/tutorial-quickstart-mxnet.rst:153 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 msgid "" -"After loading the dataset with :code:`load_data()` we perform one forward" -" propagation to initialize the model and model parameters with " -":code:`model(init)`. Next, we implement a Flower client." +"To be able to able to serialize our ``ndarray``\\ s into sparse " +"parameters, we will just have to call our custom functions in our " +"``flwr.client.Client``." msgstr "" -"Après avoir chargé l'ensemble de données avec :code:`load_data()`, nous " -"effectuons une propagation vers l'avant pour initialiser le modèle et les" -" paramètres du modèle avec :code:`model(init)`. Ensuite, nous " -"implémentons un client Flower." +"Pour pouvoir sérialiser nos ``ndarray`` en paramètres sparse, il nous " +"suffira d'appeler nos fonctions personnalisées dans notre " +"``flwr.client.Client``." 
-#: ../../source/tutorial-quickstart-mxnet.rst:155 -#: ../../source/tutorial-quickstart-pytorch.rst:141 -#: ../../source/tutorial-quickstart-tensorflow.rst:51 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:617 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to train the neural network we defined earlier)." +"Indeed, in ``get_parameters`` we need to serialize the parameters we got " +"from our network using our custom ``ndarrays_to_sparse_parameters`` " +"defined above." msgstr "" -"Le serveur Flower interagit avec les clients par le biais d'une interface" -" appelée :code:`Client`. Lorsque le serveur sélectionne un client " -"particulier pour la formation, il envoie des instructions de formation " -"sur le réseau. Le client reçoit ces instructions et appelle l'une des " -"méthodes :code:`Client` pour exécuter ton code (c'est-à-dire pour former " -"le réseau neuronal que nous avons défini plus tôt)." +"En effet, dans ``get_parameters`` nous devons sérialiser les paramètres " +"que nous avons obtenus de notre réseau en utilisant nos " +"``ndarrays_to_sparse_parameters`` personnalisés définis ci-dessus." -#: ../../source/tutorial-quickstart-mxnet.rst:161 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:619 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses MXNet. 
Implementing :code:`NumPyClient` usually means " -"defining the following methods (:code:`set_parameters` is optional " -"though):" +"In ``fit``, we first need to deserialize the parameters coming from the " +"server using our custom ``sparse_parameters_to_ndarrays`` and then we " +"need to serialize our local results with " +"``ndarrays_to_sparse_parameters``." msgstr "" -"Flower fournit une classe de commodité appelée :code:`NumPyClient` qui " -"facilite l'implémentation de l'interface :code:`Client` lorsque ta charge" -" de travail utilise MXNet. L'implémentation de :code:`NumPyClient` " -"signifie généralement la définition des méthodes suivantes " -"(:code:`set_parameters` est cependant facultatif) :" - -#: ../../source/tutorial-quickstart-mxnet.rst:167 -#: ../../source/tutorial-quickstart-pytorch.rst:153 -#: ../../source/tutorial-quickstart-scikitlearn.rst:106 -msgid "return the model weight as a list of NumPy ndarrays" -msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" - -#: ../../source/tutorial-quickstart-mxnet.rst:168 -#: ../../source/tutorial-quickstart-pytorch.rst:154 -#: ../../source/tutorial-quickstart-scikitlearn.rst:108 -msgid ":code:`set_parameters` (optional)" -msgstr ":code:`set_parameters` (optionnel)" +"Dans ``fit``, nous devons d'abord désérialiser les paramètres provenant " +"du serveur en utilisant notre ``sparse_parameters_to_ndarrays`` " +"personnalisé, puis nous devons sérialiser nos résultats locaux avec " +"``ndarrays_to_sparse_parameters``." -#: ../../source/tutorial-quickstart-mxnet.rst:169 -#: ../../source/tutorial-quickstart-pytorch.rst:155 -#: ../../source/tutorial-quickstart-scikitlearn.rst:108 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:621 msgid "" -"update the local model weights with the parameters received from the " -"server" +"In ``evaluate``, we will only need to deserialize the global parameters " +"with our custom function." 
msgstr "" -"mettre à jour les poids du modèle local avec les paramètres reçus du " -"serveur" - -#: ../../source/tutorial-quickstart-mxnet.rst:171 -#: ../../source/tutorial-quickstart-pytorch.rst:157 -#: ../../source/tutorial-quickstart-scikitlearn.rst:111 -msgid "set the local model weights" -msgstr "fixe les poids du modèle local" - -#: ../../source/tutorial-quickstart-mxnet.rst:172 -#: ../../source/tutorial-quickstart-pytorch.rst:158 -#: ../../source/tutorial-quickstart-scikitlearn.rst:112 -msgid "train the local model" -msgstr "entraîne le modèle local" +"Dans ``evaluate``, nous n'aurons besoin que de désérialiser les " +"paramètres globaux avec notre fonction personnalisée." -#: ../../source/tutorial-quickstart-mxnet.rst:173 -#: ../../source/tutorial-quickstart-pytorch.rst:159 -#: ../../source/tutorial-quickstart-scikitlearn.rst:113 -msgid "receive the updated local model weights" -msgstr "recevoir les poids du modèle local mis à jour" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:725 +msgid "Server-side" +msgstr "Côté serveur" -#: ../../source/tutorial-quickstart-mxnet.rst:175 -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:115 -msgid "test the local model" -msgstr "teste le modèle local" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:727 +msgid "" +"For this example, we will just use ``FedAvg`` as a strategy. To change " +"the serialization and deserialization here, we only need to reimplement " +"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" +" functions of the strategy will be inherited from the super class " +"``FedAvg``." +msgstr "" +"Pour cet exemple, nous utiliserons simplement ``FedAvg`` comme stratégie." +" Pour modifier la sérialisation et la désérialisation ici, il suffit de " +"réimplémenter les fonctions ``evaluate`` et ``aggregate_fit`` de " +"``FedAvg``. 
Les autres fonctions de la stratégie seront héritées de la " +"super-classe ``FedAvg``." -#: ../../source/tutorial-quickstart-mxnet.rst:177 -msgid "They can be implemented in the following way:" -msgstr "Ils peuvent être mis en œuvre de la manière suivante :" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:729 +msgid "As you can see only one line as change in ``evaluate``:" +msgstr "Comme tu peux le voir, seule une ligne a été modifiée dans ``evaluate`` :" -#: ../../source/tutorial-quickstart-mxnet.rst:207 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:735 msgid "" -"We can now create an instance of our class :code:`MNISTClient` and add " -"one line to actually run this client:" +"And for ``aggregate_fit``, we will first deserialize every result we " +"received:" msgstr "" -"Nous pouvons maintenant créer une instance de notre classe " -":code:`MNISTClient` et ajouter une ligne pour exécuter ce client :" +"Et pour ``aggregate_fit``, nous allons d'abord désérialiser chaque " +"résultat que nous avons reçu :" -#: ../../source/tutorial-quickstart-mxnet.rst:214 -#: ../../source/tutorial-quickstart-scikitlearn.rst:147 -msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()` or " -":code:`fl.client.start_numpy_client()`. The string " -":code:`\"0.0.0.0:8080\"` tells the client which server to connect to. In " -"our case we can run the server and the client on the same machine, " -"therefore we use :code:`\"0.0.0.0:8080\"`. If we run a truly federated " -"workload with the server and clients running on different machines, all " -"that needs to change is the :code:`server_address` we pass to the client." 
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:744 +msgid "And then serialize the aggregated result:" +msgstr "Puis sérialise le résultat agrégé :" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:903 +msgid "We can now run our custom serialization example!" msgstr "" -"C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou" -" :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()` ou " -":code:`fl.client.start_numpy_client()`. La chaîne :code:`\"0.0.0:8080\"` " -"indique au client à quel serveur se connecter. Dans notre cas, nous " -"pouvons exécuter le serveur et le client sur la même machine, c'est " -"pourquoi nous utilisons :code:`\"0.0.0:8080\"`. Si nous exécutons une " -"charge de travail véritablement fédérée avec le serveur et les clients " -"s'exécutant sur des machines différentes, tout ce qui doit changer est " -":code:`server_address` que nous transmettons au client." +"Nous pouvons maintenant exécuter notre exemple de sérialisation " +"personnalisée !" -#: ../../source/tutorial-quickstart-mxnet.rst:236 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:934 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. Federated learning systems usually have a " -"server and multiple clients. We therefore have to start the server first:" +"In this part of the tutorial, we've seen how we can build clients by " +"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " +"convenience abstraction that makes it easier to work with machine " +"learning libraries that have good NumPy interoperability. ``Client`` is a" +" more flexible abstraction that allows us to do things that are not " +"possible in ``NumPyClient``. In order to do so, it requires us to handle " +"parameter serialization and deserialization ourselves." 
msgstr "" -"Le client et le serveur étant prêts, nous pouvons maintenant tout " -"exécuter et voir l'apprentissage fédéré en action. Les systèmes " -"d'apprentissage fédéré ont généralement un serveur et plusieurs clients. " -"Nous devons donc commencer par démarrer le serveur :" +"Dans cette partie du tutoriel, nous avons vu comment construire des " +"clients en sous-classant soit ``NumPyClient``, soit ``Client``. " +"``NumPyClient`` est une abstraction de commodité qui facilite le travail " +"avec les bibliothèques d'apprentissage automatique qui ont une bonne " +"interopérabilité NumPy. ``Client`` est une abstraction plus flexible qui " +"nous permet de faire des choses qui ne sont pas possibles dans " +"``NumPyClient``. Pour ce faire, elle nous oblige à gérer nous-mêmes la " +"sérialisation et la désérialisation des paramètres." -#: ../../source/tutorial-quickstart-mxnet.rst:244 -#: ../../source/tutorial-quickstart-pytorch.rst:223 -#: ../../source/tutorial-quickstart-scikitlearn.rst:221 -#: ../../source/tutorial-quickstart-tensorflow.rst:119 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:952 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Open a new terminal and start the first client:" +"This is the final part of the Flower tutorial (for now!), " +"congratulations! You're now well equipped to understand the rest of the " +"documentation. There are many topics we didn't cover in the tutorial, we " +"recommend the following resources:" msgstr "" -"Une fois que le serveur fonctionne, nous pouvons démarrer les clients " -"dans différents terminaux. Ouvre un nouveau terminal et démarre le " -"premier client :" +"C'est la dernière partie du tutoriel Flower (pour l'instant !), " +"félicitations ! Tu es maintenant bien équipé pour comprendre le reste de " +"la documentation. 
Il y a de nombreux sujets que nous n'avons pas abordés "
+"dans le tutoriel, nous te recommandons les ressources suivantes :"

-#: ../../source/tutorial-quickstart-mxnet.rst:251
-#: ../../source/tutorial-quickstart-pytorch.rst:230
-#: ../../source/tutorial-quickstart-scikitlearn.rst:228
-#: ../../source/tutorial-quickstart-tensorflow.rst:126
-msgid "Open another terminal and start the second client:"
-msgstr "Ouvre un autre terminal et démarre le deuxième client :"
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:954
+msgid "`Read Flower Docs `__"
+msgstr "`Lire la documentation de Flower `__"

-#: ../../source/tutorial-quickstart-mxnet.rst:257
-#: ../../source/tutorial-quickstart-pytorch.rst:236
-#: ../../source/tutorial-quickstart-scikitlearn.rst:234
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:955
 msgid ""
-"Each client will have its own dataset. You should now see how the "
-"training does in the very first terminal (the one that started the "
-"server):"
+"`Check out Flower Code Examples "
+"`__"
 msgstr ""
-"Chaque client aura son propre ensemble de données. Tu devrais maintenant "
-"voir comment la formation se déroule dans le tout premier terminal (celui"
-" qui a démarré le serveur) :"
+"`Check out Flower Code Examples "
+"`__"

-#: ../../source/tutorial-quickstart-mxnet.rst:289
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:956
 #, fuzzy
 msgid ""
-"Congratulations! You've successfully built and run your first federated "
-"learning system. The full `source code "
-"`_ for this example can be found in :code:`examples"
-"/quickstart-mxnet`."
+"`Use Flower Baselines for your research "
+"`__"
 msgstr ""
-"Félicitations ! Tu as réussi à construire et à faire fonctionner ton "
-"premier système d'apprentissage fédéré. Le code source complet "
-"`_ de cet exemple se trouve dans :code:`examples"
-"/quickstart-mxnet`."
-
-#: ../../source/tutorial-quickstart-pandas.rst:5
-msgid "Quickstart Pandas"
-msgstr "Démarrage rapide des Pandas"
-
-#: ../../source/tutorial-quickstart-pandas.rst:7
-msgid "Let's build a federated analytics system using Pandas and Flower!"
-msgstr "Construisons un système d'analyse fédéré à l'aide de Pandas et de Flower !"
+"`Utilise les Flower Baselines pour ta recherche "
+"`__"

-#: ../../source/tutorial-quickstart-pandas.rst:9
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:957
 #, fuzzy
 msgid ""
-"Please refer to the `full code example "
-"`_ "
-"to learn more."
+"`Watch Flower Summit 2023 videos `__"
 msgstr ""
-"Réfère-toi à l'exemple de code complet "
-"`_ "
-"pour en savoir plus."
+"`Regarde les vidéos du Flower Summit 2023 `__"

-#: ../../source/tutorial-quickstart-pytorch.rst:10
-msgid ""
-"In this tutorial we will learn how to train a Convolutional Neural "
-"Network on CIFAR10 using Flower and PyTorch."
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9
+msgid "Get started with Flower"
 msgstr ""
-"Dans ce tutoriel, nous allons apprendre à entraîner un réseau neuronal "
-"convolutif sur CIFAR10 à l'aide de Flower et PyTorch."

-#: ../../source/tutorial-quickstart-pytorch.rst:12
-msgid ""
-"First of all, it is recommended to create a virtual environment and run "
-"everything within a `virtualenv `_."
-msgstr ""
-"Tout d'abord, il est recommandé de créer un environnement virtuel et de "
-"tout exécuter au sein d'un `virtualenv `_."
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11
+msgid "Welcome to the Flower federated learning tutorial!"
+msgstr "Bienvenue au tutoriel Flower sur l'apprentissage fédéré !"
-#: ../../source/tutorial-quickstart-pytorch.rst:26
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13
+#, fuzzy
 msgid ""
-"Since we want to use PyTorch to solve a computer vision task, let's go "
-"ahead and install PyTorch and the **torchvision** library:"
+"In this notebook, we'll build a federated learning system using Flower, "
+"`Flower Datasets `__ and PyTorch. In "
+"part 1, we use PyTorch for the model training pipeline and data loading. "
+"In part 2, we continue to federate the PyTorch-based pipeline using "
+"Flower."
 msgstr ""
-"Puisque nous voulons utiliser PyTorch pour résoudre une tâche de vision "
-"par ordinateur, allons-y et installons PyTorch et la bibliothèque "
-"**torchvision** :"
+"Dans ce carnet, nous allons construire un système d'apprentissage fédéré "
+"en utilisant Flower et PyTorch. Dans la première partie, nous utilisons "
+"PyTorch pour le pipeline d'entraînement des modèles et le chargement des "
+"données. Dans la deuxième partie, nous continuons à fédérer le pipeline "
+"basé sur PyTorch en utilisant Flower."

-#: ../../source/tutorial-quickstart-pytorch.rst:36
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:17
+msgid "Let's get stated!"
+msgstr "Commençons !"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:31
 msgid ""
-"Now that we have all our dependencies installed, let's run a simple "
-"distributed training with two clients and one server. Our training "
-"procedure and network architecture are based on PyTorch's `Deep Learning "
-"with PyTorch "
-"`_."
+"Before we begin with any actual code, let's make sure that we have "
+"everything we need."
 msgstr ""
-"Maintenant que nous avons installé toutes nos dépendances, lançons une "
-"formation distribuée simple avec deux clients et un serveur. Notre "
-"procédure de formation et l'architecture de notre réseau sont basées sur "
-"`Deep Learning with PyTorch "
-"`_ de"
-" PyTorch."
+"Avant de commencer à coder, assurons-nous que nous disposons de tout ce " +"dont nous avons besoin." -#: ../../source/tutorial-quickstart-pytorch.rst:38 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:45 +#, fuzzy msgid "" -"In a file called :code:`client.py`, import Flower and PyTorch related " -"packages:" +"Next, we install the necessary packages for PyTorch (``torch`` and " +"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " +"(``flwr``):" msgstr "" -"Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " -"liés à PyTorch :" - -#: ../../source/tutorial-quickstart-pytorch.rst:53 -msgid "In addition, we define the device allocation in PyTorch with:" -msgstr "En outre, nous définissons l'attribution des appareils dans PyTorch avec :" +"Ensuite, nous installons les paquets nécessaires pour PyTorch (``torch`` " +"et ``torchvision``) et Flower (``flwr``) :" -#: ../../source/tutorial-quickstart-pytorch.rst:59 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:105 +#, fuzzy msgid "" -"We use PyTorch to load CIFAR10, a popular colored image classification " -"dataset for machine learning. The PyTorch :code:`DataLoader()` downloads " -"the training and test data that are then normalized." +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" +" GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." 
msgstr "" -"Nous utilisons PyTorch pour charger CIFAR10, un ensemble de données de " -"classification d'images colorées populaire pour l'apprentissage " -"automatique. Le :code:`DataLoader()` de PyTorch télécharge les données " -"d'entraînement et de test qui sont ensuite normalisées." +"Il est possible de passer à un runtime dont l'accélération GPU est " +"activée (sur Google Colab : ``Runtime > Change runtime type > Hardware " +"acclerator : GPU > Save``). Note cependant que Google Colab n'est pas " +"toujours en mesure de proposer l'accélération GPU. Si tu vois une erreur " +"liée à la disponibilité du GPU dans l'une des sections suivantes, " +"envisage de repasser à une exécution basée sur le CPU en définissant " +"``DEVICE = torch.device(\"cpu\")``. Si le runtime a activé l'accélération" +" GPU, tu devrais voir apparaître le résultat ``Training on cuda``, sinon " +"il dira ``Training on cpu``." -#: ../../source/tutorial-quickstart-pytorch.rst:75 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:118 +msgid "Loading the data" +msgstr "Chargement des données" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:120 +#, fuzzy msgid "" -"Define the loss and optimizer with PyTorch. The training of the dataset " -"is done by looping over the dataset, measure the corresponding loss and " -"optimize it." +"Federated learning can be applied to many different types of tasks across" +" different domains. In this tutorial, we introduce federated learning by " +"training a simple convolutional neural network (CNN) on the popular " +"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " +"distinguish between images from ten different classes: 'airplane', " +"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " +"'truck'." 
msgstr "" -"Définis la perte et l'optimiseur avec PyTorch L'entraînement de " -"l'ensemble de données se fait en bouclant sur l'ensemble de données, en " -"mesurant la perte correspondante et en l'optimisant." +"L'apprentissage fédéré peut être appliqué à de nombreux types de tâches " +"dans différents domaines. Dans ce tutoriel, nous présentons " +"l'apprentissage fédéré en formant un simple réseau neuronal " +"convolutionnel (CNN) sur l'ensemble de données populaire CIFAR-10. " +"CIFAR-10 peut être utilisé pour former des classificateurs d'images qui " +"font la distinction entre les images de dix classes différentes :" -#: ../../source/tutorial-quickstart-pytorch.rst:91 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:131 msgid "" -"Define then the validation of the machine learning network. We loop over" -" the test set and measure the loss and accuracy of the test set." +"We simulate having multiple datasets from multiple organizations (also " +"called the \"cross-silo\" setting in federated learning) by splitting the" +" original CIFAR-10 dataset into multiple partitions. Each partition will " +"represent the data from a single organization. We're doing this purely " +"for experimentation purposes, in the real world there's no need for data " +"splitting because each organization already has their own data (so the " +"data is naturally partitioned)." msgstr "" -"Définis ensuite la validation du réseau d'apprentissage automatique. Nous" -" passons en boucle sur l'ensemble de test et mesurons la perte et la " -"précision de l'ensemble de test." +"Nous simulons le fait d'avoir plusieurs ensembles de données provenant de" +" plusieurs organisations (également appelé le paramètre \"cross-silo\" " +"dans l'apprentissage fédéré) en divisant l'ensemble de données CIFAR-10 " +"original en plusieurs partitions. Chaque partition représentera les " +"données d'une seule organisation. 
Nous faisons cela purement à des fins " +"d'expérimentation, dans le monde réel, il n'y a pas besoin de diviser les" +" données parce que chaque organisation a déjà ses propres données (les " +"données sont donc naturellement partitionnées)." -#: ../../source/tutorial-quickstart-pytorch.rst:110 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:133 +#, fuzzy msgid "" -"After defining the training and testing of a PyTorch machine learning " -"model, we use the functions for the Flower clients." +"Each organization will act as a client in the federated learning system. " +"So having ten organizations participate in a federation means having ten " +"clients connected to the federated learning server." msgstr "" -"Après avoir défini l'entraînement et le test d'un modèle d'apprentissage " -"automatique PyTorch, nous utilisons les fonctions pour les clients " -"Flower." +"Chaque organisation agira comme un client dans le système d'apprentissage" +" fédéré. Ainsi, le fait que dix organisations participent à une " +"fédération signifie que dix clients sont connectés au serveur " +"d'apprentissage fédéré :" -#: ../../source/tutorial-quickstart-pytorch.rst:112 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:144 msgid "" -"The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 " -"Minute Blitz':" +"Let's now create the Federated Dataset abstraction that from ``flwr-" +"datasets`` that partitions the CIFAR-10. We will create small training " +"and test set for each edge device and wrap each of them into a PyTorch " +"``DataLoader``:" msgstr "" -"Les clients de Flower utiliseront un CNN simple adapté de \"PyTorch : A " -"60 Minute Blitz\" :" -#: ../../source/tutorial-quickstart-pytorch.rst:139 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:198 msgid "" -"After loading the data set with :code:`load_data()` we define the Flower " -"interface." 
+"We now have a list of ten training sets and ten validation sets " +"(``trainloaders`` and ``valloaders``) representing the data of ten " +"different organizations. Each ``trainloader``/``valloader`` pair contains" +" 4500 training examples and 500 validation examples. There's also a " +"single ``testloader`` (we did not split the test set). Again, this is " +"only necessary for building research or educational systems, actual " +"federated learning systems have their data naturally distributed across " +"multiple partitions." msgstr "" -"Après avoir chargé l'ensemble des données avec :code:`load_data()`, nous " -"définissons l'interface Flower." +"Nous avons maintenant une liste de dix ensembles de formation et dix " +"ensembles de validation (``trainloaders`` et ``valloaders``) représentant" +" les données de dix organisations différentes. Chaque paire " +"``trainloader`/``valloader`` contient 4500 exemples de formation et 500 " +"exemples de validation. Il y a également un seul ``testloader`` (nous " +"n'avons pas divisé l'ensemble de test). Encore une fois, cela n'est " +"nécessaire que pour construire des systèmes de recherche ou d'éducation, " +"les systèmes d'apprentissage fédérés actuels ont leurs données " +"naturellement distribuées à travers plusieurs partitions." -#: ../../source/tutorial-quickstart-pytorch.rst:147 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:201 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses PyTorch. 
Implementing :code:`NumPyClient` usually means " -"defining the following methods (:code:`set_parameters` is optional " -"though):" +"Let's take a look at the first batch of images and labels in the first " +"training set (i.e., ``trainloaders[0]``) before we move on:" msgstr "" -"Flower fournit une classe de commodité appelée :code:`NumPyClient` qui " -"facilite la mise en œuvre de l'interface :code:`Client` lorsque ta charge" -" de travail utilise PyTorch. Mettre en œuvre :code:`NumPyClient` signifie" -" généralement définir les méthodes suivantes (:code:`set_parameters` est " -"cependant facultatif) :" - -#: ../../source/tutorial-quickstart-pytorch.rst:163 -msgid "which can be implemented in the following way:" -msgstr "qui peut être mis en œuvre de la manière suivante :" +"Jetons un coup d'œil au premier lot d'images et d'étiquettes du premier " +"ensemble d'entraînement (c'est-à-dire ``trainloaders[0]``) avant de " +"poursuivre :" -#: ../../source/tutorial-quickstart-pytorch.rst:186 -#: ../../source/tutorial-quickstart-tensorflow.rst:79 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:240 msgid "" -"We can now create an instance of our class :code:`CifarClient` and add " -"one line to actually run this client:" +"The output above shows a random batch of images from the first " +"``trainloader`` in our list of ten ``trainloaders``. It also prints the " +"labels associated with each image (i.e., one of the ten possible labels " +"we've seen above). If you run the cell again, you should see another " +"batch of images." msgstr "" -"Nous pouvons maintenant créer une instance de notre classe " -":code:`CifarClient` et ajouter une ligne pour exécuter ce client :" +"La sortie ci-dessus montre un lot aléatoire d'images provenant du premier" +" ``trainloader`` de notre liste de dix ``trainloaders``. 
Elle imprime également les étiquettes associées à chaque " +"image (c'est-à-dire l'une des dix étiquettes possibles que nous avons " +"vues ci-dessus). Si tu exécutes à nouveau la cellule, tu devrais voir un " +"autre lot d'images." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:252 +msgid "Step 1: Centralized Training with PyTorch" +msgstr "Étape 1 : Formation centralisée avec PyTorch" -#: ../../source/tutorial-quickstart-pytorch.rst:193 -#: ../../source/tutorial-quickstart-tensorflow.rst:87 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:263 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()` or " -":code:`fl.client.start_numpy_client()`. The string :code:`\"[::]:8080\"` " -"tells the client which server to connect to. In our case we can run the " -"server and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"Next, we're going to use PyTorch to define a simple convolutional neural " +"network. This introduction assumes basic familiarity with PyTorch, so it " +"doesn't cover the PyTorch-related aspects in full detail. If you want to " +"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " +"MINUTE BLITZ " +"`__." msgstr "" -"C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou" -" :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()` ou " -":code:`fl.client.start_numpy_client()`. La chaîne :code:`\"[: :]:8080\"` " -"indique au client à quel serveur se connecter. Dans notre cas, nous " -"pouvons exécuter le serveur et le client sur la même machine, c'est " -"pourquoi nous utilisons :code:`\"[: :]:8080\"`. 
Si nous exécutons une " -"charge de travail véritablement fédérée avec le serveur et les clients " -"fonctionnant sur des machines différentes, tout ce qui doit changer est " -"l'adresse :code:`server_address` vers laquelle nous dirigeons le client." +"Ensuite, nous allons utiliser PyTorch pour définir un simple réseau " +"neuronal convolutif. Cette introduction suppose une familiarité de base " +"avec PyTorch, elle ne couvre donc pas en détail les aspects liés à " +"PyTorch. Si tu veux plonger plus profondément dans PyTorch, nous te " +"recommandons `DEEP LEARNING WITH PYTORCH : A 60 MINUTE BLITZ " +"`__." -#: ../../source/tutorial-quickstart-pytorch.rst:268 -#, fuzzy +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:275 +msgid "Defining the model" +msgstr "Définir le modèle" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:277 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples" -"/quickstart-pytorch`." +"We use the simple CNN described in the `PyTorch tutorial " +"`__:" msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples" -"/quickstart-pytorch`." 
+"Nous utilisons le CNN simple décrit dans le tutoriel `PyTorch " +"`__ :" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 -msgid "Quickstart PyTorch Lightning" -msgstr "Démarrage rapide de PyTorch Lightning" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:314 +msgid "Let's continue with the usual training and test functions:" +msgstr "Poursuivons avec les fonctions habituelles de formation et de test :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:374 +msgid "Training the model" +msgstr "Entraîne le modèle" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:7 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:376 msgid "" -"Let's build a federated learning system using PyTorch Lightning and " -"Flower!" +"We now have all the basic building blocks we need: a dataset, a model, a " +"training function, and a test function. Let's put them together to train " +"the model on the dataset of one of our organizations " +"(``trainloaders[0]``). This simulates the reality of most machine " +"learning projects today: each organization has their own data and trains " +"models only on this internal data:" msgstr "" -"Construisons un système d'apprentissage fédéré en utilisant PyTorch " -"Lightning et Flower !" +"Nous avons maintenant tous les éléments de base dont nous avons besoin : " +"un ensemble de données, un modèle, une fonction d'entraînement et une " +"fonction de test. Assemblons-les pour entraîner le modèle sur l'ensemble " +"de données de l'une de nos organisations (``trainloaders[0]``). 
Cela " +"simule la réalité de la plupart des projets d'apprentissage automatique " +"aujourd'hui : chaque organisation possède ses propres données et entraîne" +" les modèles uniquement sur ces données internes :" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:9 -#, fuzzy +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 msgid "" -"Please refer to the `full code example " -"`_ to learn more." +"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " +"in a test set accuracy of about 41%, which is not good, but at the same " +"time, it doesn't really matter for the purposes of this tutorial. The " +"intent was just to show a simplistic centralized training pipeline that " +"sets the stage for what comes next - federated learning!" msgstr "" -"Réfère-toi à l'exemple de code complet " -"`_ pour en savoir plus." +"L'entraînement du CNN simple sur notre fractionnement CIFAR-10 pendant 5 " +"époques devrait se traduire par une précision de l'ensemble de test " +"d'environ 41 %, ce qui n'est pas bon, mais en même temps, cela n'a pas " +"vraiment d'importance pour les besoins de ce tutoriel. L'intention était " +"juste de montrer un pipeline d'entraînement centralisé simpliste qui " +"prépare le terrain pour ce qui vient ensuite - l'apprentissage fédéré !" -#: ../../source/tutorial-quickstart-scikitlearn.rst:5 -msgid "Quickstart scikit-learn" -msgstr "Démarrage rapide de scikit-learn" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 +msgid "Step 2: Federated Learning with Flower" +msgstr "Étape 2 : Apprentissage fédéré avec Flower" -#: ../../source/tutorial-quickstart-scikitlearn.rst:7 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 msgid "" -"In this tutorial, we will learn how to train a :code:`Logistic " -"Regression` model on MNIST using Flower and scikit-learn." +"Step 1 demonstrated a simple centralized training pipeline. 
All data was " +"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." +" Next, we'll simulate a situation where we have multiple datasets in " +"multiple organizations and where we train a model over these " +"organizations using federated learning." msgstr "" -"Dans ce tutoriel, nous allons apprendre à former un :code:`modèle de " -"régression logistique` sur MNIST en utilisant Flower et scikit-learn." - -#: ../../source/tutorial-quickstart-scikitlearn.rst:23 -msgid "Since we want to use scikt-learn, let's go ahead and install it:" -msgstr "Puisque nous voulons utiliser scikt-learn, allons-y et installons-le :" +"L'étape 1 a montré un simple pipeline de formation centralisé. Toutes les" +" données étaient au même endroit (c'est-à-dire un seul ``trainloader`` et" +" un seul ``valloader``). Ensuite, nous allons simuler une situation où " +"nous avons plusieurs ensembles de données dans plusieurs organisations et" +" où nous formons un modèle sur ces organisations à l'aide de " +"l'apprentissage fédéré." -#: ../../source/tutorial-quickstart-scikitlearn.rst:29 -msgid "Or simply install all dependencies using Poetry:" -msgstr "Ou installe simplement toutes les dépendances à l'aide de Poetry :" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 +msgid "Updating model parameters" +msgstr "Mise à jour des paramètres du modèle" -#: ../../source/tutorial-quickstart-scikitlearn.rst:39 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. However, before " -"setting up the client and server, we will define all functionalities that" -" we need for our federated learning setup within :code:`utils.py`. 
The " -":code:`utils.py` contains different functions defining all the machine " -"learning basics:" +"In federated learning, the server sends the global model parameters to " +"the client, and the client updates the local model with the parameters " +"received from the server. It then trains the model on the local data " +"(which changes the model parameters locally) and sends the " +"updated/changed model parameters back to the server (or, alternatively, " +"it sends just the gradients back to the server, not the full model " +"parameters)." msgstr "" -"Maintenant que toutes nos dépendances sont installées, exécutons une " -"formation distribuée simple avec deux clients et un serveur. Cependant, " -"avant de configurer le client et le serveur, nous allons définir toutes " -"les fonctionnalités dont nous avons besoin pour notre configuration " -"d'apprentissage fédéré dans :code:`utils.py`. Le :code:`utils.py` " -"contient différentes fonctions définissant toutes les bases de " -"l'apprentissage automatique :" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:42 -msgid ":code:`get_model_parameters()`" -msgstr ":code:`get_model_parameters()`" +"Dans l'apprentissage fédéré, le serveur envoie les paramètres du modèle " +"global au client, et le client met à jour le modèle local avec les " +"paramètres reçus du serveur. Il entraîne ensuite le modèle sur les " +"données locales (ce qui modifie les paramètres du modèle localement) et " +"renvoie les paramètres du modèle mis à jour/changés au serveur (ou, " +"alternativement, il renvoie seulement les gradients au serveur, et non " +"pas les paramètres complets du modèle)." 
-#: ../../source/tutorial-quickstart-scikitlearn.rst:43 -msgid "Returns the paramters of a :code:`sklearn` LogisticRegression model" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 +msgid "" +"We need two helper functions to update the local model with parameters " +"received from the server and to get the updated model parameters from the" +" local model: ``set_parameters`` and ``get_parameters``. The following " +"two functions do just that for the PyTorch model above." msgstr "" -"Renvoie les paramètres d'un modèle de régression logistique " -":code:`sklearn`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:44 -msgid ":code:`set_model_params()`" -msgstr ":code:`set_model_params()`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:45 -msgid "Sets the parameters of a :code:`sklean` LogisticRegression model" -msgstr "Définit les paramètres d'un modèle de régression logistique :code:`sklean`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:46 -msgid ":code:`set_initial_params()`" -msgstr ":code:`set_initial_params()`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:47 -msgid "Initializes the model parameters that the Flower server will ask for" -msgstr "Initialise les paramètres du modèle que le serveur de Flower demandera" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:48 -msgid ":code:`load_mnist()`" -msgstr ":code:`load_mnist()`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:49 -msgid "Loads the MNIST dataset using OpenML" -msgstr "Charge l'ensemble de données MNIST à l'aide d'OpenML" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid ":code:`shuffle()`" -msgstr ":code:`shuffle()`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:51 -msgid "Shuffles data and its label" -msgstr "Mélange les données et leur étiquette" +"Nous avons besoin de deux fonctions d'aide pour mettre à jour le modèle " +"local avec les paramètres reçus du serveur et pour obtenir les paramètres" +" mis 
à jour du modèle local : ``set_parameters`` et ``get_parameters``. " +"Les deux fonctions suivantes font exactement cela pour le modèle PyTorch " +"ci-dessus." -#: ../../source/tutorial-quickstart-scikitlearn.rst:53 -msgid ":code:`partition()`" -msgstr ":code:`partition()`" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +msgid "" +"The details of how this works are not really important here (feel free to" +" consult the PyTorch documentation if you want to learn more). In " +"essence, we use ``state_dict`` to access PyTorch model parameter tensors." +" The parameter tensors are then converted to/from a list of NumPy " +"ndarray's (which Flower knows how to serialize/deserialize):" +msgstr "" +"Les détails de ce fonctionnement ne sont pas vraiment importants ici " +"(n'hésite pas à consulter la documentation PyTorch si tu veux en savoir " +"plus). En substance, nous utilisons ``state_dict`` pour accéder aux " +"tenseurs de paramètres du modèle PyTorch. Les tenseurs de paramètres sont" +" ensuite convertis en/depuis une liste de ndarray NumPy (que Flower sait " +"sérialiser/désérialiser) :" -#: ../../source/tutorial-quickstart-scikitlearn.rst:53 -msgid "Splits datasets into a number of partitions" -msgstr "Divise les ensembles de données en un certain nombre de partitions" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 +msgid "Implementing a Flower client" +msgstr "Mise en place d'un client Flower" -#: ../../source/tutorial-quickstart-scikitlearn.rst:55 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 msgid "" -"Please check out :code:`utils.py` `here " -"`_ for more details. The pre-defined functions are used in" -" the :code:`client.py` and imported. The :code:`client.py` also requires " -"to import several packages such as Flower and scikit-learn:" +"With that out of the way, let's move on to the interesting part. 
" +"Federated learning systems consist of a server and multiple clients. In " +"Flower, we create clients by implementing subclasses of " +"``flwr.client.Client`` or ``flwr.client.NumPyClient``. We use " +"``NumPyClient`` in this tutorial because it is easier to implement and " +"requires us to write less boilerplate." msgstr "" -"Tu peux consulter :code:`utils.py` `ici " -"`_ pour plus de détails. Les fonctions prédéfinies sont " -"utilisées dans :code:`client.py` et importées. :code:`client.py` " -"nécessite également d'importer plusieurs paquets tels que Flower et " -"scikit-learn :" +"Ceci étant dit, passons à la partie intéressante. Les systèmes " +"d'apprentissage fédérés se composent d'un serveur et de plusieurs " +"clients. Dans Flower, nous créons des clients en mettant en œuvre des " +"sous-classes de ``flwr.client.Client`` ou de ``flwr.client.NumPyClient``." +" Nous utilisons ``NumPyClient`` dans ce tutoriel parce qu'il est plus " +"facile à mettre en œuvre et qu'il nous oblige à rédiger moins de code " +"répétitif." -#: ../../source/tutorial-quickstart-scikitlearn.rst:70 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 msgid "" -"We load the MNIST dataset from `OpenML `_, " -"a popular image classification dataset of handwritten digits for machine " -"learning. The utility :code:`utils.load_mnist()` downloads the training " -"and test data. The training set is split afterwards into 10 partitions " -"with :code:`utils.partition()`." +"To implement the Flower client, we create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``:" msgstr "" -"Nous chargeons l'ensemble de données MNIST de `OpenML " -"`_, un ensemble de données de " -"classification d'images populaires de chiffres manuscrits pour " -"l'apprentissage automatique. L'utilitaire :code:`utils.load_mnist()` " -"télécharge les données d'entraînement et de test. 
L'ensemble " -"d'entraînement est ensuite divisé en 10 partitions avec " -":code:`utils.partition()`." +"Pour mettre en œuvre le client Flower, nous créons une sous-classe de " +"``flwr.client.NumPyClient`` et mettons en œuvre les trois méthodes " +"``get_parameters``, ``fit`` et ``evaluate`` :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 +msgid "``get_parameters``: Return the current local model parameters" +msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" -#: ../../source/tutorial-quickstart-scikitlearn.rst:82 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 msgid "" -"Next, the logistic regression model is defined and initialized with " -":code:`utils.set_initial_params()`." +"``fit``: Receive model parameters from the server, train the model " +"parameters on the local data, and return the (updated) model parameters " +"to the server" msgstr "" -"Ensuite, le modèle de régression logistique est défini et initialisé avec" -" :code:`utils.set_initial_params()`." +"``fit`` : reçoit les paramètres du modèle du serveur, entraîne les " +"paramètres du modèle sur les données locales et renvoie les paramètres du" +" modèle (mis à jour) au serveur" -#: ../../source/tutorial-quickstart-scikitlearn.rst:94 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to fit the logistic regression we defined earlier)." 
+"``evaluate``: Receive model parameters from the server, evaluate the " +"model parameters on the local data, and return the evaluation result to " +"the server" msgstr "" -"Le serveur Flower interagit avec les clients par le biais d'une interface" -" appelée :code:`Client`. Lorsque le serveur sélectionne un client " -"particulier pour la formation, il envoie des instructions de formation " -"sur le réseau. Le client reçoit ces instructions et appelle l'une des " -"méthodes :code:`Client` pour exécuter ton code (c'est-à-dire pour ajuster" -" la régression logistique que nous avons définie plus tôt)." +"``evaluate`` : reçoit les paramètres du modèle du serveur, évalue les " +"paramètres du modèle sur les données locales et renvoie le résultat de " +"l'évaluation au serveur" -#: ../../source/tutorial-quickstart-scikitlearn.rst:100 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses scikit-learn. Implementing :code:`NumPyClient` usually " -"means defining the following methods (:code:`set_parameters` is optional " -"though):" +"We mentioned that our clients will use the previously defined PyTorch " +"components for model training and evaluation. Let's see a simple Flower " +"client implementation that brings everything together:" msgstr "" -"Flower fournit une classe de commodité appelée :code:`NumPyClient` qui " -"facilite la mise en œuvre de l'interface :code:`Client` lorsque ta charge" -" de travail utilise scikit-learn. Mettre en œuvre :code:`NumPyClient` " -"signifie généralement définir les méthodes suivantes " -"(:code:`set_parameters` est cependant facultatif) :" +"Nous avons mentionné que nos clients utiliseront les composants PyTorch " +"définis précédemment pour la formation et l'évaluation des modèles. 
" +"Voyons une simple mise en œuvre du client Flower qui réunit tout cela :" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 +msgid "" +"Our class ``FlowerClient`` defines how local training/evaluation will be " +"performed and allows Flower to call the local training/evaluation through" +" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" +" *single client* in our federated learning system. Federated learning " +"systems have multiple clients (otherwise, there's not much to federate), " +"so each client will be represented by its own instance of " +"``FlowerClient``. If we have, for example, three clients in our workload," +" then we'd have three instances of ``FlowerClient``. Flower calls " +"``FlowerClient.fit`` on the respective instance when the server selects a" +" particular client for training (and ``FlowerClient.evaluate`` for " +"evaluation)." +msgstr "" +"Notre classe ``FlowerClient`` définit la manière dont l'entraînement et " +"l'évaluation locaux sont effectués et permet à Flower d'appeler cet " +"entraînement/évaluation local via ``fit`` et ``evaluate``. Chaque " +"instance de ``FlowerClient`` représente un *seul client* dans notre " +"système d'apprentissage fédéré. Les systèmes d'apprentissage fédéré ont " +"plusieurs clients (sinon, il n'y a pas grand-chose à fédérer), donc " +"chaque client sera représenté par sa propre instance de " +"``FlowerClient``. Si nous avons, par exemple, trois clients dans notre " +"charge de travail, nous aurions alors trois instances de " +"``FlowerClient``. Flower appelle ``FlowerClient.fit`` sur l'instance " +"correspondante lorsque le serveur sélectionne un client particulier pour " +"l'entraînement (et ``FlowerClient.evaluate`` pour l'évaluation)." 
-#: ../../source/tutorial-quickstart-scikitlearn.rst:117 -msgid "The methods can be implemented in the following way:" -msgstr "Les méthodes peuvent être mises en œuvre de la manière suivante :" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:517 +msgid "Using the Virtual Client Engine" +msgstr "Utilisation du moteur du client virtuel" -#: ../../source/tutorial-quickstart-scikitlearn.rst:140 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:519 msgid "" -"We can now create an instance of our class :code:`MnistClient` and add " -"one line to actually run this client:" +"In this notebook, we want to simulate a federated learning system with 10" +" clients on a single machine. This means that the server and all 10 " +"clients will live on a single machine and share resources such as CPU, " +"GPU, and memory. Having 10 clients would mean having 10 instances of " +"``FlowerClient`` in memory. Doing this on a single machine can quickly " +"exhaust the available memory resources, even if only a subset of these " +"clients participates in a single round of federated learning." msgstr "" -"Nous pouvons maintenant créer une instance de notre classe " -":code:`MnistClient` et ajouter une ligne pour exécuter ce client :" +"Dans ce carnet, nous voulons simuler un système d'apprentissage fédéré " +"avec 10 clients sur une seule machine. Cela signifie que le serveur et " +"les 10 clients vivront sur une seule machine et partageront des " +"ressources telles que le CPU, le GPU et la mémoire. Avoir 10 clients " +"signifierait avoir 10 instances de ``FlowerClient`` en mémoire. Faire " +"cela sur une seule machine peut rapidement épuiser les ressources mémoire" +" disponibles, même si seulement un sous-ensemble de ces clients participe" +" à un seul tour d'apprentissage fédéré." 
-#: ../../source/tutorial-quickstart-scikitlearn.rst:156 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:521 msgid "" -"The following Flower server is a little bit more advanced and returns an " -"evaluation function for the server-side evaluation. First, we import " -"again all required libraries such as Flower and scikit-learn." +"In addition to the regular capabilities where server and clients run on " +"multiple machines, Flower, therefore, provides special simulation " +"capabilities that create ``FlowerClient`` instances only when they are " +"actually necessary for training or evaluation. To enable the Flower " +"framework to create clients when necessary, we need to implement a " +"function called ``client_fn`` that creates a ``FlowerClient`` instance on" +" demand. Flower calls ``client_fn`` whenever it needs an instance of one " +"particular client to call ``fit`` or ``evaluate`` (those instances are " +"usually discarded after use, so they should not keep any local state). " +"Clients are identified by a client ID, or short ``cid``. The ``cid`` can " +"be used, for example, to load different local data partitions for " +"different clients, as can be seen below:" msgstr "" -"Le serveur Flower suivant est un peu plus avancé et renvoie une fonction " -"d'évaluation pour l'évaluation côté serveur. Tout d'abord, nous importons" -" à nouveau toutes les bibliothèques requises telles que Flower et scikit-" -"learn." +"In addition to the regular capabilities where server and clients run on " +"multiple machines, Flower, therefore, provides special simulation " +"capabilities that create ``FlowerClient`` instances only when they are " +"actually necessary for training or evaluation. To enable the Flower " +"framework to create clients when necessary, we need to implement a " +"function called ``client_fn`` that creates a ``FlowerClient`` instance on" +" demand. 
Flower calls ``client_fn`` whenever it needs an instance of one " +"particular client to call ``fit`` or ``evaluate`` (those instances are " +"usually discarded after use, so they should not keep any local state). " +"Clients are identified by a client ID, or short ``cid``. The ``cid`` can " +"be used, for example, to load different local data partitions for " +"different clients, as can be seen below:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:159 -msgid ":code:`server.py`, import Flower and start the server:" -msgstr ":code:`server.py`, importe Flower et démarre le serveur :" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:556 +msgid "Starting the training" +msgstr "Commencer la formation" -#: ../../source/tutorial-quickstart-scikitlearn.rst:170 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:558 msgid "" -"The number of federated learning rounds is set in :code:`fit_round()` and" -" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " -"function is called after each federated learning round and gives you " -"information about loss and accuracy." +"We now have the class ``FlowerClient`` which defines client-side " +"training/evaluation and ``client_fn`` which allows Flower to create " +"``FlowerClient`` instances whenever it needs to call ``fit`` or " +"``evaluate`` on one particular client. The last step is to start the " +"actual simulation using ``flwr.simulation.start_simulation``." msgstr "" -"Le nombre de tours d'apprentissage fédéré est défini dans " -":code:`fit_round()` et l'évaluation est définie dans " -":code:`get_evaluate_fn()`. La fonction d'évaluation est appelée après " -"chaque tour d'apprentissage fédéré et te donne des informations sur la " -"perte et la précision." 
+"Nous avons maintenant la classe ``FlowerClient`` qui définit " +"l'entraînement/évaluation côté client et ``client_fn`` qui permet à " +"Flower de créer des instances de ``FlowerClient`` chaque fois qu'il a " +"besoin d'appeler ``fit`` ou ``evaluate`` sur un client particulier. La " +"dernière étape consiste à démarrer la simulation réelle en utilisant " +"``flwr.simulation.start_simulation``." -#: ../../source/tutorial-quickstart-scikitlearn.rst:195 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:560 msgid "" -"The :code:`main` contains the server-side parameter initialization " -":code:`utils.set_initial_params()` as well as the aggregation strategy " -":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " -"federated averaging (or FedAvg), with two clients and evaluation after " -"each federated learning round. The server can be started with the command" -" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." +"The function ``start_simulation`` accepts a number of arguments, amongst " +"them the ``client_fn`` used to create ``FlowerClient`` instances, the " +"number of clients to simulate (``num_clients``), the number of federated " +"learning rounds (``num_rounds``), and the strategy. The strategy " +"encapsulates the federated learning approach/algorithm, for example, " +"*Federated Averaging* (FedAvg)." msgstr "" -"Le :code:`main` contient l'initialisation des paramètres côté serveur " -":code:`utils.set_initial_params()` ainsi que la stratégie d'agrégation " -":code:`fl.server.strategy:FedAvg()`. La stratégie est celle par défaut, " -"la moyenne fédérée (ou FedAvg), avec deux clients et une évaluation après" -" chaque tour d'apprentissage fédéré. Le serveur peut être démarré avec la" -" commande :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." 
+"La fonction ``start_simulation`` accepte un certain nombre d'arguments, " +"parmi lesquels le ``client_fn`` utilisé pour créer les instances " +"``FlowerClient``, le nombre de clients à simuler (``num_clients``), le " +"nombre de tours d'apprentissage fédéré (``num_rounds``), et la stratégie." +" La stratégie encapsule l'approche/algorithme d'apprentissage fédéré, par" +" exemple, *Federated Averaging* (FedAvg)." -#: ../../source/tutorial-quickstart-scikitlearn.rst:214 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:562 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. Federated learning systems usually have a " -"server and multiple clients. We, therefore, have to start the server " -"first:" +"Flower has a number of built-in strategies, but we can also use our own " +"strategy implementations to customize nearly all aspects of the federated" +" learning approach. For this example, we use the built-in ``FedAvg`` " +"implementation and customize it using a few basic parameters. The last " +"step is the actual call to ``start_simulation`` which - you guessed it - " +"starts the simulation:" msgstr "" -"Le client et le serveur étant prêts, nous pouvons maintenant tout lancer " -"et voir l'apprentissage fédéré en action. Les systèmes d'apprentissage " -"fédéré ont généralement un serveur et plusieurs clients. Nous devons donc" -" commencer par lancer le serveur :" +"Flower dispose d'un certain nombre de stratégies intégrées, mais nous " +"pouvons également utiliser nos propres implémentations de stratégies pour" +" personnaliser presque tous les aspects de l'approche de l'apprentissage " +"fédéré. Pour cet exemple, nous utilisons l'implémentation intégrée " +"``FedAvg`` et nous la personnalisons en utilisant quelques paramètres de " +"base. 
La dernière étape est l'appel à ``start_simulation`` qui - tu l'as " +"deviné - démarre la simulation :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:608 +msgid "Behind the scenes" +msgstr "Dans les coulisses" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:610 +msgid "So how does this work? How does Flower execute this simulation?" +msgstr "" +"Alors, comment cela fonctionne-t-il ? Comment Flower exécute-t-il cette " +"simulation ?" -#: ../../source/tutorial-quickstart-scikitlearn.rst:268 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:612 +#, python-format msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples/sklearn-logreg-" -"mnist`." +"When we call ``start_simulation``, we tell Flower that there are 10 " +"clients (``num_clients=10``). Flower then goes ahead an asks the " +"``FedAvg`` strategy to select clients. ``FedAvg`` knows that it should " +"select 100% of the available clients (``fraction_fit=1.0``), so it goes " +"ahead and selects 10 random clients (i.e., 100% of 10)." msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples/sklearn-logreg-" -"mnist`." - -#: ../../source/tutorial-quickstart-tensorflow.rst:5 -msgid "Quickstart TensorFlow" -msgstr "Démarrage rapide de TensorFlow" +"Lorsque nous appelons ``start_simulation``, nous disons à Flower qu'il y " +"a 10 clients (``num_clients=10``). Flower demande alors à la stratégie " +"``FedAvg`` de sélectionner des clients. ``FedAvg` sait qu'il doit " +"sélectionner 100% des clients disponibles (``fraction_fit=1.0``), alors " +"il choisit 10 clients au hasard (c'est à dire 100% de 10)." 
-#: ../../source/tutorial-quickstart-tensorflow.rst:10
-msgid "Let's build a federated learning system in less than 20 lines of code!"
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:614
+msgid ""
+"Flower then asks the selected 10 clients to train the model. When the "
+"server receives the model parameter updates from the clients, it hands "
+"those updates over to the strategy (*FedAvg*) for aggregation. The "
+"strategy aggregates those updates and returns the new global model, which"
+" then gets used in the next round of federated learning."
 msgstr ""
-"Construisons un système d'apprentissage fédéré en moins de 20 lignes de "
-"code !"
+"Flower demande ensuite aux 10 clients sélectionnés d'entraîner le modèle."
+" Lorsque le serveur reçoit les mises à jour des paramètres du modèle de "
+"la part des clients, il les transmet à la stratégie (*FedAvg*) pour "
+"qu'elle les agrège. La stratégie agrège ces mises à jour et renvoie le "
+"nouveau modèle global, qui est ensuite utilisé dans le prochain cycle "
+"d'apprentissage fédéré."
 
-#: ../../source/tutorial-quickstart-tensorflow.rst:12
-msgid "Before Flower can be imported we have to install it:"
-msgstr "Avant de pouvoir importer une fleur, nous devons l'installer :"
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:626
+msgid "Where's the accuracy?"
+msgstr "Où est la précision ?"
 
-#: ../../source/tutorial-quickstart-tensorflow.rst:18
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628
 msgid ""
-"Since we want to use the Keras API of TensorFlow (TF), we have to install"
-" TF as well:"
+"You may have noticed that all metrics except for ``losses_distributed`` "
+"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?"
 msgstr ""
-"Comme nous voulons utiliser l'API Keras de TensorFlow (TF), nous devons "
-"également installer TF :"
+"Tu as peut-être remarqué que toutes les mesures, à l'exception de "
+"``losses_distributed``, sont vides. 
Où est passée la ``{\"accuracy\": "
+"float(accuracy)}`` ?"
 
-#: ../../source/tutorial-quickstart-tensorflow.rst:28
-msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:"
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630
+msgid ""
+"Flower can automatically aggregate losses returned by individual clients,"
+" but it cannot do the same for metrics in the generic metrics dictionary "
+"(the one with the ``accuracy`` key). Metrics dictionaries can contain "
+"very different kinds of metrics and even key/value pairs that are not "
+"metrics at all, so the framework does not (and can not) know how to "
+"handle these automatically."
 msgstr ""
-"Ensuite, dans un fichier appelé :code:`client.py`, importe Flower et "
-"TensorFlow :"
+"Flower peut automatiquement agréger les pertes renvoyées par les clients "
+"individuels, mais il ne peut pas faire la même chose pour les mesures "
+"dans le dictionnaire de mesures générique (celui avec la clé "
+"``accuracy``). Les dictionnaires de mesures peuvent contenir des types de"
+" mesures très différents et même des paires clé/valeur qui ne sont pas "
+"des mesures du tout, donc le cadre ne sait pas (et ne peut pas) savoir "
+"comment les gérer automatiquement."
 
-#: ../../source/tutorial-quickstart-tensorflow.rst:35
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632
 msgid ""
-"We use the Keras utilities of TF to load CIFAR10, a popular colored image"
-" classification dataset for machine learning. The call to "
-":code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches "
-"it locally, and then returns the entire training and test set as NumPy "
-"ndarrays."
+"As users, we need to tell the framework how to handle/aggregate these "
+"custom metrics, and we do so by passing metric aggregation functions to "
+"the strategy. The strategy will then call these functions whenever it "
+"receives fit or evaluate metrics from clients. 
The two possible functions" +" are ``fit_metrics_aggregation_fn`` and " +"``evaluate_metrics_aggregation_fn``." msgstr "" -"Nous utilisons les utilitaires Keras de TF pour charger CIFAR10, un " -"ensemble de données de classification d'images colorées populaire pour " -"l'apprentissage automatique. L'appel à " -":code:`tf.keras.datasets.cifar10.load_data()` télécharge CIFAR10, le met " -"en cache localement, puis renvoie l'ensemble d'entraînement et de test " -"sous forme de NumPy ndarrays." +"En tant qu'utilisateurs, nous devons indiquer au framework comment " +"gérer/agréger ces métriques personnalisées, et nous le faisons en passant" +" des fonctions d'agrégation de métriques à la stratégie. La stratégie " +"appellera alors ces fonctions chaque fois qu'elle recevra des métriques " +"d'ajustement ou d'évaluation de la part des clients. Les deux fonctions " +"possibles sont ``fit_metrics_aggregation_fn`` et " +"``evaluate_metrics_aggregation_fn``." -#: ../../source/tutorial-quickstart-tensorflow.rst:44 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634 msgid "" -"Next, we need a model. For the purpose of this tutorial, we use " -"MobilNetV2 with 10 output classes:" +"Let's create a simple weighted averaging function to aggregate the " +"``accuracy`` metric we return from ``evaluate``:" msgstr "" -"Ensuite, nous avons besoin d'un modèle. Pour les besoins de ce tutoriel, " -"nous utilisons MobilNetV2 avec 10 classes de sortie :" +"Créons une simple fonction de calcul de la moyenne pondérée pour agréger " +"la mesure de \"précision\" que nous renvoie ``evaluate`` :" -#: ../../source/tutorial-quickstart-tensorflow.rst:57 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:660 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses Keras. 
The :code:`NumPyClient` interface defines three "
-"methods which can be implemented in the following way:"
+"The only thing left to do is to tell the strategy to call this function "
+"whenever it receives evaluation metric dictionaries from the clients:"
 msgstr ""
-"Flower fournit une classe de commodité appelée :code:`NumPyClient` qui "
-"facilite la mise en œuvre de l'interface :code:`Client` lorsque ta charge"
-" de travail utilise Keras. L'interface :code:`NumPyClient` définit trois "
-"méthodes qui peuvent être mises en œuvre de la manière suivante :"
-
-#: ../../source/tutorial-quickstart-tensorflow.rst:132
-msgid "Each client will have its own dataset."
-msgstr "Chaque client aura son propre ensemble de données."
+"La seule chose qui reste à faire est d'indiquer à la stratégie d'appeler "
+"cette fonction chaque fois qu'elle reçoit des dictionnaires de métriques "
+"d'évaluation de la part des clients :"
 
-#: ../../source/tutorial-quickstart-tensorflow.rst:134
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:697
 msgid ""
-"You should now see how the training does in the very first terminal (the "
-"one that started the server):"
+"We now have a full system that performs federated training and federated "
+"evaluation. It uses the ``weighted_average`` function to aggregate custom"
+" evaluation metrics and calculates a single ``accuracy`` metric across "
+"all clients on the server side."
 msgstr ""
-"Tu devrais maintenant voir comment la formation se déroule dans le tout "
-"premier terminal (celui qui a démarré le serveur) :"
+"Nous avons maintenant un système complet qui effectue la formation "
+"fédérée et l'évaluation fédérée. Il utilise la fonction "
+"``weighted_average`` pour agréger les mesures d'évaluation "
+"personnalisées et calcule une seule mesure ``accuracy`` pour tous les "
+"clients du côté du serveur."
 
-#: ../../source/tutorial-quickstart-tensorflow.rst:166 -#, fuzzy +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:699 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this can be found in :code:`examples" -"/quickstart-tensorflow/client.py`." +"The other two categories of metrics (``losses_centralized`` and " +"``metrics_centralized``) are still empty because they only apply when " +"centralized evaluation is being used. Part two of the Flower tutorial " +"will cover centralized evaluation." msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le `code source complet " -"`_ pour cela se trouve dans :code:`examples" -"/quickstart-tensorflow/client.py`." +"Les deux autres catégories de mesures (``pertes_centralisées`` et " +"``métriques_centralisées``) sont toujours vides car elles ne s'appliquent" +" que lorsque l'évaluation centralisée est utilisée. La deuxième partie du" +" tutoriel sur les fleurs couvrira l'évaluation centralisée." -#: ../../source/tutorial-quickstart-xgboost.rst:5 -msgid "Quickstart XGBoost" -msgstr "Démarrage rapide XGBoost" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 +msgid "Final remarks" +msgstr "Remarques finales" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 +msgid "" +"Congratulations, you just trained a convolutional neural network, " +"federated over 10 clients! With that, you understand the basics of " +"federated learning with Flower. The same approach you've seen can be used" +" with other machine learning frameworks (not just PyTorch) and tasks (not" +" just CIFAR-10 images classification), for example NLP with Hugging Face " +"Transformers or speech with SpeechBrain." 
+msgstr "" +"Félicitations, tu viens d'entraîner un réseau neuronal convolutif, fédéré" +" sur 10 clients ! Avec ça, tu comprends les bases de l'apprentissage " +"fédéré avec Flower. La même approche que tu as vue peut être utilisée " +"avec d'autres cadres d'apprentissage automatique (pas seulement PyTorch) " +"et d'autres tâches (pas seulement la classification des images CIFAR-10)," +" par exemple le NLP avec Hugging Face Transformers ou la parole avec " +"SpeechBrain." -#: ../../source/tutorial-quickstart-xgboost.rst:7 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:715 msgid "" -"Let's build a horizontal federated learning system using XGBoost and " -"Flower!" +"In the next notebook, we're going to cover some more advanced concepts. " +"Want to customize your strategy? Initialize parameters on the server " +"side? Or evaluate the aggregated model on the server side? We'll cover " +"all this and more in the next tutorial." msgstr "" -"Construisons un système d'apprentissage fédéré horizontal en utilisant " -"XGBoost et Flower !" +"Dans le prochain cahier, nous allons aborder des concepts plus avancés. " +"Tu veux personnaliser ta stratégie ? Initialiser des paramètres côté " +"serveur ? Ou évaluer le modèle agrégé côté serveur ? Nous aborderons tout" +" cela et bien plus encore dans le prochain tutoriel." -#: ../../source/tutorial-quickstart-xgboost.rst:9 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 #, fuzzy msgid "" -"Please refer to the `full code example " -"`_ to learn more." +"The `Flower Federated Learning Tutorial - Part 2 " +"`__ goes into more depth about strategies and all " +"the advanced things you can build with them." msgstr "" -"Réfère-toi à l'exemple de code complet " -"`_ pour en savoir plus." +"Le `Tutoriel d'apprentissage fédéré Flower - Partie 2 " +"`__ va plus en profondeur sur les stratégies et toutes les " +"choses avancées que tu peux construire avec elles." 
-#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:9 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 #, fuzzy msgid "Use a federated learning strategy" msgstr "Stratégie de moyenne fédérée." -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:11 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 #, fuzzy msgid "" "Welcome to the next part of the federated learning tutorial. In previous " "parts of this tutorial, we introduced federated learning with PyTorch and" -" Flower (`part 1 `__)." msgstr "" "Bienvenue dans la prochaine partie du tutoriel sur l'apprentissage " "fédéré. Dans les parties précédentes de ce tutoriel, nous avons présenté " "l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " -"`__)." +"`__)." -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:13 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 msgid "" "In this notebook, we'll begin to customize the federated learning system " "we built in the introductory notebook (again, using `Flower " -"`__ and `PyTorch `__)." +"`__ and `PyTorch `__)." msgstr "" "Dans ce carnet, nous allons commencer à personnaliser le système " "d'apprentissage fédéré que nous avons construit dans le carnet " -"d'introduction (toujours en utilisant `Flower `__ et" +"d'introduction (toujours en utilisant `Flower `__ et" " `PyTorch `__)." -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:17 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 #, fuzzy msgid "Let's move beyond FedAvg with Flower strategies!" msgstr "Dépassons FedAvg avec les stratégies florales !" 
-#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:309 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:309 msgid "Strategy customization" msgstr "Personnalisation de la stratégie" -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:311 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:311 msgid "" "So far, everything should look familiar if you've worked through the " "introductory notebook. With that, we're ready to introduce a number of " @@ -14785,11 +21441,11 @@ msgstr "" "le cahier d'introduction. Avec cela, nous sommes prêts à présenter un " "certain nombre de nouvelles fonctionnalités." -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:323 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 msgid "Server-side parameter **initialization**" msgstr "Paramètres côté serveur **initialisation**" -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:325 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:325 msgid "" "Flower, by default, initializes the global model by asking one random " "client for the initial parameters. In many cases, we want more control " @@ -14802,7 +21458,7 @@ msgstr "" "Flower te permet donc de passer directement les paramètres initiaux à la " "Stratégie :" -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:370 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:370 msgid "" "Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" " from asking one of the clients for the initial parameters. If we look " @@ -14814,11 +21470,11 @@ msgstr "" " nous regardons de près, nous pouvons voir que les journaux ne montrent " "aucun appel à la méthode ``FlowerClient.get_parameters``." 
-#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:382 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:382 msgid "Starting with a customized strategy" msgstr "Commencer par une stratégie personnalisée" -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:384 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:384 msgid "" "We've seen the function ``start_simulation`` before. It accepts a number " "of arguments, amongst them the ``client_fn`` used to create " @@ -14830,7 +21486,7 @@ msgstr "" "nombre de clients à simuler ``num_clients``, le nombre de rounds " "``num_rounds``, et la stratégie." -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:386 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:386 msgid "" "The strategy encapsulates the federated learning approach/algorithm, for " "example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " @@ -14840,11 +21496,11 @@ msgstr "" "exemple, ``FedAvg`` ou ``FedAdagrad``. Essayons d'utiliser une stratégie " "différente cette fois-ci :" -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:424 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:424 msgid "Server-side parameter **evaluation**" msgstr "Paramètre côté serveur **évaluation**" -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:426 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:426 msgid "" "Flower can evaluate the aggregated model on the server-side or on the " "client-side. Client-side and server-side evaluation are similar in some " @@ -14854,7 +21510,7 @@ msgstr "" "évaluations côté client et côté serveur sont similaires à certains " "égards, mais différentes à d'autres." 
-#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:428 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:428 msgid "" "**Centralized Evaluation** (or *server-side evaluation*) is conceptually " "simple: it works the same way that evaluation in centralized machine " @@ -14873,7 +21529,7 @@ msgstr "" "le modèle aux clients. Nous avons également la chance que l'ensemble de " "notre ensemble de données d'évaluation soit disponible à tout moment." -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:430 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:430 msgid "" "**Federated Evaluation** (or *client-side evaluation*) is more complex, " "but also more powerful: it doesn't require a centralized dataset and " @@ -14904,7 +21560,7 @@ msgstr "" "le modèle, nous verrions nos résultats d'évaluation fluctuer au cours des" " cycles consécutifs." -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:433 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:433 msgid "" "We've seen how federated evaluation works on the client side (i.e., by " "implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " @@ -14915,11 +21571,11 @@ msgstr "" "``FlowerClient``). 
Voyons maintenant comment nous pouvons évaluer les " "paramètres du modèle agrégé du côté serveur :" -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:490 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:490 msgid "Sending/receiving arbitrary values to/from clients" msgstr "Envoi/réception de valeurs arbitraires vers/depuis les clients" -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:492 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:492 #, fuzzy msgid "" "In some situations, we want to configure client-side execution (training," @@ -14946,7 +21602,7 @@ msgstr "" " it reads ``server_round`` and ``local_epochs`` and uses those values to " "improve the logging and configure the number of local training epochs:" -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:546 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:546 msgid "" "So how can we send this config dictionary from server to clients? The " "built-in Flower Strategies provide way to do this, and it works similarly" @@ -14961,7 +21617,7 @@ msgstr "" "stratégie appelle cette fonction pour chaque cycle d'apprentissage fédéré" " :" -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:576 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:576 msgid "" "Next, we'll just pass this function to the FedAvg strategy before " "starting the simulation:" @@ -14969,7 +21625,7 @@ msgstr "" "Ensuite, nous allons simplement passer cette fonction à la stratégie " "FedAvg avant de commencer la simulation :" -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:613 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 msgid "" "As we can see, the client logs now include the current round of federated" " learning (which they read from the ``config`` dictionary). 
We can also " @@ -14984,7 +21640,7 @@ msgstr "" " premier et du deuxième cycle d'apprentissage fédéré, puis pendant deux " "époques au cours du troisième cycle." -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:615 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 msgid "" "Clients can also return arbitrary values to the server. To do so, they " "return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " @@ -14999,11 +21655,11 @@ msgstr "" "renvoie un dictionnaire contenant une paire clé/valeur personnalisée en " "tant que troisième valeur de retour dans ``evaluate``." -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:627 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:627 msgid "Scaling federated learning" msgstr "Mise à l'échelle de l'apprentissage fédéré" -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:629 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:629 msgid "" "As a last step in this notebook, let's see how we can use Flower to " "experiment with a large number of clients." @@ -15011,7 +21667,7 @@ msgstr "" "Comme dernière étape de ce carnet, voyons comment nous pouvons utiliser " "Flower pour expérimenter avec un grand nombre de clients." 
-#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:651 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:651 #, python-format msgid "" "We now have 1000 partitions, each holding 45 training and 5 validation " @@ -15036,7 +21692,7 @@ msgstr "" " disponibles (donc 50 clients) seront sélectionnés pour l'entraînement à " "chaque tour :" -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:699 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:699 msgid "" "In this notebook, we've seen how we can gradually enhance our system by " "customizing the strategy, initializing parameters on the server side, " @@ -15049,7 +21705,7 @@ msgstr "" "en évaluant les modèles côté serveur. C'est une sacrée flexibilité avec " "si peu de code, n'est-ce pas ?" -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:701 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:701 msgid "" "In the later sections, we've seen how we can communicate arbitrary values" " between server and clients to fully customize client-side execution. " @@ -15065,24 +21721,24 @@ msgstr "" "avons mené une expérience impliquant 1000 clients dans la même charge de " "travail - le tout dans un carnet Jupyter !" -#: ../../source/tutorial-use-a-federated-learning-strategy-pytorch.ipynb:719 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 #, fuzzy msgid "" "The `Flower Federated Learning Tutorial - Part 3 " -"`__ shows how to build a fully custom ``Strategy`` " "from scratch." msgstr "" "Le `Tutoriel d'apprentissage fédéré Flower - Partie 3 [WIP] " -"`__ montre comment construire une ``Stratégie`` entièrement " "personnalisée à partir de zéro." -#: ../../source/tutorial-what-is-federated-learning.ipynb:9 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 msgid "What is Federated Learning?" 
msgstr "Qu'est-ce que l'apprentissage fédéré ?" -#: ../../source/tutorial-what-is-federated-learning.ipynb:13 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 #, fuzzy msgid "" "In this tutorial, you will learn what federated learning is, build your " @@ -15096,7 +21752,7 @@ msgstr "" "seras capable de construire des systèmes d'apprentissage fédéré avancés " "qui se rapprochent de l'état actuel de l'art dans le domaine." -#: ../../source/tutorial-what-is-federated-learning.ipynb:15 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 msgid "" "🧑‍🏫 This tutorial starts at zero and expects no familiarity with " "federated learning. Only a basic understanding of data science and Python" @@ -15106,27 +21762,32 @@ msgstr "" "l'apprentissage fédéré. Seule une compréhension de base de la science des" " données et de la programmation Python est supposée." -#: ../../source/tutorial-what-is-federated-learning.ipynb:17 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 #, fuzzy msgid "" "`Star Flower on GitHub `__ ⭐️ and join " "the open-source Flower community on Slack to connect, ask questions, and " -"get help: `Join Slack `__ 🌼 We'd love to " +"get help: `Join Slack `__ 🌼 We'd love to " "hear from you in the ``#introductions`` channel! And if anything is " "unclear, head over to the ``#questions`` channel." msgstr "" "`Star Flower on GitHub `__ ⭐️ et " "rejoignez la communauté Flower sur Slack pour vous connecter, poser des " -"questions et obtenir de l'aide : `Join Slack `__ 🌼 Nous serions ravis d'avoir de vos nouvelles dans le canal " "``#introductions`` ! Et si quelque chose n'est pas clair, rendez-vous sur" " le canal ``#questions``." -#: ../../source/tutorial-what-is-federated-learning.ipynb:31 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 +#, fuzzy +msgid "Let's get started!" +msgstr "Allons-y, déclarons-le !" 
+ +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 msgid "Classic machine learning" msgstr "Apprentissage automatique classique" -#: ../../source/tutorial-what-is-federated-learning.ipynb:33 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 msgid "" "Before we begin to discuss federated learning, let us quickly recap how " "most machine learning works today." @@ -15135,7 +21796,7 @@ msgstr "" "rapidement la façon dont la plupart des apprentissages automatiques " "fonctionnent aujourd'hui." -#: ../../source/tutorial-what-is-federated-learning.ipynb:35 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 msgid "" "In machine learning, we have a model, and we have data. The model could " "be a neural network (as depicted here), or something else, like classical" @@ -15145,15 +21806,15 @@ msgstr "" " modèle peut être un réseau neuronal (comme illustré ici), ou quelque " "chose d'autre, comme la régression linéaire classique." -#: ../../source/tutorial-what-is-federated-learning.ipynb:41 -msgid "|3ff4c820a01d4a5abb022617de537c54|" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 +msgid "|31e4b1afa87c4b968327bbeafbf184d4|" msgstr "" -#: ../../source/tutorial-what-is-federated-learning.ipynb:109 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 msgid "Model and data" msgstr "Modèle et données" -#: ../../source/tutorial-what-is-federated-learning.ipynb:47 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 msgid "" "We train the model using the data to perform a useful task. A task could " "be to detect objects in images, transcribe an audio recording, or play a " @@ -15164,15 +21825,15 @@ msgstr "" "images, à transcrire un enregistrement audio ou à jouer à un jeu comme le" " Go." 
-#: ../../source/tutorial-what-is-federated-learning.ipynb:53 -msgid "|7f1889391ad448e2a65920165f0d798c|" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 +msgid "|c9d935b4284e4c389a33d86b33e07c0a|" msgstr "" -#: ../../source/tutorial-what-is-federated-learning.ipynb:111 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 msgid "Train model using data" msgstr "Entraîne le modèle à l'aide des données" -#: ../../source/tutorial-what-is-federated-learning.ipynb:59 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 #, fuzzy msgid "" "Now, in practice, the training data we work with doesn't originate on the" @@ -15182,7 +21843,7 @@ msgstr "" "travaillons ne proviennent pas de la machine sur laquelle nous entraînons" " le modèle. Elles sont créées ailleurs." -#: ../../source/tutorial-what-is-federated-learning.ipynb:61 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 #, fuzzy msgid "" "It originates on a smartphone by the user interacting with an app, a car " @@ -15195,15 +21856,15 @@ msgstr "" "parleur intelligent qui écoute quelqu'un qui essaie de chanter une " "chanson." -#: ../../source/tutorial-what-is-federated-learning.ipynb:67 -msgid "|a171dc4a0d044e70b5d585cc10ace0e0|" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 +msgid "|00727b5faffb468f84dd1b03ded88638|" msgstr "" -#: ../../source/tutorial-what-is-federated-learning.ipynb:113 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 msgid "Data on a phone" msgstr "Données sur un téléphone" -#: ../../source/tutorial-what-is-federated-learning.ipynb:73 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 msgid "" "What's also important to mention, this \"somewhere else\" is usually not " "just one place, it's many places. 
It could be several devices all running" @@ -15216,15 +21877,15 @@ msgstr "" "peut également s'agir de plusieurs organisations, qui génèrent toutes des" " données pour la même tâche." -#: ../../source/tutorial-what-is-federated-learning.ipynb:79 -msgid "|fe518aa0d86341f7b2fc87bd6e3bbf0c|" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 +msgid "|daf0cf0ff4c24fd29439af78416cf47b|" msgstr "" -#: ../../source/tutorial-what-is-federated-learning.ipynb:115 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 msgid "Data is on many devices" msgstr "Les données se trouvent sur de nombreux appareils" -#: ../../source/tutorial-what-is-federated-learning.ipynb:85 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 msgid "" "So to use machine learning, or any kind of data analysis, the approach " "that has been used in the past was to collect all data on a central " @@ -15237,15 +21898,15 @@ msgstr "" "trouver quelque part dans un centre de données, ou quelque part dans le " "cloud." -#: ../../source/tutorial-what-is-federated-learning.ipynb:91 -msgid "|6abfdf0dade44469ae9f08c8dc7d148c|" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 +msgid "|9f093007080d471d94ca90d3e9fde9b6|" msgstr "" -#: ../../source/tutorial-what-is-federated-learning.ipynb:117 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 msgid "Central data collection" msgstr "Collecte centralisée des données" -#: ../../source/tutorial-what-is-federated-learning.ipynb:97 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 #, fuzzy msgid "" "Once all the data is collected in one place, we can finally use machine " @@ -15258,19 +21919,19 @@ msgstr "" "automatique sur laquelle nous nous sommes fondamentalement toujours " "appuyés." 
-#: ../../source/tutorial-what-is-federated-learning.ipynb:103 -msgid "|b4f147db24bb4da9a786e1d6676a1c2d|" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 +msgid "|46a26e6150e0479fbd3dfd655f36eb13|" msgstr "" -#: ../../source/tutorial-what-is-federated-learning.ipynb:119 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 msgid "Central model training" msgstr "Formation au modèle central" -#: ../../source/tutorial-what-is-federated-learning.ipynb:130 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 msgid "Challenges of classical machine learning" msgstr "Les défis de l'apprentissage automatique classique" -#: ../../source/tutorial-what-is-federated-learning.ipynb:132 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 msgid "" "The classic machine learning approach we've just seen can be used in some" " cases. Great examples include categorizing holiday photos, or analyzing " @@ -15283,15 +21944,15 @@ msgstr "" "trafic web. Des cas, où toutes les données sont naturellement disponibles" " sur un serveur centralisé." -#: ../../source/tutorial-what-is-federated-learning.ipynb:138 -msgid "|5c62032f589a457bb37b5fee5b2adbde|" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 +msgid "|3daba297595c4c7fb845d90404a6179a|" msgstr "" -#: ../../source/tutorial-what-is-federated-learning.ipynb:173 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 msgid "Centralized possible" msgstr "Possibilité de centralisation" -#: ../../source/tutorial-what-is-federated-learning.ipynb:144 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 msgid "" "But the approach can not be used in many other cases. Cases, where the " "data is not available on a centralized server, or cases where the data " @@ -15302,15 +21963,15 @@ msgstr "" " ou lorsque les données disponibles sur un serveur ne sont pas " "suffisantes pour former un bon modèle." 
-#: ../../source/tutorial-what-is-federated-learning.ipynb:150 -msgid "|f154df1846dd44f79a94f1dc3ae8b088|" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 +msgid "|5769874fa9c4455b80b2efda850d39d7|" msgstr "" -#: ../../source/tutorial-what-is-federated-learning.ipynb:175 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 msgid "Centralized impossible" msgstr "Impossible de centraliser" -#: ../../source/tutorial-what-is-federated-learning.ipynb:156 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 #, fuzzy msgid "" "There are many reasons why the classic centralized machine learning " @@ -15322,7 +21983,7 @@ msgstr "" "grand nombre de cas d'utilisation très importants dans le monde réel, " "notamment :" -#: ../../source/tutorial-what-is-federated-learning.ipynb:158 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 #, fuzzy msgid "" "**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " @@ -15347,7 +22008,7 @@ msgstr "" "régies par des réglementations différentes en matière de protection des " "données." -#: ../../source/tutorial-what-is-federated-learning.ipynb:160 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 msgid "" "**User preference**: In addition to regulation, there are use cases where" " users just expect that no data leaves their device, ever. If you type " @@ -15365,7 +22026,7 @@ msgstr "" "'est-ce pas ? En fait, ce cas d'utilisation est la raison pour laquelle " "l'apprentissage fédéré a été inventé en premier lieu." -#: ../../source/tutorial-what-is-federated-learning.ipynb:161 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 #, fuzzy msgid "" "**Data volume**: Some sensors, like cameras, produce such a high data " @@ -15388,13 +22049,13 @@ msgstr "" "excessivement coûteuse pour les traiter et les stocker. Et la plupart de " "ces données ne sont même pas utiles." 
-#: ../../source/tutorial-what-is-federated-learning.ipynb:164 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 msgid "Examples where centralized machine learning does not work include:" msgstr "" "Voici quelques exemples où l'apprentissage automatique centralisé ne " "fonctionne pas :" -#: ../../source/tutorial-what-is-federated-learning.ipynb:166 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 #, fuzzy msgid "" "Sensitive healthcare records from multiple hospitals to train cancer " @@ -15403,7 +22064,7 @@ msgstr "" "Des dossiers médicaux sensibles provenant de plusieurs hôpitaux pour " "former des modèles de détection du cancer" -#: ../../source/tutorial-what-is-federated-learning.ipynb:167 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 msgid "" "Financial information from different organizations to detect financial " "fraud" @@ -15411,19 +22072,19 @@ msgstr "" "Informations financières provenant de différentes organisations pour " "détecter les fraudes financières" -#: ../../source/tutorial-what-is-federated-learning.ipynb:168 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 msgid "Location data from your electric car to make better range prediction" msgstr "" "Les données de localisation de ta voiture électrique pour mieux prédire " "l'autonomie" -#: ../../source/tutorial-what-is-federated-learning.ipynb:169 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 msgid "End-to-end encrypted messages to train better auto-complete models" msgstr "" "Messages cryptés de bout en bout pour former de meilleurs modèles " "d'autocomplétion" -#: ../../source/tutorial-what-is-federated-learning.ipynb:171 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 #, fuzzy msgid "" "The popularity of privacy-enhancing systems like the `Brave " @@ -15445,11 +22106,11 @@ msgstr "" "qui bénéficieraient de manière significative des récentes avancées en " "matière 
d'IA." -#: ../../source/tutorial-what-is-federated-learning.ipynb:186 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 msgid "Federated learning" msgstr "Apprentissage fédéré" -#: ../../source/tutorial-what-is-federated-learning.ipynb:188 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 msgid "" "Federated learning simply reverses this approach. It enables machine " "learning on distributed data by moving the training to the data, instead " @@ -15461,15 +22122,15 @@ msgstr "" "formation vers les données, au lieu de déplacer les données vers la " "formation. Voici l'explication en une seule phrase :" -#: ../../source/tutorial-what-is-federated-learning.ipynb:190 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 msgid "Central machine learning: move the data to the computation" msgstr "Apprentissage automatique central : déplace les données vers le calcul" -#: ../../source/tutorial-what-is-federated-learning.ipynb:191 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 msgid "Federated (machine) learning: move the computation to the data" msgstr "Apprentissage (machine) fédéré : déplacer le calcul vers les données" -#: ../../source/tutorial-what-is-federated-learning.ipynb:193 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 msgid "" "By doing so, it enables us to use machine learning (and other data " "science approaches) in areas where it wasn't possible before. We can now " @@ -15497,7 +22158,7 @@ msgstr "" " réinventés parce qu'ils ont maintenant accès à de vastes quantités de " "données auparavant inaccessibles." -#: ../../source/tutorial-what-is-federated-learning.ipynb:196 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 msgid "" "So how does federated learning work, exactly? Let's start with an " "intuitive explanation." @@ -15505,15 +22166,15 @@ msgstr "" "Comment fonctionne l'apprentissage fédéré ? 
Commençons par une " "explication intuitive." -#: ../../source/tutorial-what-is-federated-learning.ipynb:199 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 msgid "Federated learning in five steps" msgstr "L'apprentissage fédéré en cinq étapes" -#: ../../source/tutorial-what-is-federated-learning.ipynb:202 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 msgid "Step 0: Initialize global model" msgstr "Étape 0 : Initialisation du modèle global" -#: ../../source/tutorial-what-is-federated-learning.ipynb:204 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 msgid "" "We start by initializing the model on the server. This is exactly the " "same in classic centralized learning: we initialize the model parameters," @@ -15524,15 +22185,15 @@ msgstr "" " initialisons les paramètres du modèle, soit de façon aléatoire, soit à " "partir d'un point de contrôle précédemment sauvegardé." -#: ../../source/tutorial-what-is-federated-learning.ipynb:210 -msgid "|9d20be8160f7451fb0f33b194506503f|" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 +msgid "|ba47ffb421814b0f8f9fa5719093d839|" msgstr "" -#: ../../source/tutorial-what-is-federated-learning.ipynb:307 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 msgid "Initialize global model" msgstr "Initialise le modèle global" -#: ../../source/tutorial-what-is-federated-learning.ipynb:217 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 msgid "" "Step 1: Send model to a number of connected organizations/devices (client" " nodes)" @@ -15540,7 +22201,7 @@ msgstr "" "Étape 1 : envoyer le modèle à un certain nombre d'organisations/appareils" " connectés (nœuds clients)" -#: ../../source/tutorial-what-is-federated-learning.ipynb:219 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 #, fuzzy msgid "" "Next, we send the parameters of the global model to the connected client " @@ -15559,15 +22220,15 
@@ msgstr "" "est que la sélection d'un nombre croissant de nœuds clients a des " "rendements décroissants." -#: ../../source/tutorial-what-is-federated-learning.ipynb:225 -msgid "|3d949f76988443c59990d2e64f05c386|" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 +msgid "|aeac5bf79cbf497082e979834717e01b|" msgstr "" -#: ../../source/tutorial-what-is-federated-learning.ipynb:309 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 msgid "Send global model" msgstr "Envoyer le modèle global" -#: ../../source/tutorial-what-is-federated-learning.ipynb:232 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 msgid "" "Step 2: Train model locally on the data of each organization/device " "(client node)" @@ -15575,7 +22236,7 @@ msgstr "" "Étape 2 : Entraîne le modèle localement sur les données de chaque " "organisation/appareil (nœud client)" -#: ../../source/tutorial-what-is-federated-learning.ipynb:234 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 msgid "" "Now that all (selected) client nodes have the latest version of the " "global model parameters, they start the local training. They use their " @@ -15592,19 +22253,19 @@ msgstr "" "pendant un petit moment. Il peut s'agir d'une seule époque sur les " "données locales, ou même de quelques étapes (mini-batchs)." 
-#: ../../source/tutorial-what-is-federated-learning.ipynb:240 -msgid "|526c6d9140f6404f8a226d9056327b3b|" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 +msgid "|ce27ed4bbe95459dba016afc42486ba2|" msgstr "" -#: ../../source/tutorial-what-is-federated-learning.ipynb:311 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 msgid "Train on local data" msgstr "Forme-toi aux données locales" -#: ../../source/tutorial-what-is-federated-learning.ipynb:247 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 msgid "Step 3: Return model updates back to the server" msgstr "Étape 3 : Renvoyer les mises à jour du modèle au serveur" -#: ../../source/tutorial-what-is-federated-learning.ipynb:249 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 msgid "" "After local training, each client node has a slightly different version " "of the model parameters they originally received. The parameters are all " @@ -15623,21 +22284,21 @@ msgstr "" "du modèle, soit seulement les gradients qui ont été accumulés au cours de" " l'entraînement local." 
-#: ../../source/tutorial-what-is-federated-learning.ipynb:255 -msgid "|a5f6af14cd7c4550929b17f83b4f63c7|" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 +msgid "|ae94a7f71dda443cbec2385751427d41|" msgstr "" -#: ../../source/tutorial-what-is-federated-learning.ipynb:313 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 msgid "Send model updates" msgstr "Envoyer les mises à jour du modèle" -#: ../../source/tutorial-what-is-federated-learning.ipynb:262 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 msgid "Step 4: Aggregate model updates into a new global model" msgstr "" "Étape 4 : Agréger les mises à jour des modèles dans un nouveau modèle " "global" -#: ../../source/tutorial-what-is-federated-learning.ipynb:264 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 msgid "" "The server receives model updates from the selected client nodes. If it " "selected 100 client nodes, it now has 100 slightly different versions of " @@ -15652,7 +22313,7 @@ msgstr "" "voulions-nous pas avoir un seul modèle qui contienne les apprentissages " "des données de l'ensemble des 100 nœuds clients ?" -#: ../../source/tutorial-what-is-federated-learning.ipynb:266 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 msgid "" "In order to get one single model, we have to combine all the model " "updates we received from the client nodes. This process is called " @@ -15682,19 +22343,19 @@ msgstr "" "weighting - each of the 10 examples would influence the global model ten " "times as much as each of the 100 examples." 
-#: ../../source/tutorial-what-is-federated-learning.ipynb:273 -msgid "|bcd571c4f4ee4803a54f71b5c20448cb|" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 +msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" msgstr "" -#: ../../source/tutorial-what-is-federated-learning.ipynb:315 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 msgid "Aggregate model updates" msgstr "Mises à jour globales du modèle" -#: ../../source/tutorial-what-is-federated-learning.ipynb:280 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 msgid "Step 5: Repeat steps 1 to 4 until the model converges" msgstr "Étape 5 : répète les étapes 1 à 4 jusqu'à ce que le modèle converge" -#: ../../source/tutorial-what-is-federated-learning.ipynb:282 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 msgid "" "Steps 1 to 4 are what we call a single round of federated learning. The " "global model parameters get sent to the participating client nodes (step " @@ -15709,7 +22370,7 @@ msgstr "" " serveur (étape 3), et le serveur agrège ensuite les mises à jour du " "modèle pour obtenir une nouvelle version du modèle global (étape 4)." -#: ../../source/tutorial-what-is-federated-learning.ipynb:284 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 #, fuzzy msgid "" "During a single round, each client node that participates in that " @@ -15729,7 +22390,7 @@ msgstr "" "modèle entièrement entraîné qui fonctionne bien sur l'ensemble des " "données de tous les nœuds clients." -#: ../../source/tutorial-what-is-federated-learning.ipynb:289 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 msgid "" "Congratulations, you now understand the basics of federated learning. " "There's a lot more to discuss, of course, but that was federated learning" @@ -15748,7 +22409,7 @@ msgstr "" "meilleure façon d'agréger les mises à jour du modèle ? Comment pouvons-" "nous gérer les nœuds clients qui échouent (stragglers) ?" 
-#: ../../source/tutorial-what-is-federated-learning.ipynb:294 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 #, fuzzy msgid "" "Just like we can train a model on the decentralized data of different " @@ -15764,11 +22425,11 @@ msgstr "" "fédérée fait partie intégrante de la plupart des systèmes d'apprentissage" " fédéré." -#: ../../source/tutorial-what-is-federated-learning.ipynb:297 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 msgid "Federated analytics" msgstr "Analyses fédérées" -#: ../../source/tutorial-what-is-federated-learning.ipynb:299 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 msgid "" "In many cases, machine learning isn't necessary to derive value from " "data. Data analysis can yield valuable insights, but again, there's often" @@ -15790,12 +22451,12 @@ msgstr "" "empêcher le serveur de voir les résultats soumis par les nœuds clients " "individuels." -#: ../../source/tutorial-what-is-federated-learning.ipynb:303 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 #, fuzzy msgid "Differential Privacy" msgstr "Confidentialité différentielle" -#: ../../source/tutorial-what-is-federated-learning.ipynb:305 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 msgid "" "Differential privacy (DP) is often mentioned in the context of Federated " "Learning. It is a privacy-preserving method used when analyzing and " @@ -15806,11 +22467,11 @@ msgid "" "optimization that provides a quantifiable privacy protection measure." 
msgstr "" -#: ../../source/tutorial-what-is-federated-learning.ipynb:326 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 msgid "Flower" msgstr "Fleur" -#: ../../source/tutorial-what-is-federated-learning.ipynb:328 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 msgid "" "Federated learning, federated evaluation, and federated analytics require" " infrastructure to move machine learning models back and forth, train and" @@ -15830,11 +22491,11 @@ msgstr "" "l'utilisateur de fédérer n'importe quelle charge de travail, n'importe " "quel cadre de ML et n'importe quel langage de programmation." -#: ../../source/tutorial-what-is-federated-learning.ipynb:334 -msgid "|c76452ae1ed84965be7ef23c72b95845|" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 +msgid "|08cb60859b07461588fe44e55810b050|" msgstr "" -#: ../../source/tutorial-what-is-federated-learning.ipynb:340 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 msgid "" "Flower federated learning server and client nodes (car, scooter, personal" " computer, roomba, and phone)" @@ -15842,7 +22503,7 @@ msgstr "" "Serveur d'apprentissage fédéré de Flower et nœuds clients (voiture, " "scooter, ordinateur personnel, roomba et téléphone)" -#: ../../source/tutorial-what-is-federated-learning.ipynb:353 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 msgid "" "Congratulations, you just learned the basics of federated learning and " "how it relates to the classic (centralized) machine learning!" @@ -15850,7 +22511,7 @@ msgstr "" "Félicitations, tu viens d'apprendre les bases de l'apprentissage fédéré " "et son rapport avec l'apprentissage automatique classique (centralisé) !" -#: ../../source/tutorial-what-is-federated-learning.ipynb:355 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 msgid "" "In the next part of this tutorial, we are going to build a first " "federated learning system with Flower." 
@@ -15858,16 +22519,16 @@ msgstr "" "Dans la prochaine partie de ce tutoriel, nous allons construire un " "premier système d'apprentissage fédéré avec Flower." -#: ../../source/tutorial-what-is-federated-learning.ipynb:373 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 #, fuzzy msgid "" "The `Flower Federated Learning Tutorial - Part 1 " -"`__ shows how to build a simple federated learning system " "with PyTorch and Flower." msgstr "" "Le `Tutoriel d'apprentissage fédéré Flower - Partie 1 " -"`__ " +"`__ " "montre comment construire un système d'apprentissage fédéré simple avec " "PyTorch et Flower." @@ -17225,14 +23886,13 @@ msgstr "" #~ " and manages `virtual` clients. These " #~ "clients are identical to `non-virtual`" #~ " clients (i.e. the ones you launch" -#~ " via the command `flwr.client.start_numpy_client" -#~ " `_)" -#~ " in the sense that they can be" -#~ " configure by creating a class " -#~ "inheriting, for example, from " -#~ "`flwr.client.NumPyClient `_ and therefore " -#~ "behave in an identical way. In " +#~ " via the command `flwr.client.start_client " +#~ "`_) in the" +#~ " sense that they can be configure " +#~ "by creating a class inheriting, for " +#~ "example, from `flwr.client.NumPyClient `_ and therefore" +#~ " behave in an identical way. 
In " #~ "addition to that, clients managed by " #~ "the :code:`VirtualClientEngine` are:" #~ msgstr "" @@ -17709,7 +24369,7 @@ msgstr "" #~ msgstr "Documentation de Flower" #~ msgid "PyTorch" -#~ msgstr "PyTorch" +#~ msgstr "Exemples de PyTorch" #~ msgid "TensorFlow" #~ msgstr "TensorFlow" @@ -17718,7 +24378,7 @@ msgstr "" #~ msgstr "Client de Flower" #~ msgid "flwr (Python API reference)" -#~ msgstr "flwr (paquet Python)" +#~ msgstr "Référence pour l'API" #~ msgid "Unreleased" #~ msgstr "Inédit" @@ -17913,10 +24573,10 @@ msgstr "" #~ msgid "" #~ "`Android Kotlin example " -#~ "`_" +#~ "`_" #~ msgstr "" -#~ msgid "`Android Java example `_" +#~ msgid "`Android Java example `_" #~ msgstr "" #~ msgid "Build a strategy from scratch" @@ -17969,7 +24629,7 @@ msgstr "" #~ "Flower Python server, it is recommended" #~ " to create a virtual environment and" #~ " run everything within a `virtualenv " -#~ "`_." +#~ "`_." #~ " For the Flower client implementation " #~ "in iOS, it is recommended to use" #~ " Xcode as our IDE." @@ -17978,7 +24638,7 @@ msgstr "" #~ "serveur Flower Python, il est recommandé" #~ " de créer un environnement virtuel et" #~ " de tout exécuter au sein d'un " -#~ "`virtualenv `_. Pour l'implémentation du client" #~ " Flower dans iOS, il est recommandé" #~ " d'utiliser Xcode comme notre IDE." @@ -18241,7 +24901,7 @@ msgstr "" #~ "`__ ⭐️ and join " #~ "the open-source Flower community on " #~ "Slack to connect, ask questions, and " -#~ "get help: `Join Slack `__ 🌼 We'd love to hear" #~ " from you in the ``#introductions`` " #~ "channel! And if anything is unclear, " @@ -18252,7 +24912,7 @@ msgstr "" #~ " la communauté open-source Flower sur" #~ " Slack pour vous connecter, poser des" #~ " questions et obtenir de l'aide : " -#~ "`Join Slack `__ " +#~ "`Join Slack `__ " #~ "🌼 Nous serions ravis d'avoir de " #~ "vos nouvelles dans le canal " #~ "``#introductions`` ! 
Et si quelque chose" @@ -18582,7 +25242,7 @@ msgstr "" #~ "Please make sure to add your " #~ "baseline or experiment to the " #~ "corresponding directory as explained in " -#~ "`Executing Baseline `_. Give your baseline the " #~ "unique identifier. For example, :code:`fedbn`" #~ " refers to the paper \"FedBN: " @@ -18814,7 +25474,7 @@ msgstr "" #~ " papers. If you want to add a" #~ " new baseline or experiment, please " #~ "check the `Contributing Baselines " -#~ "`_ " +#~ "`_ " #~ "section." #~ msgstr "" @@ -18913,3 +25573,228 @@ msgstr "" #~ msgid "|025f0a6f7a6145cba4bf8fa0e2495851|" #~ msgstr "" +#~ msgid "Before the release" +#~ msgstr "Avant la sortie" + +#~ msgid "" +#~ "Update the changelog (``changelog.md``) with" +#~ " all relevant changes that happened " +#~ "after the last release. If the " +#~ "last release was tagged ``v1.2.0``, you" +#~ " can use the following URL to " +#~ "see all commits that got merged " +#~ "into ``main`` since then:" +#~ msgstr "" +#~ "Mettez à jour le journal des " +#~ "modifications (``changelog.md``) avec tous les" +#~ " changements pertinents qui se sont " +#~ "produits après la dernière version. Si" +#~ " la dernière version a été étiquetée" +#~ " ``v1.2.0``, vous pouvez utiliser l'URL " +#~ "suivante pour voir tous les commits " +#~ "qui ont été fusionnés dans ``main`` " +#~ "depuis lors :" + +#~ msgid "" +#~ "`GitHub: Compare v1.2.0...main " +#~ "`_" +#~ msgstr "" +#~ "`GitHub : Compare v1.2.0...main " +#~ "`_" + +#~ msgid "" +#~ "Thank the authors who contributed since" +#~ " the last release. This command helps" +#~ " extract them: ``git log --format='%aN' " +#~ "v1.1.0..HEAD | sort -u``. The command" +#~ " has the same order as ``git " +#~ "shortlog``." +#~ msgstr "" +#~ "Remerciez les auteurs qui ont contribué" +#~ " depuis la dernière version. Cette " +#~ "commande permet de les extraire : " +#~ "``git log --format='%aN' v1.1.0..HEAD | " +#~ "sort -u``. La commande a le même" +#~ " ordre que ``git shortlog``." 
+ +#~ msgid "" +#~ "Update the ``changelog.md`` section header " +#~ "``Unreleased`` to contain the version " +#~ "number and date for the release " +#~ "you are building. Create a pull " +#~ "request with the change." +#~ msgstr "" +#~ "Mettez à jour l'en-tête de section" +#~ " ``changelog.md`` ``Unreleased`` pour qu'il " +#~ "contienne le numéro de version et " +#~ "la date de la version que vous " +#~ "construisez. Créez une demande de " +#~ "traction avec le changement." + +#~ msgid "" +#~ "Tag the release commit with the " +#~ "version number as soon as the PR" +#~ " is merged: ``git tag v0.12.3``, then" +#~ " ``git push --tags``" +#~ msgstr "" +#~ "Marquez le commit de la version " +#~ "avec le numéro de version dès que" +#~ " le PR est fusionné : ``git tag" +#~ " v0.12.3``, puis ``git push --tags``" + +#~ msgid "" +#~ "Build the release with ``./dev/build.sh``, " +#~ "then publish it with ``./dev/publish.sh``" +#~ msgstr "" +#~ "Construisez la version avec " +#~ "``./dev/build.sh``, puis publiez-la avec " +#~ "``./dev/publish.sh``" + +#~ msgid "" +#~ "Create an entry in GitHub releases " +#~ "with the release notes for the " +#~ "previously tagged commit and attach the" +#~ " build artifacts (:code:`.whl` and " +#~ ":code:`.tar.gz`)." +#~ msgstr "" +#~ "Crée une entrée dans GitHub releases " +#~ "avec les notes de version pour le" +#~ " commit précédemment étiqueté et attache" +#~ " les artefacts de construction " +#~ "(:code:`.whl` et :code:`.tar.gz`)." + +#~ msgid "" +#~ "Second, create a virtual environment " +#~ "(and activate it). If you chose to" +#~ " use :code:`pyenv` (with the :code" +#~ ":`pyenv-virtualenv` plugin) and already " +#~ "have it installed , you can use" +#~ " the following convenience script (by " +#~ "default it will use :code:`Python " +#~ "3.8.17`, but you can change it by" +#~ " providing a specific :code:``)::" +#~ msgstr "" +#~ "Deuxièmement, créer un environnement virtuel" +#~ " (et l'activer). 
Si vous choisissez " +#~ "d'utiliser :code:`pyenv` (avec le plugin " +#~ ":code:`pyenv-virtualenv`) et que vous " +#~ "l'avez déjà installé, vous pouvez " +#~ "utiliser le script suivant (par défaut" +#~ " il utilisera :code:`Python 3.8.17`, mais" +#~ " vous pouvez le changer en " +#~ "fournissant une :code:`` spécifique)::" + +#~ msgid "server.strategy.FedAvg" +#~ msgstr "serveur.stratégie.FedAvg" + +#~ msgid "server.strategy.FedAvgM" +#~ msgstr "stratégie.serveur.FedAvgM" + +#~ msgid "server.strategy.QFedAvg" +#~ msgstr "server.strategy.QFedAvg" + +#~ msgid "server.strategy.FedOpt" +#~ msgstr "serveur.stratégie.FedOpt" + +#~ msgid "server.strategy.FedProx" +#~ msgstr "serveur.stratégie.FedProx" + +#~ msgid "server.strategy.FedAdagrad" +#~ msgstr "serveur.stratégie.FedAdagrad" + +#~ msgid "server.strategy.FedAdam" +#~ msgstr "serveur.stratégie.FedAdam" + +#~ msgid "server.strategy.FedYogi" +#~ msgstr "serveur.stratégie.FedYogi" + +#~ msgid "" +#~ "`achiverram28`, `Adam Narozniak`, `Anass " +#~ "Anhari`, `Charles Beauville`, `Dana-Farber`," +#~ " `Daniel J. 
Beutel`, `Daniel Nata " +#~ "Nugraha`, `Edoardo Gabrielli`, `eunchung`, " +#~ "`Gustavo Bertoli`, `Heng Pan`, `Javier`, " +#~ "`Mahdi`, `Ruth Galindo`, `Steven Hé " +#~ "(Sīchàng)`, `Taner Topal`" +#~ msgstr "" + +#~ msgid "" +#~ "Let's now load the CIFAR-10 training " +#~ "and test set, partition them into " +#~ "ten smaller datasets (each split into" +#~ " training and validation set), and " +#~ "wrap the resulting partitions by " +#~ "creating a PyTorch ``DataLoader`` for " +#~ "each of them:" +#~ msgstr "" +#~ "Chargeons maintenant l'ensemble de formation" +#~ " et de test CIFAR-10, partitionnons-" +#~ "les en dix ensembles de données " +#~ "plus petits (chacun divisé en ensemble" +#~ " de formation et de validation), et" +#~ " enveloppons les partitions résultantes en" +#~ " créant un PyTorch ``DataLoader`` pour " +#~ "chacun d'entre eux :" + +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using XGBoost and " +#~ "Flower!" +#~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " horizontal en utilisant XGBoost et " +#~ "Flower !" + +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." +#~ msgstr "" +#~ "Réfère-toi à l'exemple de code " +#~ "complet `_ pour en " +#~ "savoir plus." 
+ +#~ msgid "|3ff4c820a01d4a5abb022617de537c54|" +#~ msgstr "" + +#~ msgid "|7f1889391ad448e2a65920165f0d798c|" +#~ msgstr "" + +#~ msgid "|a171dc4a0d044e70b5d585cc10ace0e0|" +#~ msgstr "" + +#~ msgid "|fe518aa0d86341f7b2fc87bd6e3bbf0c|" +#~ msgstr "" + +#~ msgid "|6abfdf0dade44469ae9f08c8dc7d148c|" +#~ msgstr "" + +#~ msgid "|b4f147db24bb4da9a786e1d6676a1c2d|" +#~ msgstr "" + +#~ msgid "|5c62032f589a457bb37b5fee5b2adbde|" +#~ msgstr "" + +#~ msgid "|f154df1846dd44f79a94f1dc3ae8b088|" +#~ msgstr "" + +#~ msgid "|9d20be8160f7451fb0f33b194506503f|" +#~ msgstr "" + +#~ msgid "|3d949f76988443c59990d2e64f05c386|" +#~ msgstr "" + +#~ msgid "|526c6d9140f6404f8a226d9056327b3b|" +#~ msgstr "" + +#~ msgid "|a5f6af14cd7c4550929b17f83b4f63c7|" +#~ msgstr "" + +#~ msgid "|bcd571c4f4ee4803a54f71b5c20448cb|" +#~ msgstr "" + +#~ msgid "|c76452ae1ed84965be7ef23c72b95845|" +#~ msgstr "" + diff --git a/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po b/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po index 4bb195306670..359458e8db57 100644 --- a/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po +++ b/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2023-10-19 07:55+0200\n" +"POT-Creation-Date: 2024-02-13 11:23+0100\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language: pt_BR\n" @@ -17,7 +17,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.12.1\n" +"Generated-By: Babel 2.13.1\n" #: ../../source/contributor-explanation-architecture.rst:2 msgid "Flower Architecture" @@ -29,7 +29,7 @@ msgstr "" #: ../../source/contributor-explanation-architecture.rst:7 msgid "" -"`Flower `_ core framework architecture with Edge " +"`Flower `_ core framework architecture with Edge " "Client Engine" msgstr "" @@ -39,7 +39,7 @@ msgstr "" #: 
../../source/contributor-explanation-architecture.rst:15 msgid "" -"`Flower `_ core framework architecture with Virtual " +"`Flower `_ core framework architecture with Virtual " "Client Engine" msgstr "" @@ -49,121 +49,229 @@ msgstr "" #: ../../source/contributor-explanation-architecture.rst:23 msgid "" -"`Flower `_ core framework architecture with both " +"`Flower `_ core framework architecture with both " "Virtual Client Engine and Edge Client Engine" msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:2 -msgid "Creating New Messages" +#: ../../source/contributor-how-to-build-docker-images.rst:2 +msgid "How to build Docker Flower images locally" msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:4 +#: ../../source/contributor-how-to-build-docker-images.rst:4 msgid "" -"This is a simple guide for creating a new type of message between the " -"server and clients in Flower." +"Flower provides pre-made docker images on `Docker Hub " +"`_ that include all necessary " +"dependencies for running the server. You can also build your own custom " +"docker images from scratch with a different version of Python or Ubuntu " +"if that is what you need. In this guide, we will explain what images " +"exist and how to build them locally." msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:6 +#: ../../source/contributor-how-to-build-docker-images.rst:9 msgid "" -"Let's suppose we have the following example functions in " -":code:`server.py` and :code:`numpy_client.py`..." +"Before we can start, we need to meet a few prerequisites in our local " +"development environment." msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:8 -msgid "Server's side:" +#: ../../source/contributor-how-to-build-docker-images.rst:11 +msgid "Clone the flower repository." 
msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:17 -msgid "Client's side:" +#: ../../source/contributor-how-to-build-docker-images.rst:17 +msgid "Verify the Docker daemon is running." msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:26 +#: ../../source/contributor-how-to-build-docker-images.rst:19 msgid "" -"Let's now see what we need to implement in order to get this simple " -"function between the server and client to work!" +"Please follow the first section on `Run Flower using Docker " +"`_ " +"which covers this step in more detail." msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:30 -msgid "Message Types for Protocol Buffers" +#: ../../source/contributor-how-to-build-docker-images.rst:23 +msgid "" +"Currently, Flower provides two images, a base image and a server image. " +"There will also be a client image soon. The base image, as the name " +"suggests, contains basic dependencies that both the server and the client" +" need. This includes system dependencies, Python and Python tools. The " +"server image is based on the base image, but it additionally installs the" +" Flower server using ``pip``." msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:32 +#: ../../source/contributor-how-to-build-docker-images.rst:28 msgid "" -"The first thing we need to do is to define a message type for the RPC " -"system in :code:`transport.proto`. Note that we have to do it for both " -"the request and response messages. For more details on the syntax of " -"proto3, please see the `official documentation " -"`_." +"The build instructions that assemble the images are located in the " +"respective Dockerfiles. You can find them in the subdirectories of " +"``src/docker``." 
msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:35 -msgid "Within the :code:`ServerMessage` block:" +#: ../../source/contributor-how-to-build-docker-images.rst:31 +msgid "" +"Both, base and server image are configured via build arguments. Through " +"build arguments, we can make our build more flexible. For example, in the" +" base image, we can specify the version of Python to install using the " +"``PYTHON_VERSION`` build argument. Some of the build arguments have " +"default values, others must be specified when building the image. All " +"available build arguments for each image are listed in one of the tables " +"below." msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:52 -msgid "Within the ClientMessage block:" +#: ../../source/contributor-how-to-build-docker-images.rst:38 +msgid "Building the base image" msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:70 -msgid "" -"Make sure to also add a field of the newly created message type in " -":code:`oneof msg`." 
+#: ../../source/contributor-how-to-build-docker-images.rst:44 +#: ../../source/contributor-how-to-build-docker-images.rst:86 +msgid "Build argument" msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:72 -msgid "Once that is done, we will compile the file with:" +#: ../../source/contributor-how-to-build-docker-images.rst:45 +#: ../../source/contributor-how-to-build-docker-images.rst:87 +msgid "Description" msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:78 -msgid "If it compiles succesfully, you should see the following message:" +#: ../../source/contributor-how-to-build-docker-images.rst:46 +#: ../../source/contributor-how-to-build-docker-images.rst:88 +msgid "Required" msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:87 -msgid "Serialization and Deserialization Functions" +#: ../../source/contributor-how-to-build-docker-images.rst:47 +#: ../../source/contributor-how-to-build-docker-images.rst:89 +msgid "Example" msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:89 -msgid "" -"Our next step is to add functions to serialize and deserialize Python " -"datatypes to or from our defined RPC message types. You should add these " -"functions in :code:`serde.py`." +#: ../../source/contributor-how-to-build-docker-images.rst:48 +msgid "``PYTHON_VERSION``" msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:91 -msgid "The four functions:" +#: ../../source/contributor-how-to-build-docker-images.rst:49 +msgid "Version of ``python`` to be installed." 
msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:112 -msgid "Sending the Message from the Server" +#: ../../source/contributor-how-to-build-docker-images.rst:50 +#: ../../source/contributor-how-to-build-docker-images.rst:54 +#: ../../source/contributor-how-to-build-docker-images.rst:58 +#: ../../source/contributor-how-to-build-docker-images.rst:100 +msgid "Yes" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:51 +msgid "``3.11``" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:52 +msgid "``PIP_VERSION``" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:53 +msgid "Version of ``pip`` to be installed." +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:55 +msgid "``23.0.1``" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:56 +msgid "``SETUPTOOLS_VERSION``" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:57 +msgid "Version of ``setuptools`` to be installed." +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:59 +msgid "``69.0.2``" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:60 +msgid "``UBUNTU_VERSION``" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:61 +msgid "Version of the official Ubuntu Docker image." +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:62 +msgid "Defaults to ``22.04``." 
msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:114 +#: ../../source/contributor-how-to-build-docker-images.rst:65 msgid "" -"Now write the request function in your Client Proxy class (e.g., " -":code:`grpc_client_proxy.py`) using the serde functions you just created:" +"The following example creates a base image with Python 3.11.0, pip 23.0.1" +" and setuptools 69.0.2:" msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:128 -msgid "Receiving the Message by the Client" +#: ../../source/contributor-how-to-build-docker-images.rst:76 +msgid "" +"The name of image is ``flwr_base`` and the tag ``0.1.0``. Remember that " +"the build arguments as well as the name and tag can be adapted to your " +"needs. These values serve as examples only." +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:80 +msgid "Building the server image" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:90 +msgid "``BASE_REPOSITORY``" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:91 +msgid "The repository name of the base image." +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:92 +msgid "Defaults to ``flwr/server``." +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:94 +msgid "``BASE_IMAGE_TAG``" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:95 +msgid "The image tag of the base image." +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:96 +msgid "Defaults to ``py3.11-ubuntu22.04``." +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:98 +msgid "``FLWR_VERSION``" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:99 +msgid "Version of Flower to be installed." 
+msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:101 +msgid "``1.7.0``" msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:130 +#: ../../source/contributor-how-to-build-docker-images.rst:103 msgid "" -"Last step! Modify the code in :code:`message_handler.py` to check the " -"field of your message and call the :code:`example_response` function. " -"Remember to use the serde functions!" +"The following example creates a server image with the official Flower " +"base image py3.11-ubuntu22.04 and Flower 1.7.0:" msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:132 -msgid "Within the handle function:" +#: ../../source/contributor-how-to-build-docker-images.rst:114 +msgid "" +"The name of image is ``flwr_server`` and the tag ``0.1.0``. Remember that" +" the build arguments as well as the name and tag can be adapted to your " +"needs. These values serve as examples only." msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:139 -msgid "And add a new function:" +#: ../../source/contributor-how-to-build-docker-images.rst:117 +msgid "" +"If you want to use your own base image instead of the official Flower " +"base image, all you need to do is set the ``BASE_REPOSITORY`` and " +"``BASE_IMAGE_TAG`` build arguments. The value of ``BASE_REPOSITORY`` must" +" match the name of your image and the value of ``BASE_IMAGE_TAG`` must " +"match the tag of your image." msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:149 -msgid "Hopefully, when you run your program you will get the intended result!" 
+#: ../../source/contributor-how-to-build-docker-images.rst:131 +msgid "After creating the image, we can test whether the image is working:" msgstr "" #: ../../source/contributor-how-to-contribute-translations.rst:2 @@ -172,7 +280,7 @@ msgstr "" #: ../../source/contributor-how-to-contribute-translations.rst:4 msgid "" -"Since `Flower 1.5 `_ we have introduced translations to " "our doc pages, but, as you might have noticed, the translations are often" " imperfect. If you speak languages other than English, you might be able " @@ -268,10 +376,123 @@ msgstr "" #: ../../source/contributor-how-to-contribute-translations.rst:69 msgid "" "If you want to add a new language, you will first have to contact us, " -"either on `Slack `_, or by opening an " +"either on `Slack `_, or by opening an " "issue on our `GitHub repo `_." msgstr "" +#: ../../source/contributor-how-to-create-new-messages.rst:2 +msgid "Creating New Messages" +msgstr "" + +#: ../../source/contributor-how-to-create-new-messages.rst:4 +msgid "" +"This is a simple guide for creating a new type of message between the " +"server and clients in Flower." +msgstr "" + +#: ../../source/contributor-how-to-create-new-messages.rst:6 +msgid "" +"Let's suppose we have the following example functions in " +":code:`server.py` and :code:`numpy_client.py`..." +msgstr "" + +#: ../../source/contributor-how-to-create-new-messages.rst:8 +msgid "Server's side:" +msgstr "" + +#: ../../source/contributor-how-to-create-new-messages.rst:17 +msgid "Client's side:" +msgstr "" + +#: ../../source/contributor-how-to-create-new-messages.rst:26 +msgid "" +"Let's now see what we need to implement in order to get this simple " +"function between the server and client to work!" 
+msgstr "" + +#: ../../source/contributor-how-to-create-new-messages.rst:30 +msgid "Message Types for Protocol Buffers" +msgstr "" + +#: ../../source/contributor-how-to-create-new-messages.rst:32 +msgid "" +"The first thing we need to do is to define a message type for the RPC " +"system in :code:`transport.proto`. Note that we have to do it for both " +"the request and response messages. For more details on the syntax of " +"proto3, please see the `official documentation " +"`_." +msgstr "" + +#: ../../source/contributor-how-to-create-new-messages.rst:35 +msgid "Within the :code:`ServerMessage` block:" +msgstr "" + +#: ../../source/contributor-how-to-create-new-messages.rst:52 +msgid "Within the ClientMessage block:" +msgstr "" + +#: ../../source/contributor-how-to-create-new-messages.rst:70 +msgid "" +"Make sure to also add a field of the newly created message type in " +":code:`oneof msg`." +msgstr "" + +#: ../../source/contributor-how-to-create-new-messages.rst:72 +msgid "Once that is done, we will compile the file with:" +msgstr "" + +#: ../../source/contributor-how-to-create-new-messages.rst:78 +msgid "If it compiles succesfully, you should see the following message:" +msgstr "" + +#: ../../source/contributor-how-to-create-new-messages.rst:87 +msgid "Serialization and Deserialization Functions" +msgstr "" + +#: ../../source/contributor-how-to-create-new-messages.rst:89 +msgid "" +"Our next step is to add functions to serialize and deserialize Python " +"datatypes to or from our defined RPC message types. You should add these " +"functions in :code:`serde.py`." 
+msgstr "" + +#: ../../source/contributor-how-to-create-new-messages.rst:91 +msgid "The four functions:" +msgstr "" + +#: ../../source/contributor-how-to-create-new-messages.rst:112 +msgid "Sending the Message from the Server" +msgstr "" + +#: ../../source/contributor-how-to-create-new-messages.rst:114 +msgid "" +"Now write the request function in your Client Proxy class (e.g., " +":code:`grpc_client_proxy.py`) using the serde functions you just created:" +msgstr "" + +#: ../../source/contributor-how-to-create-new-messages.rst:128 +msgid "Receiving the Message by the Client" +msgstr "" + +#: ../../source/contributor-how-to-create-new-messages.rst:130 +msgid "" +"Last step! Modify the code in :code:`message_handler.py` to check the " +"field of your message and call the :code:`example_response` function. " +"Remember to use the serde functions!" +msgstr "" + +#: ../../source/contributor-how-to-create-new-messages.rst:132 +msgid "Within the handle function:" +msgstr "" + +#: ../../source/contributor-how-to-create-new-messages.rst:139 +msgid "And add a new function:" +msgstr "" + +#: ../../source/contributor-how-to-create-new-messages.rst:149 +msgid "Hopefully, when you run your program you will get the intended result!" +msgstr "" + #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:2 msgid "Develop in VSCode Dev Containers" msgstr "" @@ -320,10 +541,11 @@ msgstr "" msgid "" "Configuring and setting up the :code:`Dockerfile` as well the " "configuration for the devcontainer can be a bit more involved. The good " -"thing is you want have to do it. Usually it should be enough to install " -"Docker on your system and ensure its available on your command line. " -"Additionally, install the `VSCode Containers Extension `_." +"thing is you don't have to do it. Usually it should be enough to install " +"`Docker `_ on your system and " +"ensure its available on your command line. Additionally, install the " +"`VSCode Containers Extension `_." 
msgstr "" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:19 @@ -408,13 +630,13 @@ msgstr "" #: ../../source/contributor-how-to-install-development-versions.rst:22 msgid "" -"``flwr = { path = \"../../dist/flwr-1.0.0-py3-none-any.whl\" }`` (without" +"``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (without" " extras)" msgstr "" #: ../../source/contributor-how-to-install-development-versions.rst:23 msgid "" -"``flwr = { path = \"../../dist/flwr-1.0.0-py3-none-any.whl\", extras = " +"``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\", extras = " "[\"simulation\"] }`` (with extras)" msgstr "" @@ -518,13 +740,13 @@ msgid "" msgstr "" #: ../../source/contributor-how-to-install-development-versions.rst:62 -msgid "Upload the whl (e.g., ``flwr-1.6.0-py3-none-any.whl``)" +msgid "Upload the whl (e.g., ``flwr-1.8.0-py3-none-any.whl``)" msgstr "" #: ../../source/contributor-how-to-install-development-versions.rst:63 msgid "" "Change ``!pip install -q 'flwr[simulation]' torch torchvision " -"matplotlib`` to ``!pip install -q 'flwr-1.6.0-py3-none-" +"matplotlib`` to ``!pip install -q 'flwr-1.8.0-py3-none-" "any.whl[simulation]' torch torchvision matplotlib``" msgstr "" @@ -539,151 +761,136 @@ msgid "" msgstr "" #: ../../source/contributor-how-to-release-flower.rst:7 -msgid "Before the release" +msgid "During the release" msgstr "" #: ../../source/contributor-how-to-release-flower.rst:9 msgid "" -"Update the changelog (``changelog.md``) with all relevant changes that " -"happened after the last release. If the last release was tagged " -"``v1.2.0``, you can use the following URL to see all commits that got " -"merged into ``main`` since then:" +"The version number of a release is stated in ``pyproject.toml``. 
To " +"release a new version of Flower, the following things need to happen (in " +"that order):" msgstr "" #: ../../source/contributor-how-to-release-flower.rst:11 msgid "" -"`GitHub: Compare v1.2.0...main " -"`_" -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:13 -msgid "" -"Thank the authors who contributed since the last release. This can be " -"done by running the ``./dev/add-shortlog.sh`` convenience script (it can " -"be ran multiple times and will update the names in the list if new " -"contributors were added in the meantime)." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:16 -msgid "During the release" -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:18 -msgid "" -"The version number of a release is stated in ``pyproject.toml``. To " -"release a new version of Flower, the following things need to happen (in " -"that order):" +"Run ``python3 src/py/flwr_tool/update_changelog.py `` in " +"order to add every new change to the changelog (feel free to make manual " +"changes to the changelog afterwards until it looks good)." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:20 +#: ../../source/contributor-how-to-release-flower.rst:12 msgid "" -"Update the ``changelog.md`` section header ``Unreleased`` to contain the " -"version number and date for the release you are building. Create a pull " -"request with the change." +"Once the changelog has been updated with all the changes, run ``./dev" +"/prepare-release-changelog.sh v``, where ```` " +"is the version stated in ``pyproject.toml`` (notice the ``v`` added " +"before it). This will replace the ``Unreleased`` header of the changelog " +"by the version and current date, and it will add a thanking message for " +"the contributors. Open a pull request with those changes." 
msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:21 +#: ../../source/contributor-how-to-release-flower.rst:13 msgid "" -"Tag the release commit with the version number as soon as the PR is " -"merged: ``git tag v0.12.3``, then ``git push --tags``. This will create a" -" draft release on GitHub containing the correct artifacts and the " -"relevant part of the changelog." +"Once the pull request is merged, tag the release commit with the version " +"number as soon as the PR is merged: ``git tag v`` (notice " +"the ``v`` added before the version number), then ``git push --tags``. " +"This will create a draft release on GitHub containing the correct " +"artifacts and the relevant part of the changelog." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:22 +#: ../../source/contributor-how-to-release-flower.rst:14 msgid "Check the draft release on GitHub, and if everything is good, publish it." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:25 +#: ../../source/contributor-how-to-release-flower.rst:17 msgid "After the release" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:27 +#: ../../source/contributor-how-to-release-flower.rst:19 msgid "Create a pull request which contains the following changes:" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:29 +#: ../../source/contributor-how-to-release-flower.rst:21 msgid "Increase the minor version in ``pyproject.toml`` by one." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:30 +#: ../../source/contributor-how-to-release-flower.rst:22 msgid "Update all files which contain the current version number if necessary." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:31 +#: ../../source/contributor-how-to-release-flower.rst:23 msgid "Add a new ``Unreleased`` section in ``changelog.md``." 
msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:33 +#: ../../source/contributor-how-to-release-flower.rst:25 msgid "" "Merge the pull request on the same day (i.e., before a new nighly release" " gets published to PyPI)." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:36 +#: ../../source/contributor-how-to-release-flower.rst:28 msgid "Publishing a pre-release" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:39 +#: ../../source/contributor-how-to-release-flower.rst:31 msgid "Pre-release naming" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:41 +#: ../../source/contributor-how-to-release-flower.rst:33 msgid "" "PyPI supports pre-releases (alpha, beta, release candiate). Pre-releases " "MUST use one of the following naming patterns:" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:43 +#: ../../source/contributor-how-to-release-flower.rst:35 msgid "Alpha: ``MAJOR.MINOR.PATCHaN``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:44 +#: ../../source/contributor-how-to-release-flower.rst:36 msgid "Beta: ``MAJOR.MINOR.PATCHbN``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:45 +#: ../../source/contributor-how-to-release-flower.rst:37 msgid "Release candiate (RC): ``MAJOR.MINOR.PATCHrcN``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:47 +#: ../../source/contributor-how-to-release-flower.rst:39 msgid "Examples include:" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:49 +#: ../../source/contributor-how-to-release-flower.rst:41 msgid "``1.0.0a0``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:50 +#: ../../source/contributor-how-to-release-flower.rst:42 msgid "``1.0.0b0``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:51 +#: ../../source/contributor-how-to-release-flower.rst:43 msgid "``1.0.0rc0``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:52 +#: 
../../source/contributor-how-to-release-flower.rst:44 msgid "``1.0.0rc1``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:54 +#: ../../source/contributor-how-to-release-flower.rst:46 msgid "" "This is in line with PEP-440 and the recommendations from the Python " "Packaging Authority (PyPA):" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:57 +#: ../../source/contributor-how-to-release-flower.rst:49 msgid "`PEP-440 `_" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:58 +#: ../../source/contributor-how-to-release-flower.rst:50 msgid "" "`PyPA Choosing a versioning scheme " "`_" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:60 +#: ../../source/contributor-how-to-release-flower.rst:52 msgid "" "Note that the approach defined by PyPA is not compatible with SemVer " "2.0.0 spec, for details consult the `Semantic Versioning Specification " @@ -691,26 +898,26 @@ msgid "" "11 on precedence)." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:63 +#: ../../source/contributor-how-to-release-flower.rst:55 msgid "Pre-release classification" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:65 +#: ../../source/contributor-how-to-release-flower.rst:57 msgid "Should the next pre-release be called alpha, beta, or release candidate?" 
msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:67 +#: ../../source/contributor-how-to-release-flower.rst:59 msgid "" "RC: feature complete, no known issues (apart from issues that are " "classified as \"won't fix\" for the next stable release) - if no issues " "surface this will become the next stable release" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:68 +#: ../../source/contributor-how-to-release-flower.rst:60 msgid "Beta: feature complete, allowed to have known issues" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:69 +#: ../../source/contributor-how-to-release-flower.rst:61 msgid "Alpha: not feature complete, allowed to have known issues" msgstr "" @@ -823,11 +1030,11 @@ msgid "" msgstr "" #: ../../source/contributor-how-to-write-documentation.rst:10 -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:119 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142 msgid "" "Note that, in order to build the documentation locally (with ``poetry run" " make html``, like described below), `Pandoc " -"_` needs to be installed on the " +"`_ needs to be installed on the " "system." msgstr "" @@ -907,7 +1114,7 @@ msgstr "" #: ../../source/contributor-ref-good-first-contributions.rst:25 msgid "" "If you are not familiar with Flower Baselines, you should probably check-" -"out our `contributing guide for baselines `_." msgstr "" @@ -1002,20 +1209,19 @@ msgstr "" msgid "" "If you're familiar with how contributing on GitHub works, you can " "directly checkout our `getting started guide for contributors " -"`_ and " -"examples of `good first contributions `_." +"`_." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:12 +#: ../../source/contributor-tutorial-contribute-on-github.rst:11 msgid "Setting up the repository" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:23 +#: ../../source/contributor-tutorial-contribute-on-github.rst:22 msgid "**Create a GitHub account and setup Git**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:15 +#: ../../source/contributor-tutorial-contribute-on-github.rst:14 msgid "" "Git is a distributed version control tool. This allows for an entire " "codebase's history to be stored and every developer's machine. It is a " @@ -1024,20 +1230,20 @@ msgid "" "/set-up-git>`_ to set it up." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:18 +#: ../../source/contributor-tutorial-contribute-on-github.rst:17 msgid "" "GitHub, itself, is a code hosting platform for version control and " "collaboration. It allows for everyone to collaborate and work from " "anywhere on remote repositories." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:20 +#: ../../source/contributor-tutorial-contribute-on-github.rst:19 msgid "" "If you haven't already, you will need to create an account on `GitHub " "`_." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:22 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "" "The idea behind the generic Git and GitHub workflow boils down to this: " "you download code from a remote repository on GitHub, make changes " @@ -1045,11 +1251,11 @@ msgid "" "history back to GitHub." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:34 +#: ../../source/contributor-tutorial-contribute-on-github.rst:33 msgid "**Forking the Flower repository**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:26 +#: ../../source/contributor-tutorial-contribute-on-github.rst:25 msgid "" "A fork is a personal copy of a GitHub repository. To create one for " "Flower, you must navigate to https://github.com/adap/flower (while " @@ -1057,7 +1263,7 @@ msgid "" "on the top right of the page." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:31 +#: ../../source/contributor-tutorial-contribute-on-github.rst:30 msgid "" "You can change the name if you want, but this is not necessary as this " "version of Flower will be yours and will sit inside your own account " @@ -1065,11 +1271,11 @@ msgid "" " the top left corner that you are looking at your own version of Flower." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:49 +#: ../../source/contributor-tutorial-contribute-on-github.rst:48 msgid "**Cloning your forked repository**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:37 +#: ../../source/contributor-tutorial-contribute-on-github.rst:36 msgid "" "The next step is to download the forked repository on your machine to be " "able to make changes to it. On your forked repository page, you should " @@ -1077,27 +1283,27 @@ msgid "" "ability to copy the HTTPS link of the repository." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:43 +#: ../../source/contributor-tutorial-contribute-on-github.rst:42 msgid "" "Once you copied the \\, you can open a terminal on your machine, " "navigate to the place you want to download the repository to and type:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:49 +#: ../../source/contributor-tutorial-contribute-on-github.rst:48 msgid "" -"This will create a `flower/` (or the name of your fork if you renamed it)" -" folder in the current working directory." +"This will create a ``flower/`` (or the name of your fork if you renamed " +"it) folder in the current working directory." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:68 +#: ../../source/contributor-tutorial-contribute-on-github.rst:67 msgid "**Add origin**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:52 +#: ../../source/contributor-tutorial-contribute-on-github.rst:51 msgid "You can then go into the repository folder:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:58 +#: ../../source/contributor-tutorial-contribute-on-github.rst:57 msgid "" "And here we will need to add an origin to our repository. The origin is " "the \\ of the remote fork repository. To obtain it, we can do as " @@ -1105,27 +1311,27 @@ msgid "" "account and copying the link." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:63 +#: ../../source/contributor-tutorial-contribute-on-github.rst:62 msgid "" "Once the \\ is copied, we can type the following command in our " "terminal:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:92 +#: ../../source/contributor-tutorial-contribute-on-github.rst:91 msgid "**Add upstream**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:71 +#: ../../source/contributor-tutorial-contribute-on-github.rst:70 msgid "" "Now we will add an upstream address to our repository. Still in the same " "directroy, we must run the following command:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:78 +#: ../../source/contributor-tutorial-contribute-on-github.rst:77 msgid "The following diagram visually explains what we did in the previous steps:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:82 +#: ../../source/contributor-tutorial-contribute-on-github.rst:81 msgid "" "The upstream is the GitHub remote address of the parent repository (in " "this case Flower), i.e. the one we eventually want to contribute to and " @@ -1134,17 +1340,17 @@ msgid "" "in our own account." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:86 +#: ../../source/contributor-tutorial-contribute-on-github.rst:85 msgid "" "To make sure our local version of the fork is up-to-date with the latest " "changes from the Flower repository, we can execute the following command:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:95 +#: ../../source/contributor-tutorial-contribute-on-github.rst:94 msgid "Setting up the coding environment" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:97 +#: ../../source/contributor-tutorial-contribute-on-github.rst:96 msgid "" "This can be achieved by following this `getting started guide for " "contributors`_ (note that you won't need to clone the repository). Once " @@ -1152,151 +1358,151 @@ msgid "" "changes!" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:102 +#: ../../source/contributor-tutorial-contribute-on-github.rst:101 msgid "Making changes" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:104 +#: ../../source/contributor-tutorial-contribute-on-github.rst:103 msgid "" "Before making any changes make sure you are up-to-date with your " "repository:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:110 +#: ../../source/contributor-tutorial-contribute-on-github.rst:109 msgid "And with Flower's repository:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:124 +#: ../../source/contributor-tutorial-contribute-on-github.rst:123 msgid "**Create a new branch**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:117 +#: ../../source/contributor-tutorial-contribute-on-github.rst:116 msgid "" "To make the history cleaner and easier to work with, it is good practice " "to create a new branch for each feature/project that needs to be " "implemented." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:120 +#: ../../source/contributor-tutorial-contribute-on-github.rst:119 msgid "" "To do so, just run the following command inside the repository's " "directory:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:127 +#: ../../source/contributor-tutorial-contribute-on-github.rst:126 msgid "**Make changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:127 +#: ../../source/contributor-tutorial-contribute-on-github.rst:126 msgid "Write great code and create wonderful changes using your favorite editor!" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:140 +#: ../../source/contributor-tutorial-contribute-on-github.rst:139 msgid "**Test and format your code**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:130 +#: ../../source/contributor-tutorial-contribute-on-github.rst:129 msgid "" "Don't forget to test and format your code! Otherwise your code won't be " "able to be merged into the Flower repository. This is done so the " "codebase stays consistent and easy to understand." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:133 +#: ../../source/contributor-tutorial-contribute-on-github.rst:132 msgid "To do so, we have written a few scripts that you can execute:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:152 +#: ../../source/contributor-tutorial-contribute-on-github.rst:151 msgid "**Stage changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:143 +#: ../../source/contributor-tutorial-contribute-on-github.rst:142 msgid "" "Before creating a commit that will update your history, you must specify " "to Git which files it needs to take into account." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:145 +#: ../../source/contributor-tutorial-contribute-on-github.rst:144 msgid "This can be done with:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:151 +#: ../../source/contributor-tutorial-contribute-on-github.rst:150 msgid "" "To check which files have been modified compared to the last version " "(last commit) and to see which files are staged for commit, you can use " "the :code:`git status` command." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:162 +#: ../../source/contributor-tutorial-contribute-on-github.rst:161 msgid "**Commit changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:155 +#: ../../source/contributor-tutorial-contribute-on-github.rst:154 msgid "" "Once you have added all the files you wanted to commit using :code:`git " "add`, you can finally create your commit using this command:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:161 +#: ../../source/contributor-tutorial-contribute-on-github.rst:160 msgid "" "The \\ is there to explain to others what the commit " "does. It should be written in an imperative style and be concise. An " "example would be :code:`git commit -m \"Add images to README\"`." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:173 +#: ../../source/contributor-tutorial-contribute-on-github.rst:172 msgid "**Push the changes to the fork**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:165 +#: ../../source/contributor-tutorial-contribute-on-github.rst:164 msgid "" "Once we have committed our changes, we have effectively updated our local" " history, but GitHub has no way of knowing this unless we push our " "changes to our origin's remote address:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:172 +#: ../../source/contributor-tutorial-contribute-on-github.rst:171 msgid "" "Once this is done, you will see on the GitHub that your forked repo was " "updated with the changes you have made." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:176 +#: ../../source/contributor-tutorial-contribute-on-github.rst:175 msgid "Creating and merging a pull request (PR)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:203 +#: ../../source/contributor-tutorial-contribute-on-github.rst:206 msgid "**Create the PR**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:179 +#: ../../source/contributor-tutorial-contribute-on-github.rst:178 msgid "" "Once you have pushed changes, on the GitHub webpage of your repository " "you should see the following message:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:183 -msgid "Otherwise you can always find this option in the `Branches` page." +#: ../../source/contributor-tutorial-contribute-on-github.rst:182 +msgid "Otherwise you can always find this option in the ``Branches`` page." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:185 +#: ../../source/contributor-tutorial-contribute-on-github.rst:184 msgid "" -"Once you click the `Compare & pull request` button, you should see " +"Once you click the ``Compare & pull request`` button, you should see " "something similar to this:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:189 +#: ../../source/contributor-tutorial-contribute-on-github.rst:188 msgid "At the top you have an explanation of which branch will be merged where:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:193 +#: ../../source/contributor-tutorial-contribute-on-github.rst:192 msgid "" "In this example you can see that the request is to merge the branch " "``doc-fixes`` from my forked repository to branch ``main`` from the " "Flower repository." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:195 +#: ../../source/contributor-tutorial-contribute-on-github.rst:194 msgid "" "The input box in the middle is there for you to describe what your PR " "does and to link it to existing issues. We have placed comments (that " @@ -1304,163 +1510,172 @@ msgid "" "process." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:198 +#: ../../source/contributor-tutorial-contribute-on-github.rst:197 +msgid "" +"It is important to follow the instructions described in comments. For " +"instance, in order to not break how our changelog system works, you " +"should read the information above the ``Changelog entry`` section " +"carefully. You can also checkout some examples and details in the " +":ref:`changelogentry` appendix." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:201 msgid "" "At the bottom you will find the button to open the PR. This will notify " "reviewers that a new PR has been opened and that they should look over it" " to merge or to request changes." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:201 +#: ../../source/contributor-tutorial-contribute-on-github.rst:204 msgid "" "If your PR is not yet ready for review, and you don't want to notify " "anyone, you have the option to create a draft pull request:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +#: ../../source/contributor-tutorial-contribute-on-github.rst:209 msgid "**Making new changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +#: ../../source/contributor-tutorial-contribute-on-github.rst:209 msgid "" "Once the PR has been opened (as draft or not), you can still push new " "commits to it the same way we did before, by making changes to the branch" " associated with the PR." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:228 +#: ../../source/contributor-tutorial-contribute-on-github.rst:231 msgid "**Review the PR**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:212 msgid "" "Once the PR has been opened or once the draft PR has been marked as " "ready, a review from code owners will be automatically requested:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:213 +#: ../../source/contributor-tutorial-contribute-on-github.rst:216 msgid "" "Code owners will then look into the code, ask questions, request changes " "or validate the PR." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:215 +#: ../../source/contributor-tutorial-contribute-on-github.rst:218 msgid "Merging will be blocked if there are ongoing requested changes." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:219 +#: ../../source/contributor-tutorial-contribute-on-github.rst:222 msgid "" "To resolve them, just push the necessary changes to the branch associated" " with the PR:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:223 +#: ../../source/contributor-tutorial-contribute-on-github.rst:226 msgid "And resolve the conversation:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:227 +#: ../../source/contributor-tutorial-contribute-on-github.rst:230 msgid "" "Once all the conversations have been resolved, you can re-request a " "review." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:248 +#: ../../source/contributor-tutorial-contribute-on-github.rst:251 msgid "**Once the PR is merged**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:231 +#: ../../source/contributor-tutorial-contribute-on-github.rst:234 msgid "" "If all the automatic tests have passed and reviewers have no more changes" " to request, they can approve the PR and merge it." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:235 +#: ../../source/contributor-tutorial-contribute-on-github.rst:238 msgid "" "Once it is merged, you can delete the branch on GitHub (a button should " "appear to do so) and also delete it locally by doing:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:242 +#: ../../source/contributor-tutorial-contribute-on-github.rst:245 msgid "Then you should update your forked repository by doing:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:251 +#: ../../source/contributor-tutorial-contribute-on-github.rst:254 msgid "Example of first contribution" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:254 +#: ../../source/contributor-tutorial-contribute-on-github.rst:257 msgid "Problem" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:256 +#: ../../source/contributor-tutorial-contribute-on-github.rst:259 msgid "" "For our documentation, we’ve started to use the `Diàtaxis framework " "`_." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:258 +#: ../../source/contributor-tutorial-contribute-on-github.rst:261 msgid "" "Our “How to” guides should have titles that continue the sencence “How to" " …”, for example, “How to upgrade to Flower 1.0”." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:260 +#: ../../source/contributor-tutorial-contribute-on-github.rst:263 msgid "" "Most of our guides do not follow this new format yet, and changing their " "title is (unfortunately) more involved than one might think." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:262 +#: ../../source/contributor-tutorial-contribute-on-github.rst:265 msgid "" "This issue is about changing the title of a doc from present continious " "to present simple." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:264 +#: ../../source/contributor-tutorial-contribute-on-github.rst:267 msgid "" "Let's take the example of “Saving Progress” which we changed to “Save " "Progress”. Does this pass our check?" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:266 +#: ../../source/contributor-tutorial-contribute-on-github.rst:269 msgid "Before: ”How to saving progress” ❌" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:268 +#: ../../source/contributor-tutorial-contribute-on-github.rst:271 msgid "After: ”How to save progress” ✅" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:271 +#: ../../source/contributor-tutorial-contribute-on-github.rst:274 msgid "Solution" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:273 +#: ../../source/contributor-tutorial-contribute-on-github.rst:276 msgid "" "This is a tiny change, but it’ll allow us to test your end-to-end setup. 
" "After cloning and setting up the Flower repo, here’s what you should do:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:275 -msgid "Find the source file in `doc/source`" +#: ../../source/contributor-tutorial-contribute-on-github.rst:278 +msgid "Find the source file in ``doc/source``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:276 +#: ../../source/contributor-tutorial-contribute-on-github.rst:279 msgid "" -"Make the change in the `.rst` file (beware, the dashes under the title " +"Make the change in the ``.rst`` file (beware, the dashes under the title " "should be the same length as the title itself)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:277 +#: ../../source/contributor-tutorial-contribute-on-github.rst:280 msgid "" -"Build the docs and check the result: ``_" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:280 +#: ../../source/contributor-tutorial-contribute-on-github.rst:283 msgid "Rename file" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:282 +#: ../../source/contributor-tutorial-contribute-on-github.rst:285 msgid "" "You might have noticed that the file name still reflects the old wording." " If we just change the file, then we break all existing links to it - it " @@ -1468,77 +1683,77 @@ msgid "" "engine ranking." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:285 +#: ../../source/contributor-tutorial-contribute-on-github.rst:288 msgid "Here’s how to change the file name:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:287 -msgid "Change the file name to `save-progress.rst`" +#: ../../source/contributor-tutorial-contribute-on-github.rst:290 +msgid "Change the file name to ``save-progress.rst``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:288 -msgid "Add a redirect rule to `doc/source/conf.py`" +#: ../../source/contributor-tutorial-contribute-on-github.rst:291 +msgid "Add a redirect rule to ``doc/source/conf.py``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:290 +#: ../../source/contributor-tutorial-contribute-on-github.rst:293 msgid "" -"This will cause a redirect from `saving-progress.html` to `save-" -"progress.html`, old links will continue to work." +"This will cause a redirect from ``saving-progress.html`` to ``save-" +"progress.html``, old links will continue to work." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:293 +#: ../../source/contributor-tutorial-contribute-on-github.rst:296 msgid "Apply changes in the index file" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:295 +#: ../../source/contributor-tutorial-contribute-on-github.rst:298 msgid "" "For the lateral navigation bar to work properly, it is very important to " -"update the `index.rst` file as well. This is where we define the whole " +"update the ``index.rst`` file as well. This is where we define the whole " "arborescence of the navbar." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:298 -msgid "Find and modify the file name in `index.rst`" +#: ../../source/contributor-tutorial-contribute-on-github.rst:301 +msgid "Find and modify the file name in ``index.rst``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:301 +#: ../../source/contributor-tutorial-contribute-on-github.rst:304 msgid "Open PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:303 +#: ../../source/contributor-tutorial-contribute-on-github.rst:306 msgid "" "Commit the changes (commit messages are always imperative: “Do " "something”, in this case “Change …”)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:304 +#: ../../source/contributor-tutorial-contribute-on-github.rst:307 msgid "Push the changes to your fork" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:305 +#: ../../source/contributor-tutorial-contribute-on-github.rst:308 msgid "Open a PR (as shown above)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:306 +#: ../../source/contributor-tutorial-contribute-on-github.rst:309 msgid "Wait for it to be approved!" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:307 +#: ../../source/contributor-tutorial-contribute-on-github.rst:310 msgid "Congrats! 🥳 You're now officially a Flower contributor!" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:311 +#: ../../source/contributor-tutorial-contribute-on-github.rst:314 msgid "How to write a good PR title" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:313 +#: ../../source/contributor-tutorial-contribute-on-github.rst:316 msgid "" "A well-crafted PR title helps team members quickly understand the purpose" " and scope of the changes being proposed. 
Here's a guide to help you " "write a good GitHub PR title:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:315 +#: ../../source/contributor-tutorial-contribute-on-github.rst:318 msgid "" "1. Be Clear and Concise: Provide a clear summary of the changes in a " "concise manner. 1. Use Actionable Verbs: Start with verbs like \"Add,\" " @@ -1548,83 +1763,219 @@ msgid "" "Capitalization and Punctuation: Follow grammar rules for clarity." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:321 +#: ../../source/contributor-tutorial-contribute-on-github.rst:324 msgid "" "Let's start with a few examples for titles that should be avoided because" " they do not provide meaningful information:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:323 +#: ../../source/contributor-tutorial-contribute-on-github.rst:326 msgid "Implement Algorithm" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:324 +#: ../../source/contributor-tutorial-contribute-on-github.rst:327 msgid "Database" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:325 +#: ../../source/contributor-tutorial-contribute-on-github.rst:328 msgid "Add my_new_file.py to codebase" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:326 +#: ../../source/contributor-tutorial-contribute-on-github.rst:329 msgid "Improve code in module" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:327 +#: ../../source/contributor-tutorial-contribute-on-github.rst:330 msgid "Change SomeModule" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:329 +#: ../../source/contributor-tutorial-contribute-on-github.rst:332 msgid "" "Here are a few positive examples which provide helpful information " "without repeating how they do it, as that is already visible in the " "\"Files changed\" section of the PR:" msgstr "" -#: 
../../source/contributor-tutorial-contribute-on-github.rst:331 +#: ../../source/contributor-tutorial-contribute-on-github.rst:334 msgid "Update docs banner to mention Flower Summit 2023" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:332 +#: ../../source/contributor-tutorial-contribute-on-github.rst:335 msgid "Remove unnecessary XGBoost dependency" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:333 +#: ../../source/contributor-tutorial-contribute-on-github.rst:336 msgid "Remove redundant attributes in strategies subclassing FedAvg" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:334 -msgid "Add CI job to deploy the staging system when the `main` branch changes" +#: ../../source/contributor-tutorial-contribute-on-github.rst:337 +msgid "Add CI job to deploy the staging system when the ``main`` branch changes" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:335 +#: ../../source/contributor-tutorial-contribute-on-github.rst:338 msgid "" "Add new amazing library which will be used to improve the simulation " "engine" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:339 +#: ../../source/contributor-tutorial-contribute-on-github.rst:342 #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:548 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:946 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:747 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:727 #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:713 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:367 msgid "Next steps" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:341 +#: ../../source/contributor-tutorial-contribute-on-github.rst:344 msgid "" "Once you have made your first PR, and want to contribute more, be sure to" " check out the 
following :" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:343 +#: ../../source/contributor-tutorial-contribute-on-github.rst:346 msgid "" -"`Good first contributions `_, where you should particularly look " "into the :code:`baselines` contributions." msgstr "" +#: ../../source/contributor-tutorial-contribute-on-github.rst:350 +#: ../../source/fed/0000-20200102-fed-template.md:60 +msgid "Appendix" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:355 +msgid "Changelog entry" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:357 +msgid "" +"When opening a new PR, inside its description, there should be a " +"``Changelog entry`` header." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:359 +msgid "" +"Above this header you should see the following comment that explains how " +"to write your changelog entry:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:361 +msgid "" +"Inside the following 'Changelog entry' section, you should put the " +"description of your changes that will be added to the changelog alongside" +" your PR title." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:364 +msgid "" +"If the section is completely empty (without any token) or non-existant, " +"the changelog will just contain the title of the PR for the changelog " +"entry, without any description." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:367 +msgid "" +"If the section contains some text other than tokens, it will use it to " +"add a description to the change." 
+msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:369 +msgid "" +"If the section contains one of the following tokens it will ignore any " +"other text and put the PR under the corresponding section of the " +"changelog:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:371 +msgid " is for classifying a PR as a general improvement." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:373 +msgid " is to not add the PR to the changelog" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:375 +msgid " is to add a general baselines change to the PR" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:377 +msgid " is to add a general examples change to the PR" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:379 +msgid " is to add a general sdk change to the PR" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:381 +msgid " is to add a general simulations change to the PR" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:383 +msgid "Note that only one token should be used." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:385 +msgid "" +"Its content must have a specific format. 
We will break down what each " +"possibility does:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:387 +msgid "" +"If the ``### Changelog entry`` section contains nothing or doesn't exist," +" the following text will be added to the changelog::" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:391 +msgid "" +"If the ``### Changelog entry`` section contains a description (and no " +"token), the following text will be added to the changelog::" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:397 +msgid "" +"If the ``### Changelog entry`` section contains ````, nothing will " +"change in the changelog." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:399 +msgid "" +"If the ``### Changelog entry`` section contains ````, the " +"following text will be added to the changelog::" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:403 +msgid "" +"If the ``### Changelog entry`` section contains ````, the " +"following text will be added to the changelog::" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:407 +msgid "" +"If the ``### Changelog entry`` section contains ````, the " +"following text will be added to the changelog::" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:411 +msgid "" +"If the ``### Changelog entry`` section contains ````, the following " +"text will be added to the changelog::" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:415 +msgid "" +"If the ``### Changelog entry`` section contains ````, the " +"following text will be added to the changelog::" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:419 +msgid "" +"Note that only one token must be provided, otherwise, only the first " +"action (in the order listed above), will be performed." 
+msgstr "" + #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:2 msgid "Get started as a contributor" msgstr "" @@ -1634,7 +1985,7 @@ msgid "Prerequisites" msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:7 -msgid "`Python 3.7 `_ or above" +msgid "`Python 3.8 `_ or above" msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:8 @@ -1660,39 +2011,84 @@ msgstr "" msgid "Developer Machine Setup" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:20 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:21 +msgid "Preliminarities" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:22 +msgid "Some system-wide dependencies are needed." +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:25 +msgid "For macOS" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:27 +msgid "" +"Install `homebrew `_. Don't forget the post-" +"installation actions to add `brew` to your PATH." +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:28 +msgid "" +"Install `xz` (to install different Python versions) and `pandoc` to build" +" the docs::" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:34 +msgid "For Ubuntu" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:35 +msgid "" +"Ensure you system (Ubuntu 22.04+) is up-to-date, and you have all " +"necessary packages::" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:44 +msgid "Create Flower Dev Environment" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:46 msgid "" -"First, clone the `Flower repository `_ " -"from GitHub::" +"1. 
Clone the `Flower repository `_ from " +"GitHub::" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:26 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:52 +msgid "" +"Let's create the Python environment for all-things Flower. If you wish to" +" use :code:`pyenv`, we provide two convenience scripts that you can use. " +"If you prefer using something else than :code:`pyenv`, create a new " +"environment, activate and skip to the last point where all packages are " +"installed." +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:54 msgid "" -"Second, create a virtual environment (and activate it). If you chose to " -"use :code:`pyenv` (with the :code:`pyenv-virtualenv` plugin) and already " -"have it installed , you can use the following convenience script (by " -"default it will use :code:`Python 3.8.17`, but you can change it by " -"providing a specific :code:``)::" +"If you don't have :code:`pyenv` installed, the following script that will" +" install it, set it up, and create the virtual environment (with " +":code:`Python 3.8.17` by default)::" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:33 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:58 msgid "" -"If you don't have :code:`pyenv` installed, you can use the following " -"script that will install pyenv, set it up and create the virtual " -"environment (with :code:`Python 3.8.17` by default)::" +"If you already have :code:`pyenv` installed (along with the :code:`pyenv-" +"virtualenv` plugin), you can use the following convenience script (with " +":code:`Python 3.8.17` by default)::" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:39 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:62 msgid "" -"Third, install the Flower package in development mode (think :code:`pip " +"3. 
Install the Flower package in development mode (think :code:`pip " "install -e`) along with all necessary dependencies::" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:46 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 msgid "Convenience Scripts" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:48 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:71 msgid "" "The Flower repository contains a number of convenience scripts to make " "recurring development tasks easier and less error-prone. See the " @@ -1700,68 +2096,68 @@ msgid "" "amonst the most important ones:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:54 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77 msgid "Create/Delete Virtual Environment" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:62 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 msgid "Compile ProtoBuf Definitions" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92 msgid "Auto-Format Code" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:76 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:99 msgid "Run Linters and Tests" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:83 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:106 msgid "Run Github Actions (CI) locally" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108 msgid "" "Developers could run the full set of Github Actions workflows under their" -" local environment by using `Act _`. " +" local environment by using `Act `_. 
" "Please refer to the installation instructions under the linked repository" " and run the next command under Flower main cloned repository folder::" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:115 msgid "" "The Flower default workflow would run by setting up the required Docker " "machines underneath." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:97 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:120 msgid "Build Release" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:99 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:122 msgid "" "Flower uses Poetry to build releases. The necessary command is wrapped in" " a simple script::" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:104 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:127 msgid "" "The resulting :code:`.whl` and :code:`.tar.gz` releases will be stored in" " the :code:`/dist` subdirectory." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:109 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:132 msgid "Build Documentation" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:111 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:134 msgid "" "Flower's documentation uses `Sphinx `_. " "There's no convenience script to re-build the documentation yet, but it's" " pretty easy::" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:117 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:140 msgid "This will generate HTML documentation in ``doc/build/html``." msgstr "" @@ -1777,7 +2173,7 @@ msgid "" "designed for non-iid data. 
We are using PyTorch to train a Convolutional " "Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " "When applying FedBN, only few changes needed compared to `Example: " -"PyTorch - From Centralized To Federated `_." msgstr "" @@ -1789,7 +2185,7 @@ msgstr "" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 msgid "" "All files are revised based on `Example: PyTorch - From Centralized To " -"Federated `_. The only thing to do is modifying the file called " ":code:`cifar.py`, revised part is shown below:" msgstr "" @@ -1821,11 +2217,11 @@ msgstr "" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 msgid "" "If you have read `Example: PyTorch - From Centralized To Federated " -"`_, the following parts are easy to follow, onyl " ":code:`get_parameters` and :code:`set_parameters` function in " ":code:`client.py` needed to revise. If not, please read the `Example: " -"PyTorch - From Centralized To Federated `_. first." msgstr "" @@ -2073,7 +2469,7 @@ msgstr "" #: ../../source/example-mxnet-walk-through.rst:244 #: ../../source/example-pytorch-from-centralized-to-federated.rst:221 #: ../../source/tutorial-quickstart-jax.rst:169 -#: ../../source/tutorial-quickstart-mxnet.rst:169 +#: ../../source/tutorial-quickstart-mxnet.rst:171 #: ../../source/tutorial-quickstart-pytorch.rst:155 #: ../../source/tutorial-quickstart-scikitlearn.rst:108 msgid ":code:`get_parameters`" @@ -2092,7 +2488,7 @@ msgstr "" #: ../../source/example-mxnet-walk-through.rst:248 #: ../../source/example-pytorch-from-centralized-to-federated.rst:225 #: ../../source/tutorial-quickstart-jax.rst:173 -#: ../../source/tutorial-quickstart-mxnet.rst:175 +#: ../../source/tutorial-quickstart-mxnet.rst:177 #: ../../source/tutorial-quickstart-pytorch.rst:161 #: ../../source/tutorial-quickstart-scikitlearn.rst:115 msgid ":code:`fit`" @@ -2127,7 +2523,7 @@ msgstr "" #: ../../source/example-mxnet-walk-through.rst:253 #: 
../../source/example-pytorch-from-centralized-to-federated.rst:230 #: ../../source/tutorial-quickstart-jax.rst:178 -#: ../../source/tutorial-quickstart-mxnet.rst:178 +#: ../../source/tutorial-quickstart-mxnet.rst:180 #: ../../source/tutorial-quickstart-pytorch.rst:164 #: ../../source/tutorial-quickstart-scikitlearn.rst:118 msgid ":code:`evaluate`" @@ -2544,8 +2940,8 @@ msgid "" "All that's left to do it to define a function that loads both model and " "data, creates a :code:`CifarClient`, and starts this client. You load " "your data and model by using :code:`cifar.py`. Start :code:`CifarClient` " -"with the function :code:`fl.client.start_numpy_client()` by pointing it " -"at the same IP adress we used in :code:`server.py`:" +"with the function :code:`fl.client.start_client()` by pointing it at the " +"same IP adress we used in :code:`server.py`:" msgstr "" #: ../../source/example-pytorch-from-centralized-to-federated.rst:307 @@ -2577,7 +2973,7 @@ msgid "" msgstr "" #: ../../source/example-walkthrough-pytorch-mnist.rst:6 -#: ../../source/tutorial-quickstart-mxnet.rst:14 +#: ../../source/tutorial-quickstart-mxnet.rst:16 #: ../../source/tutorial-quickstart-pytorch.rst:17 #: ../../source/tutorial-quickstart-scikitlearn.rst:14 msgid "" @@ -2646,10 +3042,11 @@ msgstr "" #: ../../source/example-walkthrough-pytorch-mnist.rst:69 #: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-mxnet.rst:224 +#: ../../source/tutorial-quickstart-mxnet.rst:226 #: ../../source/tutorial-quickstart-pytorch.rst:203 #: ../../source/tutorial-quickstart-scikitlearn.rst:157 #: ../../source/tutorial-quickstart-tensorflow.rst:98 +#: ../../source/tutorial-quickstart-xgboost.rst:309 msgid "Flower Server" msgstr "" @@ -2671,10 +3068,11 @@ msgstr "" #: ../../source/example-walkthrough-pytorch-mnist.rst:89 #: ../../source/tutorial-quickstart-ios.rst:34 -#: ../../source/tutorial-quickstart-mxnet.rst:34 +#: ../../source/tutorial-quickstart-mxnet.rst:36 #: 
../../source/tutorial-quickstart-pytorch.rst:37 #: ../../source/tutorial-quickstart-scikitlearn.rst:40 #: ../../source/tutorial-quickstart-tensorflow.rst:29 +#: ../../source/tutorial-quickstart-xgboost.rst:55 msgid "Flower Client" msgstr "" @@ -2829,7 +3227,7 @@ msgstr "" #: ../../source/explanation-differential-privacy.rst:7 msgid "" -"Please note that these components are still experimental, the correct " +"Please note that these components are still experimental; the correct " "configuration of DP for a specific task is still an unsolved problem." msgstr "" @@ -2867,9 +3265,11 @@ msgstr "" #: ../../source/explanation-differential-privacy.rst:20 msgid "" "The distribution of the update norm has been shown to vary from task-to-" -"task and to evolve as training progresses. Therefore, we use an adaptive " -"approach [andrew]_ that continuously adjusts the clipping threshold to " -"track a prespecified quantile of the update norm distribution." +"task and to evolve as training progresses. This variability is crucial in" +" understanding its impact on differential privacy guarantees, emphasizing" +" the need for an adaptive approach [andrew]_ that continuously adjusts " +"the clipping threshold to track a prespecified quantile of the update " +"norm distribution." msgstr "" #: ../../source/explanation-differential-privacy.rst:23 @@ -2879,7 +3279,7 @@ msgstr "" #: ../../source/explanation-differential-privacy.rst:25 msgid "" "We make (and attempt to enforce) a number of assumptions that must be " -"satisfied to ensure that the training process actually realises the " +"satisfied to ensure that the training process actually realizes the " ":math:`(\\epsilon, \\delta)` guarantees the user has in mind when " "configuring the setup." 
msgstr "" @@ -2908,8 +3308,8 @@ msgstr "" #: ../../source/explanation-differential-privacy.rst:31 msgid "" "The first two are useful for eliminating a multitude of complications " -"associated with calibrating the noise to the clipping threshold while the" -" third one is required to comply with the assumptions of the privacy " +"associated with calibrating the noise to the clipping threshold, while " +"the third one is required to comply with the assumptions of the privacy " "analysis." msgstr "" @@ -2969,7 +3369,7 @@ msgstr "" #: ../../source/explanation-differential-privacy.rst:51 msgid "" "The first version of our solution was to define a decorator whose " -"constructor accepted, among other things, a boolean valued variable " +"constructor accepted, among other things, a boolean-valued variable " "indicating whether adaptive clipping was to be enabled or not. We quickly" " realized that this would clutter its :code:`__init__()` function with " "variables corresponding to hyperparameters of adaptive clipping that " @@ -2982,6 +3382,7 @@ msgid "" msgstr "" #: ../../source/explanation-differential-privacy.rst:54 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 msgid "DPFedAvgFixed" msgstr "" @@ -3016,7 +3417,7 @@ msgid "" ":code:`parameters` field of :code:`FitRes` for each received update and " "setting it to 1. Furthermore, if :code:`server_side_noising=true`, each " "update is perturbed with an amount of noise equal to what it would have " -"been subjected to had client-side noising being enabled. This entails " +"been subjected to had client-side noising being enabled. This entails " "*pre*-processing of the arguments to this method before passing them on " "to the wrappee's implementation of :code:`aggregate_fit()`." 
msgstr "" @@ -3042,6 +3443,7 @@ msgid "" msgstr "" #: ../../source/explanation-differential-privacy.rst:67 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 msgid "DPFedAvgAdaptive" msgstr "" @@ -3113,15 +3515,16 @@ msgstr "" #: ../../source/explanation-differential-privacy.rst:98 msgid "" -"McMahan, H. Brendan, et al. \"Learning differentially private recurrent " -"language models.\" arXiv preprint arXiv:1710.06963 (2017)." +"McMahan et al. \"Learning Differentially Private Recurrent Language " +"Models.\" International Conference on Learning Representations (ICLR), " +"2017." msgstr "" #: ../../source/explanation-differential-privacy.rst:100 msgid "" -"Andrew, Galen, et al. \"Differentially private learning with adaptive " -"clipping.\" Advances in Neural Information Processing Systems 34 (2021): " -"17455-17466." +"Andrew, Galen, et al. \"Differentially Private Learning with Adaptive " +"Clipping.\" Advances in Neural Information Processing Systems (NeurIPS), " +"2021." msgstr "" #: ../../source/explanation-federated-evaluation.rst:2 @@ -3371,10 +3774,6 @@ msgstr "" msgid "\\[Alternative 2\\]" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:60 -msgid "Appendix" -msgstr "" - #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 msgid "Flower Enhancement Doc" msgstr "" @@ -3861,7 +4260,7 @@ msgstr "" msgid "" "This can be achieved by customizing an existing strategy or by " "`implementing a custom strategy from scratch " -"`_. " +"`_. 
" "Here's a nonsensical example that customizes :code:`FedAvg` by adding a " "custom ``\"hello\": \"world\"`` configuration key/value pair to the " "config dict of a *single client* (only the first client in the list, the " @@ -4011,6 +4410,7 @@ msgid "" msgstr "" #: ../../source/how-to-enable-ssl-connections.rst:41 +#: ../../source/ref-api/flwr.server.Server.rst:2 msgid "Server" msgstr "" @@ -4030,7 +4430,7 @@ msgstr "" #: ../../source/how-to-enable-ssl-connections.rst:65 #: ../../source/how-to-upgrade-to-flower-1.0.rst:37 -#: ../../source/ref-api-flwr.rst:15 +#: ../../source/ref-api/flwr.client.Client.rst:2 msgid "Client" msgstr "" @@ -4048,6 +4448,7 @@ msgid "" msgstr "" #: ../../source/how-to-enable-ssl-connections.rst:89 +#: ../../source/how-to-use-built-in-mods.rst:85 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 msgid "Conclusion" msgstr "" @@ -4344,61 +4745,99 @@ msgstr "" msgid "Install stable release" msgstr "" -#: ../../source/how-to-install-flower.rst:14 +#: ../../source/how-to-install-flower.rst:15 +msgid "Using pip" +msgstr "" + +#: ../../source/how-to-install-flower.rst:17 msgid "" "Stable releases are available on `PyPI " "`_::" msgstr "" -#: ../../source/how-to-install-flower.rst:18 +#: ../../source/how-to-install-flower.rst:21 msgid "" "For simulations that use the Virtual Client Engine, ``flwr`` should be " "installed with the ``simulation`` extra::" msgstr "" -#: ../../source/how-to-install-flower.rst:24 +#: ../../source/how-to-install-flower.rst:27 +msgid "Using conda (or mamba)" +msgstr "" + +#: ../../source/how-to-install-flower.rst:29 +msgid "Flower can also be installed from the ``conda-forge`` channel." 
+msgstr "" + +#: ../../source/how-to-install-flower.rst:31 +msgid "" +"If you have not added ``conda-forge`` to your channels, you will first " +"need to run the following::" +msgstr "" + +#: ../../source/how-to-install-flower.rst:36 +msgid "" +"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " +"installed with ``conda``::" +msgstr "" + +#: ../../source/how-to-install-flower.rst:40 +msgid "or with ``mamba``::" +msgstr "" + +#: ../../source/how-to-install-flower.rst:46 msgid "Verify installation" msgstr "" -#: ../../source/how-to-install-flower.rst:26 +#: ../../source/how-to-install-flower.rst:48 msgid "" -"The following command can be used to verfiy if Flower was successfully " +"The following command can be used to verify if Flower was successfully " "installed. If everything worked, it should print the version of Flower to" " the command line::" msgstr "" -#: ../../source/how-to-install-flower.rst:33 +#: ../../source/how-to-install-flower.rst:55 msgid "Advanced installation options" msgstr "" -#: ../../source/how-to-install-flower.rst:36 +#: ../../source/how-to-install-flower.rst:58 +msgid "Install via Docker" +msgstr "" + +#: ../../source/how-to-install-flower.rst:60 +msgid "" +"`How to run Flower using Docker `_" +msgstr "" + +#: ../../source/how-to-install-flower.rst:63 msgid "Install pre-release" msgstr "" -#: ../../source/how-to-install-flower.rst:38 +#: ../../source/how-to-install-flower.rst:65 msgid "" "New (possibly unstable) versions of Flower are sometimes available as " "pre-release versions (alpha, beta, release candidate) before the stable " "release happens::" msgstr "" -#: ../../source/how-to-install-flower.rst:42 +#: ../../source/how-to-install-flower.rst:69 msgid "" "For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" " should be installed with the ``simulation`` extra::" msgstr "" -#: ../../source/how-to-install-flower.rst:47 +#: ../../source/how-to-install-flower.rst:74 msgid "Install nightly release" 
msgstr "" -#: ../../source/how-to-install-flower.rst:49 +#: ../../source/how-to-install-flower.rst:76 msgid "" "The latest (potentially unstable) changes in Flower are available as " "nightly releases::" msgstr "" -#: ../../source/how-to-install-flower.rst:53 +#: ../../source/how-to-install-flower.rst:80 msgid "" "For simulations that use the Virtual Client Engine, ``flwr-nightly`` " "should be installed with the ``simulation`` extra::" @@ -4640,6 +5079,185 @@ msgid "" "metrics.html>`_" msgstr "" +#: ../../source/how-to-run-flower-using-docker.rst:2 +msgid "Run Flower using Docker" +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:4 +msgid "" +"The simplest way to get started with Flower is by using the pre-made " +"Docker images, which you can find on `Docker Hub " +"`_." +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:7 +msgid "Before you start, make sure that the Docker daemon is running:" +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:14 +msgid "" +"If you do not see the version of Docker but instead get an error saying " +"that the command was not found, you will need to install Docker first. " +"You can find installation instruction `here `_." +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:20 +msgid "" +"On Linux, Docker commands require ``sudo`` privilege. If you want to " +"avoid using ``sudo``, you can follow the `Post-installation steps " +"`_ on the " +"official Docker website." +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:25 +msgid "Flower server" +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:28 +msgid "Quickstart" +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:30 +msgid "If you're looking to try out Flower, you can use the following command:" +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:37 +msgid "" +"The command will pull the Docker image with the tag " +"``1.7.0-py3.11-ubuntu22.04`` from Docker Hub. 
The tag contains the " +"information which Flower, Python and Ubuntu is used. In this case, it " +"uses Flower 1.7.0, Python 3.11 and Ubuntu 22.04. The ``--rm`` flag tells " +"Docker to remove the container after it exits." +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:44 +msgid "" +"By default, the Flower server keeps state in-memory. When using the " +"Docker flag ``--rm``, the state is not persisted between container " +"starts. We will show below how to save the state in a file on your host " +"system." +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:48 +msgid "" +"The ``-p :`` flag tells Docker to map the ports " +"``9091``/``9092`` of the host to ``9091``/``9092`` of the container, " +"allowing you to access the Driver API on ``http://localhost:9091`` and " +"the Fleet API on ``http://localhost:9092``. Lastly, any flag that comes " +"after the tag is passed to the Flower server. Here, we are passing the " +"flag ``--insecure``." +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:55 +msgid "" +"The ``--insecure`` flag enables insecure communication (using HTTP, not " +"HTTPS) and should only be used for testing purposes. We strongly " +"recommend enabling `SSL `_ when " +"deploying to a production environment." +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:60 +msgid "" +"You can use ``--help`` to view all available flags that the server " +"supports:" +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:67 +msgid "Mounting a volume to store the state on the host system" +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:69 +msgid "" +"If you want to persist the state of the server on your host system, all " +"you need to do is specify a path where you want to save the file on your " +"host system and a name for the database file. 
In the example below, we " +"tell Docker via the flag ``-v`` to mount the user's home directory " +"(``~/`` on your host) into the ``/app/`` directory of the container. " +"Furthermore, we use the flag ``--database`` to specify the name of the " +"database file." +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:82 +msgid "" +"As soon as the server starts, the file ``state.db`` is created in the " +"user's home directory on your host system. If the file already exists, " +"the server tries to restore the state from the file. To start the server " +"with an empty database, simply remove the ``state.db`` file." +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:87 +msgid "Enabling SSL for secure connections" +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:89 +msgid "" +"To enable SSL, you will need a CA certificate, a server certificate and a" +" server private key." +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:92 +msgid "" +"For testing purposes, you can generate your own self-signed certificates." +" The `Enable SSL connections `_ page contains a section that " +"will guide you through the process." +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:96 +msgid "" +"Assuming all files we need are in the local ``certificates`` directory, " +"we can use the flag ``-v`` to mount the local directory into the " +"``/app/`` directory of the container. This allows the server to access " +"the files within the container. Finally, we pass the names of the " +"certificates to the server with the ``--certificates`` flag." +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:108 +msgid "Using a different Flower or Python version" +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:110 +msgid "" +"If you want to use a different version of Flower or Python, you can do so" +" by changing the tag. All versions we provide are available on `Docker " +"Hub `_." 
+msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:114 +msgid "Pinning a Docker image to a specific version" +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:116 +msgid "" +"It may happen that we update the images behind the tags. Such updates " +"usually include security updates of system dependencies that should not " +"change the functionality of Flower. However, if you want to ensure that " +"you always use the same image, you can specify the hash of the image " +"instead of the tag." +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:121 +msgid "" +"The following command returns the current image hash referenced by the " +"``server:1.7.0-py3.11-ubuntu22.04`` tag:" +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:128 +msgid "Next, we can pin the hash when running a new server container:" +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:137 +msgid "Setting environment variables" +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:139 +msgid "" +"To set a variable inside a Docker container, you can use the ``-e " +"=`` flag." +msgstr "" + #: ../../source/how-to-run-simulations.rst:2 msgid "Run simulations" msgstr "" @@ -4664,12 +5282,12 @@ msgstr "" msgid "" "The :code:`VirtualClientEngine` schedules, launches and manages `virtual`" " clients. These clients are identical to `non-virtual` clients (i.e. the " -"ones you launch via the command `flwr.client.start_numpy_client `_) in the sense that they can be configure " -"by creating a class inheriting, for example, from " -"`flwr.client.NumPyClient `_ " -"and therefore behave in an identical way. In addition to that, clients " -"managed by the :code:`VirtualClientEngine` are:" +"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " +"creating a class inheriting, for example, from `flwr.client.NumPyClient " +"`_ and therefore behave in an " +"identical way. 
In addition to that, clients managed by the " +":code:`VirtualClientEngine` are:" msgstr "" #: ../../source/how-to-run-simulations.rst:12 @@ -5088,6 +5706,12 @@ msgid "" " latest one:" msgstr "" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:97 +msgid "" +"Return/use this object of type ``Parameters`` wherever necessary, such as" +" in the ``initial_parameters`` when defining a ``Strategy``." +msgstr "" + #: ../../source/how-to-upgrade-to-flower-1.0.rst:2 msgid "Upgrade to Flower 1.0" msgstr "" @@ -5350,33 +5974,152 @@ msgid "" "`_ are already updated" " to Flower 1.0, they can serve as a reference for using the Flower 1.0 " "API. If there are further questionsm, `join the Flower Slack " -"`_ and use the channgel ``#questions``." +"`_ and use the channgel ``#questions``." msgstr "" -#: ../../source/how-to-use-strategies.rst:2 -msgid "Use strategies" +#: ../../source/how-to-use-built-in-mods.rst:2 +msgid "Use Built-in Mods" msgstr "" -#: ../../source/how-to-use-strategies.rst:4 +#: ../../source/how-to-use-built-in-mods.rst:4 msgid "" -"Flower allows full customization of the learning process through the " -":code:`Strategy` abstraction. A number of built-in strategies are " -"provided in the core framework." +"**Note: This tutorial covers experimental features. The functionality and" +" interfaces may change in future versions.**" msgstr "" -#: ../../source/how-to-use-strategies.rst:6 +#: ../../source/how-to-use-built-in-mods.rst:6 msgid "" -"There are three ways to customize the way Flower orchestrates the " -"learning process on the server side:" +"In this tutorial, we will learn how to utilize built-in mods to augment " +"the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " +"allow us to perform operations before and after a task is processed in " +"the ``ClientApp``." 
msgstr "" -#: ../../source/how-to-use-strategies.rst:8 -msgid "Use an existing strategy, for example, :code:`FedAvg`" +#: ../../source/how-to-use-built-in-mods.rst:9 +msgid "What are Mods?" msgstr "" -#: ../../source/how-to-use-strategies.rst:9 -#: ../../source/how-to-use-strategies.rst:40 -msgid "Customize an existing strategy with callback functions" +#: ../../source/how-to-use-built-in-mods.rst:11 +msgid "" +"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " +"or inspect the incoming ``Message`` and the resulting outgoing " +"``Message``. The signature for a ``Mod`` is as follows:" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:18 +msgid "A typical mod function might look something like this:" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:31 +msgid "Using Mods" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:33 +msgid "To use mods in your ``ClientApp``, you can follow these steps:" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:36 +msgid "1. Import the required mods" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:38 +msgid "First, import the built-in mod you intend to use:" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:46 +msgid "2. Define your client function" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:48 +msgid "" +"Define your client function (``client_fn``) that will be wrapped by the " +"mod(s):" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:57 +msgid "3. Create the ``ClientApp`` with mods" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:59 +msgid "" +"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " +"argument. 
The order in which you provide the mods matters:" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:72 +msgid "Order of execution" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:74 +msgid "" +"When the ``ClientApp`` runs, the mods are executed in the order they are " +"provided in the list:" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:76 +msgid "``example_mod_1`` (outermost mod)" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:77 +msgid "``example_mod_2`` (next mod)" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:78 +msgid "" +"Message handler (core function that handles the incoming ``Message`` and " +"returns the outgoing ``Message``)" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:79 +msgid "``example_mod_2`` (on the way back)" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:80 +msgid "``example_mod_1`` (outermost mod on the way back)" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:82 +msgid "" +"Each mod has a chance to inspect and modify the incoming ``Message`` " +"before passing it to the next mod, and likewise with the outgoing " +"``Message`` before returning it up the stack." +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:87 +msgid "" +"By following this guide, you have learned how to effectively use mods to " +"enhance your ``ClientApp``'s functionality. Remember that the order of " +"mods is crucial and affects how the input and output are processed." +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:89 +msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:2 +msgid "Use strategies" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:4 +msgid "" +"Flower allows full customization of the learning process through the " +":code:`Strategy` abstraction. A number of built-in strategies are " +"provided in the core framework." 
+msgstr "" + +#: ../../source/how-to-use-strategies.rst:6 +msgid "" +"There are three ways to customize the way Flower orchestrates the " +"learning process on the server side:" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:8 +msgid "Use an existing strategy, for example, :code:`FedAvg`" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:9 +#: ../../source/how-to-use-strategies.rst:40 +msgid "Customize an existing strategy with callback functions" msgstr "" #: ../../source/how-to-use-strategies.rst:10 @@ -5468,35 +6211,35 @@ msgstr "" msgid "How-to guides" msgstr "" -#: ../../source/index.rst:95 +#: ../../source/index.rst:97 msgid "Legacy example guides" msgstr "" -#: ../../source/index.rst:106 ../../source/index.rst:110 +#: ../../source/index.rst:108 ../../source/index.rst:112 msgid "Explanations" msgstr "" -#: ../../source/index.rst:122 +#: None:-1 msgid "API reference" msgstr "" -#: ../../source/index.rst:129 +#: ../../source/index.rst:137 msgid "Reference docs" msgstr "" -#: ../../source/index.rst:145 +#: ../../source/index.rst:153 msgid "Contributor tutorials" msgstr "" -#: ../../source/index.rst:152 +#: ../../source/index.rst:160 msgid "Contributor how-to guides" msgstr "" -#: ../../source/index.rst:164 +#: ../../source/index.rst:173 msgid "Contributor explanations" msgstr "" -#: ../../source/index.rst:170 +#: ../../source/index.rst:179 msgid "Contributor references" msgstr "" @@ -5512,7 +6255,7 @@ msgstr "" #: ../../source/index.rst:7 msgid "" -"Welcome to Flower's documentation. `Flower `_ is a " +"Welcome to Flower's documentation. `Flower `_ is a " "friendly federated learning framework." msgstr "" @@ -5580,25 +6323,33 @@ msgid "" "specific goal." msgstr "" -#: ../../source/index.rst:108 +#: ../../source/index.rst:110 msgid "" "Understanding-oriented concept guides explain and discuss key topics and " "underlying ideas behind Flower and collaborative AI." 
msgstr "" -#: ../../source/index.rst:118 +#: ../../source/index.rst:120 msgid "References" msgstr "" -#: ../../source/index.rst:120 +#: ../../source/index.rst:122 msgid "Information-oriented API reference and other reference material." msgstr "" -#: ../../source/index.rst:140 +#: ../../source/index.rst:131::1 +msgid ":py:obj:`flwr `\\" +msgstr "" + +#: ../../source/index.rst:131::1 flwr:1 of +msgid "Flower main package." +msgstr "" + +#: ../../source/index.rst:148 msgid "Contributor docs" msgstr "" -#: ../../source/index.rst:142 +#: ../../source/index.rst:150 msgid "" "The Flower community welcomes contributions. The following docs are " "intended to help along the way." @@ -5609,7 +6360,7 @@ msgid "Flower CLI reference" msgstr "" #: ../../source/ref-api-cli.rst:7 -msgid "flower-server" +msgid "flower-superlink" msgstr "" #: ../../source/ref-api-cli.rst:17 @@ -5620,363 +6371,4978 @@ msgstr "" msgid "flower-fleet-api" msgstr "" -#: ../../source/ref-api-flwr.rst:2 -msgid "flwr (Python API reference)" +#: ../../source/ref-api/flwr.rst:2 +msgid "flwr" +msgstr "" + +#: ../../source/ref-api/flwr.rst:25 ../../source/ref-api/flwr.server.rst:48 +msgid "Modules" +msgstr "" + +#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.client `\\" +msgstr "" + +#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of +msgid "Flower client." +msgstr "" + +#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.common `\\" +msgstr "" + +#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of +msgid "Common components shared between server and client." +msgstr "" + +#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.server `\\" +msgstr "" + +#: ../../source/ref-api/flwr.rst:35::1 +#: ../../source/ref-api/flwr.server.rst:37::1 flwr.server:1 +#: flwr.server.server.Server:1 of +msgid "Flower server." 
+msgstr "" + +#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.simulation `\\" +msgstr "" + +#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of +msgid "Flower simulation." msgstr "" -#: ../../source/ref-api-flwr.rst:8 +#: ../../source/ref-api/flwr.client.rst:2 msgid "client" msgstr "" -#: ../../source/ref-api-flwr.rst:24 -msgid "start_client" +#: ../../source/ref-api/flwr.client.rst:13 +#: ../../source/ref-api/flwr.common.rst:13 +#: ../../source/ref-api/flwr.server.driver.rst:13 +#: ../../source/ref-api/flwr.server.rst:13 +#: ../../source/ref-api/flwr.simulation.rst:13 +msgid "Functions" +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:24::1 +msgid ":py:obj:`run_client_app `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:24::1 +#: flwr.client.app.run_client_app:1 of +msgid "Run Flower client app." +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:24::1 +msgid "" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:24::1 +#: flwr.client.app.start_client:1 of +msgid "Start a Flower client node which connects to a Flower server." +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:24::1 +msgid "" +":py:obj:`start_numpy_client `\\ \\(\\*\\," +" server\\_address\\, client\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:24::1 +#: flwr.client.app.start_numpy_client:1 of +msgid "Start a Flower NumPyClient which connects to a gRPC server." 
+msgstr "" + +#: ../../source/ref-api/flwr.client.rst:26 +#: ../../source/ref-api/flwr.common.rst:31 +#: ../../source/ref-api/flwr.server.driver.rst:24 +#: ../../source/ref-api/flwr.server.rst:28 +#: ../../source/ref-api/flwr.server.strategy.rst:17 +msgid "Classes" +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:33::1 +msgid ":py:obj:`Client `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:33::1 +#: flwr.client.client.Client:1 of +msgid "Abstract base class for Flower clients." +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:33::1 +msgid "" +":py:obj:`ClientApp `\\ \\(client\\_fn\\[\\, " +"mods\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:33::1 +#: flwr.client.clientapp.ClientApp:1 of +msgid "Flower ClientApp." +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:33::1 +msgid ":py:obj:`NumPyClient `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:33::1 +#: flwr.client.numpy_client.NumPyClient:1 of +msgid "Abstract base class for Flower clients using NumPy." 
+msgstr "" + +#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 +#: flwr.server.client_manager.ClientManager:1 +#: flwr.server.strategy.strategy.Strategy:1 of +msgid "Bases: :py:class:`~abc.ABC`" msgstr "" -#: ../../source/ref-api-flwr.rst:32 +#: ../../source/ref-api/flwr.client.Client.rst:15 +#: ../../source/ref-api/flwr.client.ClientApp.rst:15 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 +#: ../../source/ref-api/flwr.common.FitIns.rst:15 +#: ../../source/ref-api/flwr.common.FitRes.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 +#: ../../source/ref-api/flwr.common.Parameters.rst:15 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 +#: ../../source/ref-api/flwr.common.Status.rst:15 +#: ../../source/ref-api/flwr.server.ClientManager.rst:15 +#: ../../source/ref-api/flwr.server.History.rst:15 +#: ../../source/ref-api/flwr.server.Server.rst:15 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 +#: ../../source/ref-api/flwr.server.driver.Driver.rst:15 +#: ../../source/ref-api/flwr.server.driver.GrpcDriver.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 +#: 
../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 +msgid "Methods" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`evaluate `\\ \\(ins\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.evaluate:1 +#: flwr.client.numpy_client.NumPyClient.evaluate:1 of +msgid "Evaluate the provided parameters using the locally held dataset." +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`fit `\\ \\(ins\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.fit:1 of +msgid "Refine the provided parameters using the locally held dataset." 
+msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_context `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_context:1 +#: flwr.client.numpy_client.NumPyClient.get_context:1 of +msgid "Get the run context from this client." +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_parameters `\\ \\(ins\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_parameters:1 +#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of +msgid "Return the current local model parameters." +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_properties `\\ \\(ins\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.get_properties:1 of +msgid "Return set of client's properties." +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`set_context `\\ \\(context\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.set_context:1 +#: flwr.client.numpy_client.NumPyClient.set_context:1 of +msgid "Apply a run context to this client." +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`to_client `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.to_client:1 of +msgid "Return client (itself)." 
+msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:46 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 +#: ../../source/ref-api/flwr.common.Code.rst:19 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 +#: ../../source/ref-api/flwr.common.EventType.rst:19 +#: ../../source/ref-api/flwr.common.FitIns.rst:25 +#: ../../source/ref-api/flwr.common.FitRes.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 +#: ../../source/ref-api/flwr.common.Parameters.rst:25 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 +#: ../../source/ref-api/flwr.common.Status.rst:25 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 +msgid "Attributes" +msgstr "" + +#: flwr.client.client.Client.evaluate:1::1 of +msgid ":py:obj:`context `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Parameters.rst:2 +#: flwr.client.app.start_client flwr.client.app.start_numpy_client +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.ClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.unregister +#: 
flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.app.start_driver flwr.server.driver.driver.Driver +#: flwr.server.strategy.bulyan.Bulyan +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.fedadagrad.FedAdagrad +#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg +#: flwr.server.strategy.fedavg_android.FedAvgAndroid +#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt +#: flwr.server.strategy.fedprox.FedProx +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg +#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.simulation.app.start_simulation of +msgid "Parameters" +msgstr "" + +#: flwr.client.client.Client.evaluate:3 of +msgid "" +"The evaluation instructions containing (global) model parameters received" +" from the server and a dictionary of configuration values used to " +"customize the local evaluation process." 
+msgstr "" + +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.app.start_driver +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.simulation.app.start_simulation of +msgid "Returns" +msgstr "" + +#: flwr.client.client.Client.evaluate:8 of +msgid "" +"The evaluation result containing the loss on the local dataset and other " +"details such as the number of local data examples used for evaluation." 
+msgstr "" + +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.app.start_driver +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.simulation.app.start_simulation of +msgid "Return type" +msgstr "" + +#: flwr.client.client.Client.fit:3 of +msgid "" +"The training instructions containing (global) model parameters received " +"from the server and a dictionary of configuration values used to " +"customize the local training process." +msgstr "" + +#: flwr.client.client.Client.fit:8 of +msgid "" +"The training result containing updated parameters and other details such " +"as the number of local training examples used for training." +msgstr "" + +#: flwr.client.client.Client.get_parameters:3 of +msgid "" +"The get parameters instructions received from the server containing a " +"dictionary of configuration values." +msgstr "" + +#: flwr.client.client.Client.get_parameters:7 of +msgid "The current local model parameters." 
+msgstr "" + +#: flwr.client.client.Client.get_properties:3 of +msgid "" +"The get properties instructions received from the server containing a " +"dictionary of configuration values." +msgstr "" + +#: flwr.client.client.Client.get_properties:7 of +msgid "The current client properties." +msgstr "" + +#: ../../source/ref-api/flwr.client.ClientApp.rst:2 +msgid "ClientApp" +msgstr "" + +#: flwr.client.clientapp.ClientApp:1 flwr.common.typing.ClientMessage:1 +#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 +#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 +#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 +#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 +#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 +#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 +#: flwr.common.typing.Status:1 flwr.server.app.ServerConfig:1 +#: flwr.server.driver.driver.Driver:1 +#: flwr.server.driver.grpc_driver.GrpcDriver:1 flwr.server.history.History:1 +#: flwr.server.server.Server:1 of +msgid "Bases: :py:class:`object`" +msgstr "" + +#: flwr.client.app.start_client:33 flwr.client.app.start_numpy_client:36 +#: flwr.client.clientapp.ClientApp:4 flwr.server.app.start_server:41 +#: flwr.server.driver.app.start_driver:30 of +msgid "Examples" +msgstr "" + +#: flwr.client.clientapp.ClientApp:5 of +msgid "" +"Assuming a typical `Client` implementation named `FlowerClient`, you can " +"wrap it in a `ClientApp` as follows:" +msgstr "" + +#: flwr.client.clientapp.ClientApp:16 of +msgid "" +"If the above code is in a Python module called `client`, it can be " +"started as follows:" +msgstr "" + +#: flwr.client.clientapp.ClientApp:21 of +msgid "" +"In this `client:app` example, `client` refers to the Python module " +"`client.py` in which the previous code lives in and `app` refers to the " +"global attribute `app` that points to an object of type `ClientApp`." 
+msgstr "" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 msgid "NumPyClient" msgstr "" -#: ../../source/ref-api-flwr.rst:41 -msgid "start_numpy_client" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`evaluate `\\ \\(parameters\\, " +"config\\)" msgstr "" -#: ../../source/ref-api-flwr.rst:49 -msgid "start_simulation" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" msgstr "" -#: ../../source/ref-api-flwr.rst:57 -msgid "server" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.fit:1 of +msgid "Train the provided parameters using the locally held dataset." +msgstr "" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`get_context `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`get_parameters `\\ " +"\\(config\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`get_properties `\\ " +"\\(config\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.get_properties:1 of +msgid "Return a client's set of properties." +msgstr "" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`set_context `\\ " +"\\(context\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`to_client `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.to_client:1 of +msgid "Convert to object to Client type and return it." 
+msgstr "" + +#: flwr.client.numpy_client.NumPyClient.evaluate:1::1 of +msgid ":py:obj:`context `\\" +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.evaluate:3 +#: flwr.client.numpy_client.NumPyClient.fit:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 +#: flwr.server.strategy.strategy.Strategy.configure_fit:5 +#: flwr.server.strategy.strategy.Strategy.evaluate:8 of +msgid "The current (global) model parameters." msgstr "" -#: ../../source/ref-api-flwr.rst:65 -msgid "server.start_server" +#: flwr.client.numpy_client.NumPyClient.evaluate:5 of +msgid "" +"Configuration parameters which allow the server to influence evaluation " +"on the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to influence the number of examples " +"used for evaluation." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +msgid "" +"* **loss** (*float*) -- The evaluation loss of the model on the local " +"dataset. * **num_examples** (*int*) -- The number of examples used for " +"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " +"arbitrary string keys to values of type bool, bytes, float, int, or " +"str. It can be used to communicate arbitrary values back to the server." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +msgid "" +"**loss** (*float*) -- The evaluation loss of the model on the local " +"dataset." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.evaluate:12 of +msgid "**num_examples** (*int*) -- The number of examples used for evaluation." 
+msgstr "" + +#: flwr.client.numpy_client.NumPyClient.evaluate:13 +#: flwr.client.numpy_client.NumPyClient.fit:13 of +msgid "" +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can be " +"used to communicate arbitrary values back to the server." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.evaluate:19 of +msgid "" +"The previous return type format (int, float, float) and the extended " +"format (int, float, float, Dict[str, Scalar]) have been deprecated and " +"removed since Flower 0.19." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.fit:5 of +msgid "" +"Configuration parameters which allow the server to influence training on " +"the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to set the number of (local) training " +"epochs." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.fit:11 of +msgid "" +"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " +"**num_examples** (*int*) -- The number of examples used for training. * " +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can " +"be used to communicate arbitrary values back to the server." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.fit:11 of +msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.fit:12 of +msgid "**num_examples** (*int*) -- The number of examples used for training." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of +msgid "" +"Configuration parameters requested by the server. This can be used to " +"tell the client which parameters are needed along with some Scalar " +"attributes." 
+msgstr "" + +#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of +msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.get_properties:3 of +msgid "" +"Configuration parameters requested by the server. This can be used to " +"tell the client which properties are needed along with some Scalar " +"attributes." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.get_properties:8 of +msgid "" +"**properties** -- A dictionary mapping arbitrary string keys to values of" +" type bool, bytes, float, int, or str. It can be used to communicate " +"arbitrary property values back to the server." +msgstr "" + +#: ../../source/ref-api/flwr.client.run_client_app.rst:2 +msgid "run\\_client\\_app" +msgstr "" + +#: ../../source/ref-api/flwr.client.start_client.rst:2 +msgid "start\\_client" +msgstr "" + +#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +msgid "" +"The IPv4 or IPv6 address of the server. If the Flower server runs on the " +"same machine on port 8080, then `server_address` would be " +"`\"[::]:8080\"`." +msgstr "" + +#: flwr.client.app.start_client:7 of +msgid "A callable that instantiates a Client. (default: None)" +msgstr "" + +#: flwr.client.app.start_client:9 of +msgid "" +"An implementation of the abstract base class `flwr.client.Client` " +"(default: None)" msgstr "" -#: ../../source/ref-api-flwr.rst:73 -msgid "server.strategy" +#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of +msgid "" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" server. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower server needs to be started with the same value (see " +"`flwr.server.start_server`), otherwise it will not know about the " +"increased limit and block larger messages." 
+msgstr "" + +#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 +#: flwr.server.driver.app.start_driver:21 of +msgid "" +"The PEM-encoded root certificates as a byte string or a path string. If " +"provided, a secure connection using the certificates will be established " +"to an SSL-enabled Flower server." +msgstr "" + +#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +msgid "" +"Starts an insecure gRPC connection when True. Enables HTTPS connection " +"when False, using system certificates if `root_certificates` is None." +msgstr "" + +#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of +msgid "" +"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " +"bidirectional streaming - 'grpc-rere': gRPC, request-response " +"(experimental) - 'rest': HTTP (experimental)" +msgstr "" + +#: flwr.client.app.start_client:34 flwr.client.app.start_numpy_client:37 of +msgid "Starting a gRPC client with an insecure server connection:" msgstr "" -#: ../../source/ref-api-flwr.rst:81 -msgid "server.strategy.Strategy" +#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:44 of +msgid "Starting an SSL-enabled gRPC client using system certificates:" msgstr "" -#: ../../source/ref-api-flwr.rst:90 -msgid "server.strategy.FedAvg" +#: flwr.client.app.start_client:52 flwr.client.app.start_numpy_client:52 of +msgid "Starting an SSL-enabled gRPC client using provided certificates:" msgstr "" -#: ../../source/ref-api-flwr.rst:101 -msgid "server.strategy.FedAvgM" +#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 +msgid "start\\_numpy\\_client" msgstr "" -#: ../../source/ref-api-flwr.rst:112 -msgid "server.strategy.FedMedian" +#: flwr.client.app.start_numpy_client:5 of +msgid "" +"This function is deprecated since 1.7.0. 
Use " +":code:`flwr.client.start_client` instead and first convert your " +":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " +":code:`to_client()` method." +msgstr "" + +#: flwr.client.app.start_numpy_client:13 of +msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:2 +msgid "common" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:29::1 +msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:29::1 +#: flwr.common.parameter.bytes_to_ndarray:1 of +msgid "Deserialize NumPy ndarray from bytes." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:29::1 +msgid "" +":py:obj:`configure `\\ \\(identifier\\[\\, " +"filename\\, host\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:29::1 +#: flwr.common.logger.configure:1 of +msgid "Configure logging to file and/or remote log server." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:29::1 +msgid "" +":py:obj:`event `\\ \\(event\\_type\\[\\, " +"event\\_details\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:29::1 +#: flwr.common.telemetry.event:1 of +msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:29::1 +msgid "" +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " +"\\*\\*kwargs\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:29::1 logging.Logger.log:1 +#: of +msgid "Log 'msg % args' with the integer severity 'level'." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:29::1 +msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:29::1 +#: flwr.common.parameter.ndarray_to_bytes:1 of +msgid "Serialize NumPy ndarray to bytes." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.rst:29::1 +msgid ":py:obj:`now `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:29::1 +#: flwr.common.date.now:1 of +msgid "Construct a datetime from time.time() with time zone set to UTC." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:29::1 +msgid "" +":py:obj:`ndarrays_to_parameters `\\ " +"\\(ndarrays\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:29::1 +#: flwr.common.parameter.ndarrays_to_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 +#: of +msgid "Convert NumPy ndarrays to parameters object." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:29::1 +msgid "" +":py:obj:`parameters_to_ndarrays `\\ " +"\\(parameters\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:29::1 +#: flwr.common.parameter.parameters_to_ndarrays:1 of +msgid "Convert parameters object to NumPy ndarrays." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid "" +":py:obj:`ClientMessage `\\ " +"\\(\\[get\\_properties\\_res\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.ClientMessage:1 of +msgid "ClientMessage is a container used to hold one result message." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid ":py:obj:`Code `\\ \\(value\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.Code:1 of +msgid "Client status codes." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.DisconnectRes:1 of +msgid "DisconnectRes message from client to server." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid "" +":py:obj:`EvaluateIns `\\ \\(parameters\\, " +"config\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.EvaluateIns:1 of +msgid "Evaluate instructions for a client." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid "" +":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " +"num\\_examples\\, metrics\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.EvaluateRes:1 of +msgid "Evaluate response from a client." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid ":py:obj:`EventType `\\ \\(value\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.telemetry.EventType:1 of +msgid "Types of telemetry events." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.FitIns:1 of +msgid "Fit instructions for a client." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid "" +":py:obj:`FitRes `\\ \\(status\\, parameters\\, " +"num\\_examples\\, metrics\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.FitRes:1 of +msgid "Fit response from a client." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.GetParametersIns:1 of +msgid "Parameters request for a client." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid "" +":py:obj:`GetParametersRes `\\ \\(status\\, " +"parameters\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.GetParametersRes:1 of +msgid "Response when asked to return parameters." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.GetPropertiesIns:1 of +msgid "Properties request for a client." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid "" +":py:obj:`GetPropertiesRes `\\ \\(status\\, " +"properties\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.GetPropertiesRes:1 of +msgid "Properties response from a client." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid ":py:obj:`NDArray `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid "" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid "" +":py:obj:`Parameters `\\ \\(tensors\\, " +"tensor\\_type\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.Parameters:1 of +msgid "Model parameters." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.ReconnectIns:1 of +msgid "ReconnectIns message from server to client." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid "" +":py:obj:`ServerMessage `\\ " +"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.ServerMessage:1 of +msgid "ServerMessage is a container used to hold one instruction message." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid ":py:obj:`Status `\\ \\(code\\, message\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.Status:1 of +msgid "Client status." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 +msgid "ClientMessage" +msgstr "" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`evaluate_res `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`fit_res `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid "" +":py:obj:`get_parameters_res " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid "" +":py:obj:`get_properties_res " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Code.rst:2 +msgid "Code" +msgstr "" + +#: flwr.common.typing.Code:1 of +msgid "Bases: :py:class:`~enum.Enum`" +msgstr "" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`OK `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`EVALUATE_NOT_IMPLEMENTED " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 +msgid "DisconnectRes" +msgstr "" + +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 +msgid ":py:obj:`reason `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 +msgid "EvaluateIns" +msgstr "" + +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`parameters `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`config `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 +msgid "EvaluateRes" +msgstr "" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`status `\\" +msgstr "" + +#: 
../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`loss `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`metrics `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:2 +msgid "EventType" +msgstr "" + +#: flwr.common.telemetry.EventType:1 of +msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid ":py:obj:`PING `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid ":py:obj:`START_CLIENT_ENTER `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid ":py:obj:`START_CLIENT_LEAVE `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid ":py:obj:`START_SERVER_ENTER `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid ":py:obj:`START_SERVER_LEAVE `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid "" +":py:obj:`RUN_DRIVER_API_ENTER " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid "" +":py:obj:`RUN_DRIVER_API_LEAVE " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid "" +":py:obj:`RUN_FLEET_API_ENTER " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid "" +":py:obj:`RUN_FLEET_API_LEAVE " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid "" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid "" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid "" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" 
+msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid ":py:obj:`DRIVER_CONNECT `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid ":py:obj:`DRIVER_DISCONNECT `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid ":py:obj:`START_DRIVER_ENTER `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid ":py:obj:`START_DRIVER_LEAVE `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid "" +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid "" +":py:obj:`RUN_CLIENT_APP_LEAVE " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid "" +":py:obj:`RUN_SERVER_APP_ENTER " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid "" +":py:obj:`RUN_SERVER_APP_LEAVE " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.FitIns.rst:2 +msgid "FitIns" +msgstr "" + +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`parameters `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`config `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.FitRes.rst:2 +msgid "FitRes" +msgstr "" + +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`status `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`parameters `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`metrics `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 +msgid "GetParametersIns" +msgstr "" + +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 +msgid ":py:obj:`config `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 +msgid "GetParametersRes" +msgstr "" + +#: 
../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`status `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`parameters `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 +msgid "GetPropertiesIns" +msgstr "" + +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 +msgid ":py:obj:`config `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 +msgid "GetPropertiesRes" +msgstr "" + +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`status `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`properties `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.NDArray.rst:2 +msgid "NDArray" +msgstr "" + +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensors `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensor_type `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 +msgid "ReconnectIns" +msgstr "" + +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 +msgid ":py:obj:`seconds `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 +msgid "ServerMessage" +msgstr "" + +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`evaluate_ins `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`fit_ins `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid "" +":py:obj:`get_parameters_ins " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid "" +":py:obj:`get_properties_ins " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Status.rst:2 +msgid "Status" +msgstr "" + +#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid ":py:obj:`code `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid 
":py:obj:`message `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 +msgid "bytes\\_to\\_ndarray" +msgstr "" + +#: ../../source/ref-api/flwr.common.configure.rst:2 +msgid "configure" +msgstr "" + +#: ../../source/ref-api/flwr.common.event.rst:2 +msgid "event" +msgstr "" + +#: ../../source/ref-api/flwr.common.log.rst:2 +msgid "log" +msgstr "" + +#: logging.Logger.log:3 of +msgid "" +"To pass exception information, use the keyword argument exc_info with a " +"true value, e.g." +msgstr "" + +#: logging.Logger.log:6 of +#, python-format +msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +msgstr "" + +#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 +msgid "ndarray\\_to\\_bytes" +msgstr "" + +#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 +msgid "ndarrays\\_to\\_parameters" +msgstr "" + +#: ../../source/ref-api/flwr.common.now.rst:2 +msgid "now" +msgstr "" + +#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 +msgid "parameters\\_to\\_ndarrays" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:2 +msgid "server" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:26::1 +msgid ":py:obj:`run_driver_api `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:26::1 +#: flwr.server.app.run_driver_api:1 of +msgid "Run Flower server (Driver API)." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:26::1 +msgid ":py:obj:`run_fleet_api `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:26::1 +#: flwr.server.app.run_fleet_api:1 of +msgid "Run Flower server (Fleet API)." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:26::1 +msgid ":py:obj:`run_server_app `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:26::1 +#: flwr.server.app.run_server_app:1 of +msgid "Run Flower server app." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.rst:26::1 +msgid ":py:obj:`run_superlink `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:26::1 +#: flwr.server.app.run_superlink:1 of +msgid "Run Flower server (Driver API and Fleet API)." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:26::1 +msgid "" +":py:obj:`start_server `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:26::1 +#: flwr.server.app.start_server:1 of +msgid "Start a Flower server using the gRPC transport layer." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`ClientManager `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.ClientManager:1 of +msgid "Abstract base class for managing Flower clients." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`History `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.history.History:1 of +msgid "History class for training and/or evaluation metrics collection." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid "" +":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " +"strategy\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid "" +":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," +" round\\_timeout\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.app.ServerConfig:1 of +msgid "Flower server config." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.SimpleClientManager:1 of +msgid "Provides a pool of available clients." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.rst:56::1 +msgid ":py:obj:`flwr.server.driver `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:56::1 flwr.server.driver:1 +#: of +msgid "Flower driver SDK." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:56::1 +msgid ":py:obj:`flwr.server.strategy `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.strategy:1 of +msgid "Contains the strategy abstraction and different implementations." +msgstr "" + +#: ../../source/ref-api/flwr.server.ClientManager.rst:2 +msgid "ClientManager" +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1 +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.all:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "Return all available clients." +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`num_available `\\ \\(\\)" +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.num_available:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.num_available:1 of +msgid "Return the number of available clients." +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`register `\\ \\(client\\)" +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.register:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.register:1 of +msgid "Register Flower ClientProxy instance." 
+msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid "" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.sample:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.sample:1 of +msgid "Sample a number of Flower ClientProxy instances." +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`unregister `\\ \\(client\\)" +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.unregister:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.unregister:1 of +msgid "Unregister Flower ClientProxy instance." +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid "" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\, timeout\\)" +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.wait_for:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of +msgid "Wait until at least `num_clients` are available." +msgstr "" + +#: flwr.server.client_manager.ClientManager.num_available:3 +#: flwr.server.client_manager.SimpleClientManager.num_available:3 of +msgid "**num_available** -- The number of currently available clients." +msgstr "" + +#: flwr.server.client_manager.ClientManager.register:6 +#: flwr.server.client_manager.SimpleClientManager.register:6 of +msgid "" +"**success** -- Indicating if registration was successful. False if " +"ClientProxy is already registered or can not be registered for any " +"reason." 
+msgstr "" + +#: flwr.server.client_manager.ClientManager.unregister:3 +#: flwr.server.client_manager.SimpleClientManager.unregister:3 of +msgid "This method is idempotent." +msgstr "" + +#: ../../source/ref-api/flwr.server.History.rst:2 +msgid "History" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1 +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "Add one loss entry (from centralized evaluation)." +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_loss_distributed:1 of +msgid "Add one loss entry (from distributed evaluation)." +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_metrics_centralized " +"`\\ \\(server\\_round\\, " +"metrics\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_centralized:1 of +msgid "Add metrics entries (from centralized evaluation)." +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_metrics_distributed " +"`\\ \\(server\\_round\\, " +"metrics\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed:1 of +msgid "Add metrics entries (from distributed evaluation)." 
+msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_metrics_distributed_fit " +"`\\ \\(server\\_round\\," +" ...\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed_fit:1 of +msgid "Add metrics entries (from distributed fit)." +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`client_manager `\\ \\(\\)" +msgstr "" + +#: flwr.server.server.Server.client_manager:1 +#: flwr.server.server.Server.client_manager:1::1 of +msgid "Return ClientManager." +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid "" +":py:obj:`disconnect_all_clients " +"`\\ \\(timeout\\)" +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.disconnect_all_clients:1 of +msgid "Send shutdown signal to all clients." +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid "" +":py:obj:`evaluate_round `\\ " +"\\(server\\_round\\, timeout\\)" +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.evaluate_round:1 of +msgid "Validate current global model on a number of clients." +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit:1 of +msgid "Run federated averaging for a number of rounds." +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid "" +":py:obj:`fit_round `\\ \\(server\\_round\\," +" timeout\\)" +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit_round:1 of +msgid "Perform a single round of federated averaging." 
+msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid "" +":py:obj:`set_max_workers `\\ " +"\\(max\\_workers\\)" +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_max_workers:1 of +msgid "Set the max_workers used by ThreadPoolExecutor." +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_strategy:1 of +msgid "Replace server strategy." +msgstr "" + +#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 +msgid "ServerConfig" +msgstr "" + +#: flwr.server.app.ServerConfig:3 of +msgid "" +"All attributes have default values which allows users to configure just " +"the ones they care about." +msgstr "" + +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`num_rounds `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`round_timeout `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 +msgid "SimpleClientManager" +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager:1 of +msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`num_available `\\" +" \\(\\)" +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`register `\\ " +"\\(client\\)" +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`unregister `\\ " +"\\(client\\)" +msgstr "" + +#: 
flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\[\\, timeout\\]\\)" +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of +msgid "" +"Blocks until the requested number of clients is available or until a " +"timeout is reached. Current timeout default: 1 day." +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of +msgid "The number of clients to wait for." +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of +msgid "The time in seconds to wait for, defaults to 86400 (24h)." +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of +msgid "**success**" +msgstr "" + +#: ../../source/ref-api/flwr.server.driver.rst:2 +msgid "driver" +msgstr "" + +#: ../../source/ref-api/flwr.server.driver.rst:22::1 +msgid "" +":py:obj:`start_driver `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.driver.rst:22::1 +#: flwr.server.driver.app.start_driver:1 of +msgid "Start a Flower Driver API server." +msgstr "" + +#: ../../source/ref-api/flwr.server.driver.rst:30::1 +msgid "" +":py:obj:`Driver `\\ " +"\\(\\[driver\\_service\\_address\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.driver.rst:30::1 +#: flwr.server.driver.driver.Driver:1 of +msgid "`Driver` class provides an interface to the Driver API." +msgstr "" + +#: ../../source/ref-api/flwr.server.driver.rst:30::1 +msgid "" +":py:obj:`GrpcDriver `\\ " +"\\(\\[driver\\_service\\_address\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.driver.rst:30::1 +#: flwr.server.driver.grpc_driver.GrpcDriver:1 of +msgid "`GrpcDriver` provides access to the gRPC Driver API/service." +msgstr "" + +#: ../../source/ref-api/flwr.server.driver.Driver.rst:2 +msgid "Driver" +msgstr "" + +#: flwr.server.driver.driver.Driver:3 of +msgid "" +"The IPv4 or IPv6 address of the Driver API server. 
Defaults to " +"`\"[::]:9091\"`." +msgstr "" + +#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +msgid "" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order: * CA certificate. * " +"server certificate. * server private key." +msgstr "" + +#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +msgid "" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order:" +msgstr "" + +#: flwr.server.app.start_server:32 flwr.server.driver.driver.Driver:10 of +msgid "CA certificate." +msgstr "" + +#: flwr.server.app.start_server:33 flwr.server.driver.driver.Driver:11 of +msgid "server certificate." +msgstr "" + +#: flwr.server.app.start_server:34 flwr.server.driver.driver.Driver:12 of +msgid "server private key." +msgstr "" + +#: flwr.server.driver.driver.Driver.get_nodes:1::1 of +msgid ":py:obj:`get_nodes `\\ \\(\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.get_nodes:1 +#: flwr.server.driver.driver.Driver.get_nodes:1::1 of +msgid "Get node IDs." +msgstr "" + +#: flwr.server.driver.driver.Driver.get_nodes:1::1 of +msgid "" +":py:obj:`pull_task_res `\\ " +"\\(task\\_ids\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.get_nodes:1::1 +#: flwr.server.driver.driver.Driver.pull_task_res:1 +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 +#: flwr.server.driver.grpc_driver.GrpcDriver.pull_task_res:1 of +msgid "Get task results." 
+msgstr "" + +#: flwr.server.driver.driver.Driver.get_nodes:1::1 of +msgid "" +":py:obj:`push_task_ins `\\ " +"\\(task\\_ins\\_list\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.get_nodes:1::1 +#: flwr.server.driver.driver.Driver.push_task_ins:1 +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 +#: flwr.server.driver.grpc_driver.GrpcDriver.push_task_ins:1 of +msgid "Schedule tasks." +msgstr "" + +#: ../../source/ref-api/flwr.server.driver.GrpcDriver.rst:2 +msgid "GrpcDriver" +msgstr "" + +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +msgid ":py:obj:`connect `\\ \\(\\)" +msgstr "" + +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1 +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +msgid "Connect to the Driver API." +msgstr "" + +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +msgid "" +":py:obj:`create_run `\\ " +"\\(req\\)" +msgstr "" + +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 +#: flwr.server.driver.grpc_driver.GrpcDriver.create_run:1 of +msgid "Request for run ID." +msgstr "" + +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +msgid ":py:obj:`disconnect `\\ \\(\\)" +msgstr "" + +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 +#: flwr.server.driver.grpc_driver.GrpcDriver.disconnect:1 of +msgid "Disconnect from the Driver API." +msgstr "" + +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +msgid ":py:obj:`get_nodes `\\ \\(req\\)" +msgstr "" + +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 +#: flwr.server.driver.grpc_driver.GrpcDriver.get_nodes:1 of +msgid "Get client IDs." 
+msgstr "" + +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +msgid "" +":py:obj:`pull_task_res `\\ " +"\\(req\\)" +msgstr "" + +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +msgid "" +":py:obj:`push_task_ins `\\ " +"\\(req\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.driver.start_driver.rst:2 +msgid "start\\_driver" +msgstr "" + +#: flwr.server.driver.app.start_driver:3 of +msgid "" +"The IPv4 or IPv6 address of the Driver API server. Defaults to " +"`\"[::]:8080\"`." +msgstr "" + +#: flwr.server.driver.app.start_driver:6 of +msgid "" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_driver` will create one." +msgstr "" + +#: flwr.server.app.start_server:9 flwr.server.driver.app.start_driver:10 +#: flwr.simulation.app.start_simulation:28 of +msgid "" +"Currently supported values are `num_rounds` (int, default: 1) and " +"`round_timeout` in seconds (float, default: None)." +msgstr "" + +#: flwr.server.app.start_server:12 flwr.server.driver.app.start_driver:13 of +msgid "" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`start_server` will use `flwr.server.strategy.FedAvg`." +msgstr "" + +#: flwr.server.driver.app.start_driver:17 of +msgid "" +"An implementation of the class `flwr.server.ClientManager`. If no " +"implementation is provided, then `start_driver` will use " +"`flwr.server.SimpleClientManager`." +msgstr "" + +#: flwr.server.app.start_server:37 flwr.server.driver.app.start_driver:26 of +msgid "**hist** -- Object containing training and evaluation metrics." 
+msgstr "" + +#: flwr.server.driver.app.start_driver:31 of +msgid "Starting a driver that connects to an insecure server:" +msgstr "" + +#: flwr.server.driver.app.start_driver:35 of +msgid "Starting a driver that connects to an SSL-enabled server:" +msgstr "" + +#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 +msgid "run\\_driver\\_api" +msgstr "" + +#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 +msgid "run\\_fleet\\_api" +msgstr "" + +#: ../../source/ref-api/flwr.server.run_server_app.rst:2 +msgid "run\\_server\\_app" +msgstr "" + +#: ../../source/ref-api/flwr.server.run_superlink.rst:2 +msgid "run\\_superlink" +msgstr "" + +#: ../../source/ref-api/flwr.server.start_server.rst:2 +msgid "start\\_server" +msgstr "" + +#: flwr.server.app.start_server:3 of +msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +msgstr "" + +#: flwr.server.app.start_server:5 of +msgid "" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_server` will create one." +msgstr "" + +#: flwr.server.app.start_server:16 of +msgid "" +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_server` will use " +"`flwr.server.client_manager.SimpleClientManager`." +msgstr "" + +#: flwr.server.app.start_server:21 of +msgid "" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" clients. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower clients need to be started with the same value (see " +"`flwr.client.start_client`), otherwise clients will not know about the " +"increased limit and block larger messages." 
+msgstr "" + +#: flwr.server.app.start_server:42 of +msgid "Starting an insecure server:" +msgstr "" + +#: flwr.server.app.start_server:46 of +msgid "Starting an SSL-enabled server:" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:2 +msgid "strategy" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of +msgid "Configurable fault-tolerant FedAvg strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of +msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedAdam `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedadam.FedAdam:1 of +msgid "FedAdam - Adaptive Federated Optimization using Adam." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedAvg `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Federated Averaging strategy." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of +msgid "Configurable FedXgbNnAvg strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of +msgid "Configurable FedXgbBagging strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of +msgid "Configurable FedXgbCyclic strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedAvgAndroid `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 of +msgid "Federated Averaging with Momentum strategy." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedOpt `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedopt.FedOpt:1 of +msgid "Federated Optim strategy." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedProx `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedprox.FedProx:1 of +msgid "Federated Optimization strategy." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "FedYogi [Reddi et al., 2020] strategy." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Configurable QFedAvg strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedmedian.FedMedian:1 of +msgid "Configurable FedMedian strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of +msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.krum.Krum:1 of +msgid "Krum [Blanchard et al., 2017] strategy." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.bulyan.Bulyan:1 of +msgid "Bulyan strategy." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of +msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid ":py:obj:`Strategy `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.strategy.Strategy:1 of +msgid "Abstract base class for server strategy implementations." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 +msgid "Bulyan" +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 +#: flwr.server.strategy.fedmedian.FedMedian:1 +#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 +#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:3 of +msgid "Implementation based on https://arxiv.org/abs/1802.07927." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:5 +#: flwr.server.strategy.fedadagrad.FedAdagrad:5 +#: flwr.server.strategy.fedadam.FedAdam:5 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 +#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 +#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of +msgid "Fraction of clients used during training. Defaults to 1.0." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:7 +#: flwr.server.strategy.fedadagrad.FedAdagrad:7 +#: flwr.server.strategy.fedadam.FedAdam:7 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 +#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 +#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of +msgid "Fraction of clients used during validation. Defaults to 1.0." 
+msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:9 +#: flwr.server.strategy.fedadagrad.FedAdagrad:9 +#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 +#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 +#: flwr.server.strategy.fedprox.FedProx:45 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 +#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of +msgid "Minimum number of clients used during training. Defaults to 2." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:11 +#: flwr.server.strategy.fedadagrad.FedAdagrad:11 +#: flwr.server.strategy.fedadam.FedAdam:11 +#: flwr.server.strategy.fedavg.FedAvg:15 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 +#: flwr.server.strategy.fedavgm.FedAvgM:11 +#: flwr.server.strategy.fedopt.FedOpt:11 +#: flwr.server.strategy.fedprox.FedProx:47 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 +#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of +msgid "Minimum number of clients used during validation. Defaults to 2." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:13 +#: flwr.server.strategy.fedadagrad.FedAdagrad:13 +#: flwr.server.strategy.fedadam.FedAdam:13 +#: flwr.server.strategy.fedavg.FedAvg:17 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 +#: flwr.server.strategy.fedavgm.FedAvgM:13 +#: flwr.server.strategy.fedopt.FedOpt:13 +#: flwr.server.strategy.fedprox.FedProx:49 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 +#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of +msgid "Minimum number of total clients in the system. Defaults to 2." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of +msgid "Number of malicious clients in the system. Defaults to 0." 
+msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:17 +#: flwr.server.strategy.fedadagrad.FedAdagrad:15 +#: flwr.server.strategy.fedadam.FedAdam:15 +#: flwr.server.strategy.fedavg.FedAvg:19 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 +#: flwr.server.strategy.fedavgm.FedAvgM:15 +#: flwr.server.strategy.fedopt.FedOpt:15 +#: flwr.server.strategy.fedprox.FedProx:51 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 +#: flwr.server.strategy.fedyogi.FedYogi:17 +#: flwr.server.strategy.fedyogi.FedYogi:18 +#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of +msgid "Optional function used for validation. Defaults to None." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:19 +#: flwr.server.strategy.fedadagrad.FedAdagrad:17 +#: flwr.server.strategy.fedadam.FedAdam:17 +#: flwr.server.strategy.fedavg.FedAvg:21 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 +#: flwr.server.strategy.fedavgm.FedAvgM:17 +#: flwr.server.strategy.fedopt.FedOpt:17 +#: flwr.server.strategy.fedprox.FedProx:53 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 +#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of +msgid "Function used to configure training. Defaults to None." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:19 +#: flwr.server.strategy.fedadam.FedAdam:19 +#: flwr.server.strategy.fedavg.FedAvg:23 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 +#: flwr.server.strategy.fedavgm.FedAvgM:19 +#: flwr.server.strategy.fedopt.FedOpt:19 +#: flwr.server.strategy.fedprox.FedProx:55 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 +#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of +msgid "Function used to configure validation. Defaults to None." 
+msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:23 +#: flwr.server.strategy.fedadagrad.FedAdagrad:25 +#: flwr.server.strategy.fedadam.FedAdam:21 +#: flwr.server.strategy.fedavg.FedAvg:25 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 +#: flwr.server.strategy.fedavgm.FedAvgM:21 +#: flwr.server.strategy.fedopt.FedOpt:21 +#: flwr.server.strategy.fedprox.FedProx:57 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 +#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of +msgid "Whether or not accept rounds containing failures. Defaults to True." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:25 +#: flwr.server.strategy.fedadagrad.FedAdagrad:27 +#: flwr.server.strategy.fedadam.FedAdam:23 +#: flwr.server.strategy.fedavg.FedAvg:27 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 +#: flwr.server.strategy.fedavgm.FedAvgM:23 +#: flwr.server.strategy.fedopt.FedOpt:23 +#: flwr.server.strategy.fedprox.FedProx:59 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 +#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of +msgid "Initial global model parameters." 
+msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:27 of +msgid "" +"Byzantine resilient aggregation rule that is used as the first step of " +"the Bulyan (e.g., Krum)" +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:29 of +msgid "arguments to the first_aggregation rule" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate evaluation losses using weighted average." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate fit results using Bulyan." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of +msgid "Configure the next round of evaluation." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 +#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of +msgid "Configure the next round of training." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Evaluate model parameters using an evaluation function." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 +#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Initialize global model parameters." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of +msgid "Use a fraction of available clients for evaluation." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of +msgid "Return the sample size and the required number of available clients." 
+msgstr "" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" +msgstr "" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of +msgid "This class is deprecated and will be removed in a future release." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation losses using the given strategy." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of +msgid "Configure the next round of evaluation using the specified strategy." 
+msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of +msgid "Evaluate model parameters using an evaluation function from the strategy." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of +msgid "Initialize global model parameters using given strategy." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.configure_fit:3 +#: flwr.server.strategy.strategy.Strategy.evaluate:6 of +msgid "The current round of federated learning." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 +#: flwr.server.strategy.strategy.Strategy.configure_fit:7 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of +msgid "The client manager which holds all currently connected clients." 
+msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of +msgid "" +"**evaluate_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `EvaluateIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated evaluation." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of +msgid "Aggregate training results using unweighted aggregation." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of +msgid "" +"Configure the next round of training incorporating Differential Privacy " +"(DP)." 
+msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of +msgid "" +"Configuration of the next training round includes information related to " +"DP, such as clip norm and noise stddev." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 +#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of +msgid "" +"**fit_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `FitIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated learning." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 +msgid "FaultTolerantFedAvg" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 +#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 +#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 +#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using weighted average." 
+msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 +#: ../../source/ref-changelog.md:839 +msgid "FedAdagrad" +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 +#: flwr.server.strategy.fedadam.FedAdam:1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:3 +#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 +#: flwr.server.strategy.fedyogi.FedYogi:3 of +msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:23 +#: flwr.server.strategy.fedadam.FedAdam:25 +#: flwr.server.strategy.fedadam.FedAdam:27 +#: 
flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 +#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 +#: flwr.server.strategy.fedprox.FedProx:61 +#: flwr.server.strategy.fedprox.FedProx:63 +#: flwr.server.strategy.fedyogi.FedYogi:28 +#: flwr.server.strategy.fedyogi.FedYogi:30 of +msgid "Metrics aggregation function, optional." +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:29 +#: flwr.server.strategy.fedadam.FedAdam:29 +#: flwr.server.strategy.fedopt.FedOpt:29 of +msgid "Server-side learning rate. Defaults to 1e-1." +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:31 +#: flwr.server.strategy.fedadam.FedAdam:31 +#: flwr.server.strategy.fedopt.FedOpt:31 of +msgid "Client-side learning rate. Defaults to 1e-1." +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:33 +#: flwr.server.strategy.fedadam.FedAdam:37 +#: flwr.server.strategy.fedopt.FedOpt:37 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 +msgid "FedAdam" +msgstr "" + +#: flwr.server.strategy.fedadam.FedAdam:33 +#: flwr.server.strategy.fedyogi.FedYogi:36 of +msgid "Momentum parameter. Defaults to 0.9." +msgstr "" + +#: flwr.server.strategy.fedadam.FedAdam:35 +#: flwr.server.strategy.fedyogi.FedYogi:38 of +msgid "Second moment parameter. Defaults to 0.99." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 +msgid "FedAvg" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg:3 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of +msgid "Implementation based on https://arxiv.org/abs/1602.05629" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 +#: of +msgid "" +"Fraction of clients used during training. In case `min_fit_clients` is " +"larger than `fraction_fit * available_clients`, `min_fit_clients` will " +"still be sampled. Defaults to 1.0." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 +#: of +msgid "" +"Fraction of clients used during validation. In case " +"`min_evaluate_clients` is larger than `fraction_evaluate * " +"available_clients`, `min_evaluate_clients` will still be sampled. " +"Defaults to 1.0." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 +msgid "FedAvgAndroid" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: 
flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`bytes_to_ndarray " +"`\\ \\(tensor\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of +msgid "Deserialize NumPy array from bytes." +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`ndarray_to_bytes " +"`\\ \\(ndarray\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of +msgid "Serialize NumPy array to bytes." 
+msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`ndarrays_to_parameters " +"`\\ " +"\\(ndarrays\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`parameters_to_ndarrays " +"`\\ " +"\\(parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 +#: of +msgid "Convert parameters object to NumPy weights." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 +msgid "FedAvgM" +msgstr "" + +#: flwr.server.strategy.fedavgm.FedAvgM:3 of +msgid "Implementation based on https://arxiv.org/abs/1909.06335" +msgstr "" + +#: flwr.server.strategy.fedavgm.FedAvgM:25 of +msgid "" +"Server-side learning rate used in server-side optimization. Defaults to " +"1.0." +msgstr "" + +#: flwr.server.strategy.fedavgm.FedAvgM:28 of +msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 +msgid "FedMedian" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of +msgid "Aggregate fit results using median." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 +msgid "FedOpt" +msgstr "" + +#: flwr.server.strategy.fedopt.FedOpt:33 of +msgid "Momentum parameter. Defaults to 0.0." +msgstr "" + +#: flwr.server.strategy.fedopt.FedOpt:35 of +msgid "Second moment parameter. Defaults to 0.0." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 +msgid "FedProx" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:3 of +msgid "Implementation based on https://arxiv.org/abs/1812.06127" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:5 of +msgid "" +"The strategy in itself will not be different than FedAvg, the client " +"needs to be adjusted. 
A proximal term needs to be added to the loss " +"function during the training:" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:9 of +msgid "" +"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +"\n" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:12 of +msgid "" +"Where $w^t$ are the global parameters and $w$ are the local weights the " +"function will be optimized with." +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:15 of +msgid "In PyTorch, for example, the loss would go from:" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:21 of +msgid "To:" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:30 of +msgid "" +"With `global_params` being a copy of the parameters before the training " +"takes place." +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:65 of +msgid "" +"The weight of the proximal term used in the optimization. 0.0 makes this " +"strategy equivalent to FedAvg, and the higher the coefficient, the more " +"regularization will be used (that is, the client parameters will need to " +"be closer to the server parameters during training)." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of +msgid "Sends the proximal factor mu to the clients" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 +msgid "FedTrimmedAvg" +msgstr "" + +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of +msgid "Implemented based on: https://arxiv.org/abs/1803.01498" +msgstr "" + +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of +msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using trimmed average." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 +msgid "FedXgbBagging" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: 
flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation metrics using average." +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of +msgid "Aggregate fit results using bagging." +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 +msgid "FedXgbCyclic" +msgstr "" + +#: 
flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\," +" results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 +msgid "FedXgbNnAvg" +msgstr "" + +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of +msgid "" +"This strategy is deprecated, but a copy of it is available in Flower " +"Baselines: " +"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\, " +"results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 +msgid "FedYogi" +msgstr "" + +#: flwr.server.strategy.fedyogi.FedYogi:32 of +msgid "Server-side learning rate. Defaults to 1e-2." +msgstr "" + +#: flwr.server.strategy.fedyogi.FedYogi:34 of +msgid "Client-side learning rate. Defaults to 0.0316." +msgstr "" + +#: flwr.server.strategy.fedyogi.FedYogi:40 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 +msgid "Krum" +msgstr "" + +#: flwr.server.strategy.krum.Krum:3 of +msgid "Implementation based on https://arxiv.org/abs/1703.02757" +msgstr "" + +#: flwr.server.strategy.krum.Krum:17 of +msgid "" +"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" +" that case classical Krum is applied." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of +msgid "Aggregate fit results using Krum." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 +msgid "QFedAvg" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of 
+msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 +msgid "Strategy" +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation results." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of +msgid "Aggregate training results." 
+msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.evaluate:1 of +msgid "Evaluate the current model parameters." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of +msgid "Initialize the (global) model parameters." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of +msgid "" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " +"one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of +msgid "Exceptions that occurred while the server was waiting for client updates." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of +msgid "" +"**aggregation_result** -- The aggregated evaluation result. 
Aggregation " +"typically uses some variant of a weighted average." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of +msgid "" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" +" one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of +msgid "" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the new global model parameters (i.e., it will replace the " +"previous parameters with the ones returned from this method). If `None` " +"is returned (e.g., because there were only failures and no viable " +"results) then the server will no update the previous model parameters, " +"the updates received in this round are discarded, and the global model " +"parameters remain the same." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.evaluate:3 of +msgid "" +"This function can be used to perform centralized (i.e., server-side) " +"evaluation of model parameters." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.evaluate:11 of +msgid "" +"**evaluation_result** -- The evaluation result, usually a Tuple " +"containing loss and a dictionary containing task-specific metrics (e.g., " +"accuracy)." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of +msgid "" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the initial global model parameters." 
+msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:2 +msgid "simulation" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:17::1 +msgid "" +":py:obj:`start_simulation `\\ \\(\\*\\," +" client\\_fn\\[\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:17::1 +#: flwr.simulation.app.start_simulation:1 of +msgid "Start a Ray-based Flower simulation server." +msgstr "" + +#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +msgid "start\\_simulation" +msgstr "" + +#: flwr.simulation.app.start_simulation:3 of +msgid "" +"A function creating client instances. The function must take a single " +"`str` argument called `cid`. It should return a single client instance of" +" type Client. Note that the created client instances are ephemeral and " +"will often be destroyed after a single method invocation. Since client " +"instances are not long-lived, they should not attempt to carry state over" +" method invocations. Any state required by the instance (model, dataset, " +"hyperparameters, ...) should be (re-)created in either the call to " +"`client_fn` or the call to any of the client methods (e.g., load " +"evaluation data in the `evaluate` method itself)." +msgstr "" + +#: flwr.simulation.app.start_simulation:13 of +msgid "" +"The total number of clients in this simulation. This must be set if " +"`clients_ids` is not set and vice-versa." +msgstr "" + +#: flwr.simulation.app.start_simulation:16 of +msgid "" +"List `client_id`s for each client. This is only required if `num_clients`" +" is not set. Setting both `num_clients` and `clients_ids` with " +"`len(clients_ids)` not equal to `num_clients` generates an error." +msgstr "" + +#: flwr.simulation.app.start_simulation:20 of +msgid "" +"CPU and GPU resources for a single client. Supported keys are `num_cpus` " +"and `num_gpus`. To understand the GPU utilization caused by `num_gpus`, " +"as well as using custom resources, please consult the Ray documentation." 
+msgstr "" + +#: flwr.simulation.app.start_simulation:25 of +msgid "" +"An implementation of the abstract base class `flwr.server.Server`. If no " +"instance is provided, then `start_server` will create one." +msgstr "" + +#: flwr.simulation.app.start_simulation:31 of +msgid "" +"An implementation of the abstract base class `flwr.server.Strategy`. If " +"no strategy is provided, then `start_server` will use " +"`flwr.server.strategy.FedAvg`." +msgstr "" + +#: flwr.simulation.app.start_simulation:35 of +msgid "" +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_simulation` will use " +"`flwr.server.client_manager.SimpleClientManager`." +msgstr "" + +#: flwr.simulation.app.start_simulation:39 of +msgid "" +"Optional dictionary containing arguments for the call to `ray.init`. If " +"ray_init_args is None (the default), Ray will be initialized with the " +"following default args: { \"ignore_reinit_error\": True, " +"\"include_dashboard\": False } An empty dictionary can be used " +"(ray_init_args={}) to prevent any arguments from being passed to " +"ray.init." +msgstr "" + +#: flwr.simulation.app.start_simulation:39 of +msgid "" +"Optional dictionary containing arguments for the call to `ray.init`. If " +"ray_init_args is None (the default), Ray will be initialized with the " +"following default args:" +msgstr "" + +#: flwr.simulation.app.start_simulation:43 of +msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +msgstr "" + +#: flwr.simulation.app.start_simulation:45 of +msgid "" +"An empty dictionary can be used (ray_init_args={}) to prevent any " +"arguments from being passed to ray.init." +msgstr "" + +#: flwr.simulation.app.start_simulation:48 of +msgid "" +"Set to True to prevent `ray.shutdown()` in case " +"`ray.is_initialized()=True`." +msgstr "" + +#: flwr.simulation.app.start_simulation:50 of +msgid "" +"Optionally specify the type of actor to use. 
The actor object, which " +"persists throughout the simulation, will be the process in charge of " +"running the clients' jobs (i.e. their `fit()` method)." +msgstr "" + +#: flwr.simulation.app.start_simulation:54 of +msgid "" +"If you want to create your own Actor classes, you might need to pass some" +" input argument. You can use this dictionary for such purpose." +msgstr "" + +#: flwr.simulation.app.start_simulation:57 of +msgid "" +"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " +"the VCE to choose in which node the actor is placed. If you are an " +"advanced user needed more control you can use lower-level scheduling " +"strategies to pin actors to specific compute nodes (e.g. via " +"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." +" For all details, please refer to the Ray documentation: " +"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" +msgstr "" + +#: flwr.simulation.app.start_simulation:66 of +msgid "**hist** -- Object containing metrics from training." +msgstr "" + +#: ../../source/ref-changelog.md:1 +msgid "Changelog" +msgstr "" + +#: ../../source/ref-changelog.md:3 +msgid "Unreleased" +msgstr "" + +#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:17 +#: ../../source/ref-changelog.md:110 ../../source/ref-changelog.md:210 +#: ../../source/ref-changelog.md:294 ../../source/ref-changelog.md:358 +#: ../../source/ref-changelog.md:416 ../../source/ref-changelog.md:485 +#: ../../source/ref-changelog.md:614 ../../source/ref-changelog.md:656 +#: ../../source/ref-changelog.md:723 ../../source/ref-changelog.md:789 +#: ../../source/ref-changelog.md:834 ../../source/ref-changelog.md:873 +#: ../../source/ref-changelog.md:906 ../../source/ref-changelog.md:956 +msgid "What's new?" 
+msgstr "" + +#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:80 +#: ../../source/ref-changelog.md:192 ../../source/ref-changelog.md:282 +#: ../../source/ref-changelog.md:346 ../../source/ref-changelog.md:404 +#: ../../source/ref-changelog.md:473 ../../source/ref-changelog.md:535 +#: ../../source/ref-changelog.md:554 ../../source/ref-changelog.md:710 +#: ../../source/ref-changelog.md:781 ../../source/ref-changelog.md:818 +#: ../../source/ref-changelog.md:861 +msgid "Incompatible changes" +msgstr "" + +#: ../../source/ref-changelog.md:9 +msgid "v1.7.0 (2024-02-05)" +msgstr "" + +#: ../../source/ref-changelog.md:11 ../../source/ref-changelog.md:104 +#: ../../source/ref-changelog.md:204 ../../source/ref-changelog.md:288 +#: ../../source/ref-changelog.md:352 ../../source/ref-changelog.md:410 +#: ../../source/ref-changelog.md:479 ../../source/ref-changelog.md:548 +msgid "Thanks to our contributors" +msgstr "" + +#: ../../source/ref-changelog.md:13 ../../source/ref-changelog.md:106 +#: ../../source/ref-changelog.md:206 ../../source/ref-changelog.md:290 +#: ../../source/ref-changelog.md:354 ../../source/ref-changelog.md:412 +msgid "" +"We would like to give our special thanks to all the contributors who made" +" the new version of Flower possible (in `git shortlog` order):" +msgstr "" + +#: ../../source/ref-changelog.md:15 +msgid "" +"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " +"Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo " +"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " +"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " +"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " +"Shaaban`, `Yan Gao`, `Yasar Abbas` " +msgstr "" + +#: ../../source/ref-changelog.md:19 +msgid "" +"**Introduce stateful clients (experimental)** " +"([#2770](https://github.com/adap/flower/pull/2770), " +"[#2686](https://github.com/adap/flower/pull/2686), " +"[#2696](https://github.com/adap/flower/pull/2696), " +"[#2643](https://github.com/adap/flower/pull/2643), " +"[#2769](https://github.com/adap/flower/pull/2769))" +msgstr "" + +#: ../../source/ref-changelog.md:21 +msgid "" +"Subclasses of `Client` and `NumPyClient` can now store local state that " +"remains on the client. Let's start with the highlight first: this new " +"feature is compatible with both simulated clients (via " +"`start_simulation`) and networked clients (via `start_client`). It's also" +" the first preview of new abstractions like `Context` and `RecordSet`. " +"Clients can access state of type `RecordSet` via `state: RecordSet = " +"self.context.state`. Changes to this `RecordSet` are preserved across " +"different rounds of execution to enable stateful computations in a " +"unified way across simulation and deployment." +msgstr "" + +#: ../../source/ref-changelog.md:23 +msgid "" +"**Improve performance** " +"([#2293](https://github.com/adap/flower/pull/2293))" +msgstr "" + +#: ../../source/ref-changelog.md:25 +msgid "" +"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" +"place aggregation to reduce memory consumption. The Flower client " +"serialization/deserialization has been rewritten from the ground up, " +"which results in significant speedups, especially when the client-side " +"training time is short." 
+msgstr "" + +#: ../../source/ref-changelog.md:27 +msgid "" +"**Support Federated Learning with Apple MLX and Flower** " +"([#2693](https://github.com/adap/flower/pull/2693))" +msgstr "" + +#: ../../source/ref-changelog.md:29 +msgid "" +"Flower has official support for federated learning using [Appple " +"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " +"example." +msgstr "" + +#: ../../source/ref-changelog.md:31 +msgid "" +"**Introduce new XGBoost cyclic strategy** " +"([#2666](https://github.com/adap/flower/pull/2666), " +"[#2668](https://github.com/adap/flower/pull/2668))" msgstr "" -#: ../../source/ref-api-flwr.rst:122 -msgid "server.strategy.QFedAvg" +#: ../../source/ref-changelog.md:33 +msgid "" +"A new strategy called `FedXgbCyclic` supports a client-by-client style of" +" training (often called cyclic). The `xgboost-comprehensive` code example" +" shows how to use it in a full project. In addition to that, `xgboost-" +"comprehensive` now also supports simulation mode. With this, Flower " +"offers best-in-class XGBoost support." msgstr "" -#: ../../source/ref-api-flwr.rst:133 -msgid "server.strategy.FaultTolerantFedAvg" +#: ../../source/ref-changelog.md:35 +msgid "" +"**Support Python 3.11** " +"([#2394](https://github.com/adap/flower/pull/2394))" msgstr "" -#: ../../source/ref-api-flwr.rst:144 -msgid "server.strategy.FedOpt" +#: ../../source/ref-changelog.md:37 +msgid "" +"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " +"ensure better support for users using more recent Python versions." 
msgstr "" -#: ../../source/ref-api-flwr.rst:155 -msgid "server.strategy.FedProx" +#: ../../source/ref-changelog.md:39 +msgid "" +"**Update gRPC and ProtoBuf dependencies** " +"([#2814](https://github.com/adap/flower/pull/2814))" msgstr "" -#: ../../source/ref-api-flwr.rst:166 -msgid "server.strategy.FedAdagrad" +#: ../../source/ref-changelog.md:41 +msgid "" +"The `grpcio` and `protobuf` dependencies were updated to their latest " +"versions for improved security and performance." msgstr "" -#: ../../source/ref-api-flwr.rst:177 -msgid "server.strategy.FedAdam" +#: ../../source/ref-changelog.md:43 +msgid "" +"**Introduce Docker image for Flower server** " +"([#2700](https://github.com/adap/flower/pull/2700), " +"[#2688](https://github.com/adap/flower/pull/2688), " +"[#2705](https://github.com/adap/flower/pull/2705), " +"[#2695](https://github.com/adap/flower/pull/2695), " +"[#2747](https://github.com/adap/flower/pull/2747), " +"[#2746](https://github.com/adap/flower/pull/2746), " +"[#2680](https://github.com/adap/flower/pull/2680), " +"[#2682](https://github.com/adap/flower/pull/2682), " +"[#2701](https://github.com/adap/flower/pull/2701))" msgstr "" -#: ../../source/ref-api-flwr.rst:188 -msgid "server.strategy.FedYogi" +#: ../../source/ref-changelog.md:45 +msgid "" +"The Flower server can now be run using an official Docker image. A new " +"how-to guide explains [how to run Flower using " +"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html). An official Flower client Docker image will follow." 
msgstr "" -#: ../../source/ref-api-flwr.rst:199 -msgid "server.strategy.FedTrimmedAvg" +#: ../../source/ref-changelog.md:47 +msgid "" +"**Introduce** `flower-via-docker-compose` **example** " +"([#2626](https://github.com/adap/flower/pull/2626))" msgstr "" -#: ../../source/ref-api-flwr.rst:210 -msgid "server.strategy.Krum" +#: ../../source/ref-changelog.md:49 +msgid "" +"**Introduce** `quickstart-sklearn-tabular` **example** " +"([#2719](https://github.com/adap/flower/pull/2719))" msgstr "" -#: ../../source/ref-api-flwr.rst:221 -msgid "server.strategy.FedXgbNnAvg" +#: ../../source/ref-changelog.md:51 +msgid "" +"**Introduce** `custom-metrics` **example** " +"([#1958](https://github.com/adap/flower/pull/1958))" msgstr "" -#: ../../source/ref-api-flwr.rst:232 -msgid "server.strategy.DPFedAvgAdaptive" +#: ../../source/ref-changelog.md:53 +msgid "" +"**Update code examples to use Flower Datasets** " +"([#2450](https://github.com/adap/flower/pull/2450), " +"[#2456](https://github.com/adap/flower/pull/2456), " +"[#2318](https://github.com/adap/flower/pull/2318), " +"[#2712](https://github.com/adap/flower/pull/2712))" msgstr "" -#: ../../source/ref-api-flwr.rst:243 -msgid "server.strategy.DPFedAvgFixed" +#: ../../source/ref-changelog.md:55 +msgid "" +"Several code examples were updated to use [Flower " +"Datasets](https://flower.ai/docs/datasets/)." 
msgstr "" -#: ../../source/ref-api-flwr.rst:251 -msgid "common" +#: ../../source/ref-changelog.md:57 +msgid "" +"**General updates to Flower Examples** " +"([#2381](https://github.com/adap/flower/pull/2381), " +"[#2805](https://github.com/adap/flower/pull/2805), " +"[#2782](https://github.com/adap/flower/pull/2782), " +"[#2806](https://github.com/adap/flower/pull/2806), " +"[#2829](https://github.com/adap/flower/pull/2829), " +"[#2825](https://github.com/adap/flower/pull/2825), " +"[#2816](https://github.com/adap/flower/pull/2816), " +"[#2726](https://github.com/adap/flower/pull/2726), " +"[#2659](https://github.com/adap/flower/pull/2659), " +"[#2655](https://github.com/adap/flower/pull/2655))" msgstr "" -#: flwr.common:1 of -msgid "Common components shared between server and client." +#: ../../source/ref-changelog.md:59 +msgid "Many Flower code examples received substantial updates." msgstr "" -#: flwr.common.typing.ClientMessage:1 of -msgid "ClientMessage is a container used to hold one result message." +#: ../../source/ref-changelog.md:61 ../../source/ref-changelog.md:154 +msgid "**Update Flower Baselines**" msgstr "" -#: flwr.common.typing.Code:1 of -msgid "Client status codes." +#: ../../source/ref-changelog.md:63 +msgid "" +"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " +"[#2771](https://github.com/adap/flower/pull/2771))" msgstr "" -#: flwr.common.typing.DisconnectRes:1 of -msgid "DisconnectRes message from client to server." +#: ../../source/ref-changelog.md:64 +msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" msgstr "" -#: flwr.common.typing.EvaluateIns:1 of -msgid "Evaluate instructions for a client." +#: ../../source/ref-changelog.md:65 +msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" msgstr "" -#: flwr.common.typing.EvaluateRes:1 of -msgid "Evaluate response from a client." 
+#: ../../source/ref-changelog.md:66 +msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" msgstr "" -#: flwr.common.telemetry.EventType:1 of -msgid "Types of telemetry events." +#: ../../source/ref-changelog.md:67 +msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" +msgstr "" + +#: ../../source/ref-changelog.md:68 +msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" +msgstr "" + +#: ../../source/ref-changelog.md:70 +msgid "" +"**Improve documentation** " +"([#2674](https://github.com/adap/flower/pull/2674), " +"[#2480](https://github.com/adap/flower/pull/2480), " +"[#2826](https://github.com/adap/flower/pull/2826), " +"[#2727](https://github.com/adap/flower/pull/2727), " +"[#2761](https://github.com/adap/flower/pull/2761), " +"[#2900](https://github.com/adap/flower/pull/2900))" +msgstr "" + +#: ../../source/ref-changelog.md:72 +msgid "" +"**Improved testing and development infrastructure** " +"([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), 
" +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" +msgstr "" + +#: ../../source/ref-changelog.md:74 +msgid "" +"The Flower testing and development infrastructure has received " +"substantial updates. This makes Flower 1.7 the most tested release ever." +msgstr "" + +#: ../../source/ref-changelog.md:76 +msgid "" +"**Update dependencies** " +"([#2753](https://github.com/adap/flower/pull/2753), " +"[#2651](https://github.com/adap/flower/pull/2651), " +"[#2739](https://github.com/adap/flower/pull/2739), " +"[#2837](https://github.com/adap/flower/pull/2837), " +"[#2788](https://github.com/adap/flower/pull/2788), " +"[#2811](https://github.com/adap/flower/pull/2811), " +"[#2774](https://github.com/adap/flower/pull/2774), " +"[#2790](https://github.com/adap/flower/pull/2790), " +"[#2751](https://github.com/adap/flower/pull/2751), " +"[#2850](https://github.com/adap/flower/pull/2850), " +"[#2812](https://github.com/adap/flower/pull/2812), " +"[#2872](https://github.com/adap/flower/pull/2872), " +"[#2736](https://github.com/adap/flower/pull/2736), " +"[#2756](https://github.com/adap/flower/pull/2756), " +"[#2857](https://github.com/adap/flower/pull/2857), " +"[#2757](https://github.com/adap/flower/pull/2757), " +"[#2810](https://github.com/adap/flower/pull/2810), " +"[#2740](https://github.com/adap/flower/pull/2740), " +"[#2789](https://github.com/adap/flower/pull/2789))" +msgstr "" + +#: ../../source/ref-changelog.md:78 +msgid "" +"**General improvements** " 
+"([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" msgstr "" -#: 
flwr.common.typing.FitIns:1 of -msgid "Fit instructions for a client." +#: ../../source/ref-changelog.md:82 +msgid "" +"**Deprecate** `start_numpy_client` " +"([#2563](https://github.com/adap/flower/pull/2563), " +"[#2718](https://github.com/adap/flower/pull/2718))" msgstr "" -#: flwr.common.typing.FitRes:1 of -msgid "Fit response from a client." +#: ../../source/ref-changelog.md:84 +msgid "" +"Until now, clients of type `NumPyClient` needed to be started via " +"`start_numpy_client`. In our efforts to consolidate framework APIs, we " +"have introduced changes, and now all client types should start via " +"`start_client`. To continue using `NumPyClient` clients, you simply need " +"to first call the `.to_client()` method and then pass returned `Client` " +"object to `start_client`. The examples and the documentation have been " +"updated accordingly." msgstr "" -#: flwr.common.typing.GetParametersIns:1 of -msgid "Parameters request for a client." +#: ../../source/ref-changelog.md:86 +msgid "" +"**Deprecate legacy DP wrappers** " +"([#2749](https://github.com/adap/flower/pull/2749))" msgstr "" -#: flwr.common.typing.GetParametersRes:1 of -msgid "Response when asked to return parameters." +#: ../../source/ref-changelog.md:88 +msgid "" +"Legacy DP wrapper classes are deprecated, but still functional. This is " +"in preparation for an all-new pluggable version of differential privacy " +"support in Flower." msgstr "" -#: flwr.common.typing.GetPropertiesIns:1 of -msgid "Properties request for a client." +#: ../../source/ref-changelog.md:90 +msgid "" +"**Make optional arg** `--callable` **in** `flower-client` **a required " +"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" msgstr "" -#: flwr.common.typing.GetPropertiesRes:1 of -msgid "Properties response from a client." 
+#: ../../source/ref-changelog.md:92 +msgid "" +"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " +"([#2890](https://github.com/adap/flower/pull/2890))" msgstr "" -#: flwr.common.typing.Parameters:1 of -msgid "Model parameters." +#: ../../source/ref-changelog.md:94 +msgid "" +"**Drop experimental** `Task` **fields** " +"([#2866](https://github.com/adap/flower/pull/2866), " +"[#2865](https://github.com/adap/flower/pull/2865))" msgstr "" -#: flwr.common.typing.ReconnectIns:1 of -msgid "ReconnectIns message from server to client." +#: ../../source/ref-changelog.md:96 +msgid "" +"Experimental fields `sa`, `legacy_server_message` and " +"`legacy_client_message` were removed from `Task` message. The removed " +"fields are superseded by the new `RecordSet` abstraction." msgstr "" -#: flwr.common.typing.ServerMessage:1 of -msgid "ServerMessage is a container used to hold one instruction message." +#: ../../source/ref-changelog.md:98 +msgid "" +"**Retire MXNet examples** " +"([#2724](https://github.com/adap/flower/pull/2724))" msgstr "" -#: flwr.common.typing.Status:1 of -msgid "Client status." +#: ../../source/ref-changelog.md:100 +msgid "" +"The development of the MXNet fremework has ended and the project is now " +"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " +"examples won't receive updates." msgstr "" -#: flwr.common.parameter.bytes_to_ndarray:1 of -msgid "Deserialize NumPy ndarray from bytes." +#: ../../source/ref-changelog.md:102 +msgid "v1.6.0 (2023-11-28)" msgstr "" -#: flwr.common.logger.configure:1 of -msgid "Configure logging to file and/or remote log server." +#: ../../source/ref-changelog.md:108 +msgid "" +"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " msgstr "" -#: logging.Logger.log:1 of -msgid "Log 'msg % args' with the integer severity 'level'." +#: ../../source/ref-changelog.md:112 +msgid "" +"**Add experimental support for Python 3.12** " +"([#2565](https://github.com/adap/flower/pull/2565))" msgstr "" -#: logging.Logger.log:3 of +#: ../../source/ref-changelog.md:114 msgid "" -"To pass exception information, use the keyword argument exc_info with a " -"true value, e.g." +"**Add new XGBoost examples** " +"([#2612](https://github.com/adap/flower/pull/2612), " +"[#2554](https://github.com/adap/flower/pull/2554), " +"[#2617](https://github.com/adap/flower/pull/2617), " +"[#2618](https://github.com/adap/flower/pull/2618), " +"[#2619](https://github.com/adap/flower/pull/2619), " +"[#2567](https://github.com/adap/flower/pull/2567))" msgstr "" -#: logging.Logger.log:6 of -#, python-format -msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +#: ../../source/ref-changelog.md:116 +msgid "" +"We have added a new `xgboost-quickstart` example alongside a new " +"`xgboost-comprehensive` example that goes more in-depth." msgstr "" -#: flwr.common.parameter.ndarray_to_bytes:1 of -msgid "Serialize NumPy ndarray to bytes." +#: ../../source/ref-changelog.md:118 +msgid "" +"**Add Vertical FL example** " +"([#2598](https://github.com/adap/flower/pull/2598))" msgstr "" -#: flwr.common.parameter.ndarrays_to_parameters:1 of -msgid "Convert NumPy ndarrays to parameters object." 
+#: ../../source/ref-changelog.md:120 +msgid "" +"We had many questions about Vertical Federated Learning using Flower, so " +"we decided to add an simple example for it on the [Titanic " +"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " +"tutorial (in the README)." msgstr "" -#: flwr.common.date.now:1 of -msgid "Construct a datetime from time.time() with time zone set to UTC." +#: ../../source/ref-changelog.md:122 +msgid "" +"**Support custom** `ClientManager` **in** `start_driver()` " +"([#2292](https://github.com/adap/flower/pull/2292))" msgstr "" -#: flwr.common.parameter.parameters_to_ndarrays:1 of -msgid "Convert parameters object to NumPy ndarrays." +#: ../../source/ref-changelog.md:124 +msgid "" +"**Update REST API to support create and delete nodes** " +"([#2283](https://github.com/adap/flower/pull/2283))" msgstr "" -#: ../../source/ref-changelog.md:1 -msgid "Changelog" +#: ../../source/ref-changelog.md:126 +msgid "" +"**Update the Android SDK** " +"([#2187](https://github.com/adap/flower/pull/2187))" msgstr "" -#: ../../source/ref-changelog.md:3 -msgid "Unreleased" +#: ../../source/ref-changelog.md:128 +msgid "Add gRPC request-response capability to the Android SDK." msgstr "" -#: ../../source/ref-changelog.md:5 +#: ../../source/ref-changelog.md:130 msgid "" -"**Support custom** `ClientManager` **in** `start_driver()` " -"([#2292](https://github.com/adap/flower/pull/2292))" +"**Update the C++ SDK** " +"([#2537](https://github.com/adap/flower/pull/2537), " +"[#2528](https://github.com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" msgstr "" -#: ../../source/ref-changelog.md:7 -msgid "" -"**Update REST API to support create and delete nodes** " -"([#2283](https://github.com/adap/flower/pull/2283))" +#: ../../source/ref-changelog.md:132 +msgid "Add gRPC request-response capability to the C++ SDK." 
msgstr "" -#: ../../source/ref-changelog.md:9 ../../source/ref-changelog.md:59 -#: ../../source/ref-changelog.md:143 ../../source/ref-changelog.md:207 -#: ../../source/ref-changelog.md:265 ../../source/ref-changelog.md:334 -#: ../../source/ref-changelog.md:463 ../../source/ref-changelog.md:505 -#: ../../source/ref-changelog.md:572 ../../source/ref-changelog.md:638 -#: ../../source/ref-changelog.md:683 ../../source/ref-changelog.md:722 -#: ../../source/ref-changelog.md:755 ../../source/ref-changelog.md:805 -msgid "What's new?" +#: ../../source/ref-changelog.md:134 +msgid "" +"**Make HTTPS the new default** " +"([#2591](https://github.com/adap/flower/pull/2591), " +"[#2636](https://github.com/adap/flower/pull/2636))" msgstr "" -#: ../../source/ref-changelog.md:11 +#: ../../source/ref-changelog.md:136 msgid "" -"**Fix the incorrect return types of Strategy** " -"([#2432](https://github.com/adap/flower/pull/2432/files))" +"Flower is moving to HTTPS by default. The new `flower-server` requires " +"passing `--certificates`, but users can enable `--insecure` to use HTTP " +"for prototyping. The same applies to `flower-client`, which can either " +"use user-provided credentials or gRPC-bundled certificates to connect to " +"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " +"enable insecure HTTP connections." msgstr "" -#: ../../source/ref-changelog.md:13 +#: ../../source/ref-changelog.md:138 msgid "" -"The types of the return values in the docstrings in two methods " -"(`aggregate_fit` and `aggregate_evaluate`) now match the hint types in " -"the code." +"For backward compatibility, `start_client()` and `start_numpy_client()` " +"will still start in insecure mode by default. In a future release, " +"insecure connections will require user opt-in by passing `insecure=True`." 
msgstr "" -#: ../../source/ref-changelog.md:15 +#: ../../source/ref-changelog.md:140 msgid "" "**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " "[#2390](https://github.com/adap/flower/pull/2390), " "[#2493](https://github.com/adap/flower/pull/2493))" msgstr "" -#: ../../source/ref-changelog.md:17 +#: ../../source/ref-changelog.md:142 msgid "" "Using the `client_fn`, Flower clients can interchangeably run as " "standalone processes (i.e. via `start_client`) or in simulation (i.e. via" " `start_simulation`) without requiring changes to how the client class is" -" defined and instantiated. Calling `start_numpy_client` is now " -"deprecated." +" defined and instantiated. The `to_client()` function is introduced to " +"convert a `NumPyClient` to a `Client`." msgstr "" -#: ../../source/ref-changelog.md:19 -msgid "**Update Flower Baselines**" +#: ../../source/ref-changelog.md:144 +msgid "" +"**Add new** `Bulyan` **strategy** " +"([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891))" msgstr "" -#: ../../source/ref-changelog.md:21 +#: ../../source/ref-changelog.md:146 +msgid "" +"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " +"2018](https://arxiv.org/abs/1802.07927)" +msgstr "" + +#: ../../source/ref-changelog.md:148 +msgid "" +"**Add new** `XGB Bagging` **strategy** " +"([#2611](https://github.com/adap/flower/pull/2611))" +msgstr "" + +#: ../../source/ref-changelog.md:150 ../../source/ref-changelog.md:152 +msgid "" +"**Introduce `WorkloadState`** " +"([#2564](https://github.com/adap/flower/pull/2564), " +"[#2632](https://github.com/adap/flower/pull/2632))" +msgstr "" + +#: ../../source/ref-changelog.md:156 msgid "" "FedProx ([#2210](https://github.com/adap/flower/pull/2210), " "[#2286](https://github.com/adap/flower/pull/2286), " "[#2509](https://github.com/adap/flower/pull/2509))" msgstr "" -#: ../../source/ref-changelog.md:23 +#: ../../source/ref-changelog.md:158 msgid "" 
"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " "[#2400](https://github.com/adap/flower/pull/2400))" msgstr "" -#: ../../source/ref-changelog.md:25 +#: ../../source/ref-changelog.md:160 msgid "" "FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " "[#2507](https://github.com/adap/flower/pull/2507))" msgstr "" -#: ../../source/ref-changelog.md:27 +#: ../../source/ref-changelog.md:162 msgid "" "TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " "[#2508](https://github.com/adap/flower/pull/2508))" msgstr "" -#: ../../source/ref-changelog.md:29 +#: ../../source/ref-changelog.md:164 msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" msgstr "" -#: ../../source/ref-changelog.md:31 +#: ../../source/ref-changelog.md:166 +msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +msgstr "" + +#: ../../source/ref-changelog.md:168 +msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" +msgstr "" + +#: ../../source/ref-changelog.md:170 +msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" +msgstr "" + +#: ../../source/ref-changelog.md:172 +msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +msgstr "" + +#: ../../source/ref-changelog.md:174 +msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +msgstr "" + +#: ../../source/ref-changelog.md:176 +msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +msgstr "" + +#: ../../source/ref-changelog.md:178 msgid "" -"**Update Flower Examples** " -"([#2384](https://github.com/adap/flower/pull/2384)), " -"([#2425](https://github.com/adap/flower/pull/2425))" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" msgstr "" -#: ../../source/ref-changelog.md:33 +#: ../../source/ref-changelog.md:180 +msgid "" +"**General updates to Flower Examples** " +"([#2384](https://github.com/adap/flower/pull/2384), " +"[#2425](https://github.com/adap/flower/pull/2425), 
" +"[#2526](https://github.com/adap/flower/pull/2526), " +"[#2302](https://github.com/adap/flower/pull/2302), " +"[#2545](https://github.com/adap/flower/pull/2545))" +msgstr "" + +#: ../../source/ref-changelog.md:182 msgid "" -"**General updates to baselines** " +"**General updates to Flower Baselines** " "([#2301](https://github.com/adap/flower/pull/2301), " "[#2305](https://github.com/adap/flower/pull/2305), " "[#2307](https://github.com/adap/flower/pull/2307), " "[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435))" +"[#2435](https://github.com/adap/flower/pull/2435), " +"[#2462](https://github.com/adap/flower/pull/2462), " +"[#2463](https://github.com/adap/flower/pull/2463), " +"[#2461](https://github.com/adap/flower/pull/2461), " +"[#2469](https://github.com/adap/flower/pull/2469), " +"[#2466](https://github.com/adap/flower/pull/2466), " +"[#2471](https://github.com/adap/flower/pull/2471), " +"[#2472](https://github.com/adap/flower/pull/2472), " +"[#2470](https://github.com/adap/flower/pull/2470))" msgstr "" -#: ../../source/ref-changelog.md:35 +#: ../../source/ref-changelog.md:184 msgid "" "**General updates to the simulation engine** " "([#2331](https://github.com/adap/flower/pull/2331), " "[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448))" +"[#2448](https://github.com/adap/flower/pull/2448), " +"[#2294](https://github.com/adap/flower/pull/2294))" msgstr "" -#: ../../source/ref-changelog.md:37 +#: ../../source/ref-changelog.md:186 +msgid "" +"**General updates to Flower SDKs** " +"([#2288](https://github.com/adap/flower/pull/2288), " +"[#2429](https://github.com/adap/flower/pull/2429), " +"[#2555](https://github.com/adap/flower/pull/2555), " +"[#2543](https://github.com/adap/flower/pull/2543), " +"[#2544](https://github.com/adap/flower/pull/2544), " +"[#2597](https://github.com/adap/flower/pull/2597), " 
+"[#2623](https://github.com/adap/flower/pull/2623))" +msgstr "" + +#: ../../source/ref-changelog.md:188 msgid "" "**General improvements** " "([#2309](https://github.com/adap/flower/pull/2309), " "[#2310](https://github.com/adap/flower/pull/2310), " -"[2313](https://github.com/adap/flower/pull/2313), " +"[#2313](https://github.com/adap/flower/pull/2313), " "[#2316](https://github.com/adap/flower/pull/2316), " -"[2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349)," -" [#2360](https://github.com/adap/flower/pull/2360), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " "[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446))" -msgstr "" - -#: ../../source/ref-changelog.md:39 ../../source/ref-changelog.md:129 -#: ../../source/ref-changelog.md:193 ../../source/ref-changelog.md:247 -#: ../../source/ref-changelog.md:314 +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " 
+"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" +msgstr "" + +#: ../../source/ref-changelog.md:190 ../../source/ref-changelog.md:280 +#: ../../source/ref-changelog.md:344 ../../source/ref-changelog.md:398 +#: ../../source/ref-changelog.md:465 msgid "Flower received many improvements under the hood, too many to list here." msgstr "" -#: ../../source/ref-changelog.md:41 ../../source/ref-changelog.md:131 -#: ../../source/ref-changelog.md:195 ../../source/ref-changelog.md:253 -#: ../../source/ref-changelog.md:322 ../../source/ref-changelog.md:384 -#: ../../source/ref-changelog.md:403 ../../source/ref-changelog.md:559 -#: ../../source/ref-changelog.md:630 ../../source/ref-changelog.md:667 -#: ../../source/ref-changelog.md:710 -msgid "Incompatible changes" -msgstr "" - -#: ../../source/ref-changelog.md:43 +#: ../../source/ref-changelog.md:194 msgid "" "**Remove support for Python 3.7** " "([#2280](https://github.com/adap/flower/pull/2280), " @@ -5987,43 +11353,30 @@ msgid "" "[#2356](https://github.com/adap/flower/pull/2356))" msgstr "" -#: ../../source/ref-changelog.md:45 +#: ../../source/ref-changelog.md:196 msgid "" "Python 3.7 support was deprecated in Flower 1.5, and this release removes" " support. Flower now requires Python 3.8." msgstr "" -#: ../../source/ref-changelog.md:47 +#: ../../source/ref-changelog.md:198 msgid "" "**Remove experimental argument** `rest` **from** `start_client` " "([#2324](https://github.com/adap/flower/pull/2324))" msgstr "" -#: ../../source/ref-changelog.md:49 +#: ../../source/ref-changelog.md:200 msgid "" "The (still experimental) argument `rest` was removed from `start_client` " "and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " "experimental REST API instead." 
msgstr "" -#: ../../source/ref-changelog.md:51 +#: ../../source/ref-changelog.md:202 msgid "v1.5.0 (2023-08-31)" msgstr "" -#: ../../source/ref-changelog.md:53 ../../source/ref-changelog.md:137 -#: ../../source/ref-changelog.md:201 ../../source/ref-changelog.md:259 -#: ../../source/ref-changelog.md:328 ../../source/ref-changelog.md:397 -msgid "Thanks to our contributors" -msgstr "" - -#: ../../source/ref-changelog.md:55 ../../source/ref-changelog.md:139 -#: ../../source/ref-changelog.md:203 ../../source/ref-changelog.md:261 -msgid "" -"We would like to give our special thanks to all the contributors who made" -" the new version of Flower possible (in `git shortlog` order):" -msgstr "" - -#: ../../source/ref-changelog.md:57 +#: ../../source/ref-changelog.md:208 msgid "" "`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " "`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " @@ -6032,7 +11385,7 @@ msgid "" "TOKEN_v1.5.0-->" msgstr "" -#: ../../source/ref-changelog.md:61 +#: ../../source/ref-changelog.md:212 msgid "" "**Introduce new simulation engine** " "([#1969](https://github.com/adap/flower/pull/1969), " @@ -6040,7 +11393,7 @@ msgid "" "[#2248](https://github.com/adap/flower/pull/2248))" msgstr "" -#: ../../source/ref-changelog.md:63 +#: ../../source/ref-changelog.md:214 msgid "" "The new simulation engine has been rewritten from the ground up, yet it " "remains fully backwards compatible. It offers much improved stability and" @@ -6049,18 +11402,18 @@ msgid "" "only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." 
msgstr "" -#: ../../source/ref-changelog.md:65 +#: ../../source/ref-changelog.md:216 msgid "" "Comprehensive documentation includes a new [how-to run " -"simulations](https://flower.dev/docs/framework/how-to-run-" +"simulations](https://flower.ai/docs/framework/how-to-run-" "simulations.html) guide, new [simulation-" -"pytorch](https://flower.dev/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.dev/docs/examples/simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" "tensorflow.html) notebooks, and a new [YouTube tutorial " "series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." msgstr "" -#: ../../source/ref-changelog.md:67 +#: ../../source/ref-changelog.md:218 msgid "" "**Restructure Flower Docs** " "([#1824](https://github.com/adap/flower/pull/1824), " @@ -6092,42 +11445,42 @@ msgid "" "[#2227](https://github.com/adap/flower/pull/2227))" msgstr "" -#: ../../source/ref-changelog.md:69 +#: ../../source/ref-changelog.md:220 msgid "" "Much effort went into a completely restructured Flower docs experience. " -"The documentation on [flower.dev/docs](flower.dev/docs) is now divided " +"The documentation on [flower.ai/docs](flower.ai/docs) is now divided " "into Flower Framework, Flower Baselines, Flower Android SDK, Flower iOS " "SDK, and code example projects." msgstr "" -#: ../../source/ref-changelog.md:71 +#: ../../source/ref-changelog.md:222 msgid "" "**Introduce Flower Swift SDK** " "([#1858](https://github.com/adap/flower/pull/1858), " "[#1897](https://github.com/adap/flower/pull/1897))" msgstr "" -#: ../../source/ref-changelog.md:73 +#: ../../source/ref-changelog.md:224 msgid "" "This is the first preview release of the Flower Swift SDK. Flower support" " on iOS is improving, and alongside the Swift SDK and code example, there" " is now also an iOS quickstart tutorial." 
msgstr "" -#: ../../source/ref-changelog.md:75 +#: ../../source/ref-changelog.md:226 msgid "" "**Introduce Flower Android SDK** " "([#2131](https://github.com/adap/flower/pull/2131))" msgstr "" -#: ../../source/ref-changelog.md:77 +#: ../../source/ref-changelog.md:228 msgid "" "This is the first preview release of the Flower Kotlin SDK. Flower " "support on Android is improving, and alongside the Kotlin SDK and code " "example, there is now also an Android quickstart tutorial." msgstr "" -#: ../../source/ref-changelog.md:79 +#: ../../source/ref-changelog.md:230 msgid "" "**Introduce new end-to-end testing infrastructure** " "([#1842](https://github.com/adap/flower/pull/1842), " @@ -6149,42 +11502,42 @@ msgid "" "[#2165](https://github.com/adap/flower/pull/2165))" msgstr "" -#: ../../source/ref-changelog.md:81 +#: ../../source/ref-changelog.md:232 msgid "" "A new testing infrastructure ensures that new changes stay compatible " "with existing framework integrations or strategies." msgstr "" -#: ../../source/ref-changelog.md:83 +#: ../../source/ref-changelog.md:234 msgid "**Deprecate Python 3.7**" msgstr "" -#: ../../source/ref-changelog.md:85 +#: ../../source/ref-changelog.md:236 msgid "" "Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" " Python 3.7 is now deprecated and will be removed in an upcoming release." msgstr "" -#: ../../source/ref-changelog.md:87 +#: ../../source/ref-changelog.md:238 msgid "" "**Add new** `FedTrimmedAvg` **strategy** " "([#1769](https://github.com/adap/flower/pull/1769), " "[#1853](https://github.com/adap/flower/pull/1853))" msgstr "" -#: ../../source/ref-changelog.md:89 +#: ../../source/ref-changelog.md:240 msgid "" "The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " "2018](https://arxiv.org/abs/1803.01498)." 
msgstr "" -#: ../../source/ref-changelog.md:91 +#: ../../source/ref-changelog.md:242 msgid "" "**Introduce start_driver** " "([#1697](https://github.com/adap/flower/pull/1697))" msgstr "" -#: ../../source/ref-changelog.md:93 +#: ../../source/ref-changelog.md:244 msgid "" "In addition to `start_server` and using the raw Driver API, there is a " "new `start_driver` function that allows for running `start_server` " @@ -6193,13 +11546,13 @@ msgid "" "`start_driver`." msgstr "" -#: ../../source/ref-changelog.md:95 +#: ../../source/ref-changelog.md:246 msgid "" "**Add parameter aggregation to** `mt-pytorch` **code example** " "([#1785](https://github.com/adap/flower/pull/1785))" msgstr "" -#: ../../source/ref-changelog.md:97 +#: ../../source/ref-changelog.md:248 msgid "" "The `mt-pytorch` example shows how to aggregate parameters when writing a" " driver script. The included `driver.py` and `server.py` have been " @@ -6207,53 +11560,53 @@ msgid "" "building server-side logic." msgstr "" -#: ../../source/ref-changelog.md:99 +#: ../../source/ref-changelog.md:250 msgid "" "**Migrate experimental REST API to Starlette** " "([2171](https://github.com/adap/flower/pull/2171))" msgstr "" -#: ../../source/ref-changelog.md:101 +#: ../../source/ref-changelog.md:252 msgid "" "The (experimental) REST API used to be implemented in " "[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" " use [Starlette](https://www.starlette.io/) directly." msgstr "" -#: ../../source/ref-changelog.md:103 +#: ../../source/ref-changelog.md:254 msgid "" "Please note: The REST request-response API is still experimental and will" " likely change significantly over time." 
msgstr "" -#: ../../source/ref-changelog.md:105 +#: ../../source/ref-changelog.md:256 msgid "" "**Introduce experimental gRPC request-response API** " "([#1867](https://github.com/adap/flower/pull/1867), " "[#1901](https://github.com/adap/flower/pull/1901))" msgstr "" -#: ../../source/ref-changelog.md:107 +#: ../../source/ref-changelog.md:258 msgid "" "In addition to the existing gRPC API (based on bidirectional streaming) " "and the experimental REST API, there is now a new gRPC API that uses a " "request-response model to communicate with client nodes." msgstr "" -#: ../../source/ref-changelog.md:109 +#: ../../source/ref-changelog.md:260 msgid "" "Please note: The gRPC request-response API is still experimental and will" " likely change significantly over time." msgstr "" -#: ../../source/ref-changelog.md:111 +#: ../../source/ref-changelog.md:262 msgid "" "**Replace the experimental** `start_client(rest=True)` **with the new** " "`start_client(transport=\"rest\")` " "([#1880](https://github.com/adap/flower/pull/1880))" msgstr "" -#: ../../source/ref-changelog.md:113 +#: ../../source/ref-changelog.md:264 msgid "" "The (experimental) `start_client` argument `rest` was deprecated in " "favour of a new argument `transport`. `start_client(transport=\"rest\")` " @@ -6262,30 +11615,30 @@ msgid "" "argument `rest` will be removed in a future release." msgstr "" -#: ../../source/ref-changelog.md:115 +#: ../../source/ref-changelog.md:266 msgid "" "**Add a new gRPC option** " "([#2197](https://github.com/adap/flower/pull/2197))" msgstr "" -#: ../../source/ref-changelog.md:117 +#: ../../source/ref-changelog.md:268 msgid "" "We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" " option set to 0 by default. This prevents the clients from sending " "keepalive pings when there is no outstanding stream." 
msgstr "" -#: ../../source/ref-changelog.md:119 +#: ../../source/ref-changelog.md:270 msgid "" "**Improve example notebooks** " "([#2005](https://github.com/adap/flower/pull/2005))" msgstr "" -#: ../../source/ref-changelog.md:121 +#: ../../source/ref-changelog.md:272 msgid "There's a new 30min Federated Learning PyTorch tutorial!" msgstr "" -#: ../../source/ref-changelog.md:123 +#: ../../source/ref-changelog.md:274 msgid "" "**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " "[#1873](https://github.com/adap/flower/pull/1873), " @@ -6300,7 +11653,7 @@ msgid "" "[#2183](https://github.com/adap/flower/pull/2183))" msgstr "" -#: ../../source/ref-changelog.md:125 +#: ../../source/ref-changelog.md:276 msgid "" "Many examples have received significant updates, including simplified " "advanced-tensorflow and advanced-pytorch examples, improved macOS " @@ -6309,7 +11662,7 @@ msgid "" "(in addition to `pyproject.toml`)." msgstr "" -#: ../../source/ref-changelog.md:127 +#: ../../source/ref-changelog.md:278 msgid "" "**General improvements** " "([#1872](https://github.com/adap/flower/pull/1872), " @@ -6320,17 +11673,17 @@ msgid "" "[#2171](https://github.com/adap/flower/pull/2171))" msgstr "" -#: ../../source/ref-changelog.md:133 ../../source/ref-changelog.md:197 -#: ../../source/ref-changelog.md:255 ../../source/ref-changelog.md:324 -#: ../../source/ref-changelog.md:386 +#: ../../source/ref-changelog.md:284 ../../source/ref-changelog.md:348 +#: ../../source/ref-changelog.md:406 ../../source/ref-changelog.md:475 +#: ../../source/ref-changelog.md:537 msgid "None" msgstr "" -#: ../../source/ref-changelog.md:135 +#: ../../source/ref-changelog.md:286 msgid "v1.4.0 (2023-04-21)" msgstr "" -#: ../../source/ref-changelog.md:141 +#: ../../source/ref-changelog.md:292 msgid "" "`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " "`Chenyang Ma (Danny)`, `Daniel J. 
Beutel`, `Edoardo`, `Gautam Jajoo`, " @@ -6340,7 +11693,7 @@ msgid "" "`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" msgstr "" -#: ../../source/ref-changelog.md:145 +#: ../../source/ref-changelog.md:296 msgid "" "**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " "example)** ([#1694](https://github.com/adap/flower/pull/1694), " @@ -6351,7 +11704,7 @@ msgid "" "[#1795](https://github.com/adap/flower/pull/1795))" msgstr "" -#: ../../source/ref-changelog.md:147 +#: ../../source/ref-changelog.md:298 msgid "" "XGBoost is a tree-based ensemble machine learning algorithm that uses " "gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" @@ -6362,14 +11715,14 @@ msgid "" " that demonstrates the usage of this new strategy in an XGBoost project." msgstr "" -#: ../../source/ref-changelog.md:149 +#: ../../source/ref-changelog.md:300 msgid "" "**Introduce iOS SDK (preview)** " "([#1621](https://github.com/adap/flower/pull/1621), " "[#1764](https://github.com/adap/flower/pull/1764))" msgstr "" -#: ../../source/ref-changelog.md:151 +#: ../../source/ref-changelog.md:302 msgid "" "This is a major update for anyone wanting to implement Federated Learning" " on iOS mobile devices. We now have a swift iOS SDK present under " @@ -6380,23 +11733,23 @@ msgid "" "been updated!" msgstr "" -#: ../../source/ref-changelog.md:153 +#: ../../source/ref-changelog.md:304 msgid "" "**Introduce new \"What is Federated Learning?\" tutorial** " "([#1657](https://github.com/adap/flower/pull/1657), " "[#1721](https://github.com/adap/flower/pull/1721))" msgstr "" -#: ../../source/ref-changelog.md:155 +#: ../../source/ref-changelog.md:306 msgid "" -"A new [entry-level tutorial](https://flower.dev/docs/framework/tutorial-" +"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" "what-is-federated-learning.html) in our documentation explains the basics" " of Fedetated Learning. 
It enables anyone who's unfamiliar with Federated" " Learning to start their journey with Flower. Forward it to anyone who's " "interested in Federated Learning!" msgstr "" -#: ../../source/ref-changelog.md:157 +#: ../../source/ref-changelog.md:308 msgid "" "**Introduce new Flower Baseline: FedProx MNIST** " "([#1513](https://github.com/adap/flower/pull/1513), " @@ -6405,7 +11758,7 @@ msgid "" "[#1679](https://github.com/adap/flower/pull/1679))" msgstr "" -#: ../../source/ref-changelog.md:159 +#: ../../source/ref-changelog.md:310 msgid "" "This new baseline replicates the MNIST+CNN task from the paper [Federated" " Optimization in Heterogeneous Networks (Li et al., " @@ -6413,13 +11766,13 @@ msgid "" " which aims at making convergence more robust in heterogenous settings." msgstr "" -#: ../../source/ref-changelog.md:161 +#: ../../source/ref-changelog.md:312 msgid "" "**Introduce new Flower Baseline: FedAvg FEMNIST** " "([#1655](https://github.com/adap/flower/pull/1655))" msgstr "" -#: ../../source/ref-changelog.md:163 +#: ../../source/ref-changelog.md:314 msgid "" "This new baseline replicates an experiment evaluating the performance of " "the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " @@ -6427,7 +11780,7 @@ msgid "" "2018)](https://arxiv.org/abs/1812.01097)." msgstr "" -#: ../../source/ref-changelog.md:165 +#: ../../source/ref-changelog.md:316 msgid "" "**Introduce (experimental) REST API** " "([#1594](https://github.com/adap/flower/pull/1594), " @@ -6439,20 +11792,20 @@ msgid "" "[#1733](https://github.com/adap/flower/pull/1733))" msgstr "" -#: ../../source/ref-changelog.md:167 +#: ../../source/ref-changelog.md:318 msgid "" "A new REST API has been introduced as an alternative to the gRPC-based " "communication stack. In this initial version, the REST API only supports " "anonymous clients." 
msgstr "" -#: ../../source/ref-changelog.md:169 +#: ../../source/ref-changelog.md:320 msgid "" "Please note: The REST API is still experimental and will likely change " "significantly over time." msgstr "" -#: ../../source/ref-changelog.md:171 +#: ../../source/ref-changelog.md:322 msgid "" "**Improve the (experimental) Driver API** " "([#1663](https://github.com/adap/flower/pull/1663), " @@ -6466,7 +11819,7 @@ msgid "" "[#1794](https://github.com/adap/flower/pull/1794))" msgstr "" -#: ../../source/ref-changelog.md:173 +#: ../../source/ref-changelog.md:324 msgid "" "The Driver API is still an experimental feature, but this release " "introduces some major upgrades. One of the main improvements is the " @@ -6476,58 +11829,58 @@ msgid "" "improves the memory efficiency of a long-running Flower server." msgstr "" -#: ../../source/ref-changelog.md:175 +#: ../../source/ref-changelog.md:326 msgid "" "**Fix spilling issues related to Ray during simulations** " "([#1698](https://github.com/adap/flower/pull/1698))" msgstr "" -#: ../../source/ref-changelog.md:177 +#: ../../source/ref-changelog.md:328 msgid "" "While running long simulations, `ray` was sometimes spilling huge amounts" " of data that would make the training unable to continue. This is now " "fixed! 🎉" msgstr "" -#: ../../source/ref-changelog.md:179 +#: ../../source/ref-changelog.md:330 msgid "" "**Add new example using** `TabNet` **and Flower** " "([#1725](https://github.com/adap/flower/pull/1725))" msgstr "" -#: ../../source/ref-changelog.md:181 +#: ../../source/ref-changelog.md:332 msgid "" "TabNet is a powerful and flexible framework for training machine learning" " models on tabular data. We now have a federated example using Flower: " "[https://github.com/adap/flower/tree/main/examples/tabnet](https://github.com/adap/flower/tree/main/examples/quickstart_tabnet)." 
msgstr "" -#: ../../source/ref-changelog.md:183 +#: ../../source/ref-changelog.md:334 msgid "" "**Add new how-to guide for monitoring simulations** " "([#1649](https://github.com/adap/flower/pull/1649))" msgstr "" -#: ../../source/ref-changelog.md:185 +#: ../../source/ref-changelog.md:336 msgid "" "We now have a documentation guide to help users monitor their performance" " during simulations." msgstr "" -#: ../../source/ref-changelog.md:187 +#: ../../source/ref-changelog.md:338 msgid "" "**Add training metrics to** `History` **object during simulations** " "([#1696](https://github.com/adap/flower/pull/1696))" msgstr "" -#: ../../source/ref-changelog.md:189 +#: ../../source/ref-changelog.md:340 msgid "" "The `fit_metrics_aggregation_fn` can be used to aggregate training " "metrics, but previous releases did not save the results in the `History` " "object. This is now the case!" msgstr "" -#: ../../source/ref-changelog.md:191 +#: ../../source/ref-changelog.md:342 msgid "" "**General improvements** " "([#1659](https://github.com/adap/flower/pull/1659), " @@ -6581,23 +11934,23 @@ msgid "" "[#1805](https://github.com/adap/flower/pull/1805))" msgstr "" -#: ../../source/ref-changelog.md:199 +#: ../../source/ref-changelog.md:350 msgid "v1.3.0 (2023-02-06)" msgstr "" -#: ../../source/ref-changelog.md:205 +#: ../../source/ref-changelog.md:356 msgid "" "`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " "`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" msgstr "" -#: ../../source/ref-changelog.md:209 +#: ../../source/ref-changelog.md:360 msgid "" "**Add support for** `workload_id` **and** `group_id` **in Driver API** " "([#1595](https://github.com/adap/flower/pull/1595))" msgstr "" -#: ../../source/ref-changelog.md:211 +#: ../../source/ref-changelog.md:362 msgid "" "The (experimental) Driver API now supports a `workload_id` that can be " "used to identify which workload a task belongs to. 
It also supports a new" @@ -6606,50 +11959,50 @@ msgid "" " to decide whether they want to handle a task or not." msgstr "" -#: ../../source/ref-changelog.md:213 +#: ../../source/ref-changelog.md:364 msgid "" "**Make Driver API and Fleet API address configurable** " "([#1637](https://github.com/adap/flower/pull/1637))" msgstr "" -#: ../../source/ref-changelog.md:215 +#: ../../source/ref-changelog.md:366 msgid "" "The (experimental) long-running Flower server (Driver API and Fleet API) " "can now configure the server address of both Driver API (via `--driver-" "api-address`) and Fleet API (via `--fleet-api-address`) when starting:" msgstr "" -#: ../../source/ref-changelog.md:217 +#: ../../source/ref-changelog.md:368 msgid "" "`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " "\"0.0.0.0:8086\"`" msgstr "" -#: ../../source/ref-changelog.md:219 +#: ../../source/ref-changelog.md:370 msgid "Both IPv4 and IPv6 addresses are supported." msgstr "" -#: ../../source/ref-changelog.md:221 +#: ../../source/ref-changelog.md:372 msgid "" "**Add new example of Federated Learning using fastai and Flower** " "([#1598](https://github.com/adap/flower/pull/1598))" msgstr "" -#: ../../source/ref-changelog.md:223 +#: ../../source/ref-changelog.md:374 msgid "" "A new code example (`quickstart_fastai`) demonstrates federated learning " "with [fastai](https://www.fast.ai/) and Flower. You can find it here: " "[quickstart_fastai](https://github.com/adap/flower/tree/main/examples/quickstart_fastai)." 
msgstr "" -#: ../../source/ref-changelog.md:225 +#: ../../source/ref-changelog.md:376 msgid "" "**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" " versions of Android** " "([#1603](https://github.com/adap/flower/pull/1603))" msgstr "" -#: ../../source/ref-changelog.md:227 +#: ../../source/ref-changelog.md:378 msgid "" "The Android code example has received a substantial update: the project " "is compatible with Flower 1.0 (and later), the UI received a full " @@ -6657,13 +12010,13 @@ msgid "" "tooling." msgstr "" -#: ../../source/ref-changelog.md:229 +#: ../../source/ref-changelog.md:380 msgid "" "**Add new `FedProx` strategy** " "([#1619](https://github.com/adap/flower/pull/1619))" msgstr "" -#: ../../source/ref-changelog.md:231 +#: ../../source/ref-changelog.md:382 msgid "" "This " "[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" @@ -6675,25 +12028,25 @@ msgid "" "respect to the global models." msgstr "" -#: ../../source/ref-changelog.md:233 +#: ../../source/ref-changelog.md:384 msgid "" "**Add new metrics to telemetry events** " "([#1640](https://github.com/adap/flower/pull/1640))" msgstr "" -#: ../../source/ref-changelog.md:235 +#: ../../source/ref-changelog.md:386 msgid "" "An updated event structure allows, for example, the clustering of events " "within the same workload." 
msgstr "" -#: ../../source/ref-changelog.md:237 +#: ../../source/ref-changelog.md:388 msgid "" "**Add new custom strategy tutorial section** " "[#1623](https://github.com/adap/flower/pull/1623)" msgstr "" -#: ../../source/ref-changelog.md:239 +#: ../../source/ref-changelog.md:390 msgid "" "The Flower tutorial now has a new section that covers implementing a " "custom strategy from scratch: [Open in " @@ -6701,13 +12054,13 @@ msgid "" "/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:241 +#: ../../source/ref-changelog.md:392 msgid "" "**Add new custom serialization tutorial section** " "([#1622](https://github.com/adap/flower/pull/1622))" msgstr "" -#: ../../source/ref-changelog.md:243 +#: ../../source/ref-changelog.md:394 msgid "" "The Flower tutorial now has a new section that covers custom " "serialization: [Open in " @@ -6715,7 +12068,7 @@ msgid "" "/tutorial-customize-the-client-pytorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:245 +#: ../../source/ref-changelog.md:396 msgid "" "**General improvements** " "([#1638](https://github.com/adap/flower/pull/1638), " @@ -6753,7 +12106,7 @@ msgid "" "[#1586](https://github.com/adap/flower/pull/1586))" msgstr "" -#: ../../source/ref-changelog.md:249 +#: ../../source/ref-changelog.md:400 msgid "" "**Updated documentation** " "([#1629](https://github.com/adap/flower/pull/1629), " @@ -6765,48 +12118,48 @@ msgid "" "[#1614](https://github.com/adap/flower/pull/1614))" msgstr "" -#: ../../source/ref-changelog.md:251 ../../source/ref-changelog.md:318 +#: ../../source/ref-changelog.md:402 ../../source/ref-changelog.md:469 msgid "" "As usual, the documentation has improved quite a bit. It is another step " "in our effort to make the Flower documentation the best documentation of " "any project. Stay tuned and as always, feel free to provide feedback!" 
msgstr "" -#: ../../source/ref-changelog.md:257 +#: ../../source/ref-changelog.md:408 msgid "v1.2.0 (2023-01-13)" msgstr "" -#: ../../source/ref-changelog.md:263 +#: ../../source/ref-changelog.md:414 msgid "" "`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." " Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" msgstr "" -#: ../../source/ref-changelog.md:267 +#: ../../source/ref-changelog.md:418 msgid "" "**Introduce new Flower Baseline: FedAvg MNIST** " "([#1497](https://github.com/adap/flower/pull/1497), " "[#1552](https://github.com/adap/flower/pull/1552))" msgstr "" -#: ../../source/ref-changelog.md:269 +#: ../../source/ref-changelog.md:420 msgid "" "Over the coming weeks, we will be releasing a number of new reference " "implementations useful especially to FL newcomers. They will typically " "revisit well known papers from the literature, and be suitable for " "integration in your own application or for experimentation, in order to " "deepen your knowledge of FL in general. Today's release is the first in " -"this series. [Read more.](https://flower.dev/blog/2023-01-12-fl-starter-" +"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" "pack-fedavg-mnist-cnn/)" msgstr "" -#: ../../source/ref-changelog.md:271 +#: ../../source/ref-changelog.md:422 msgid "" "**Improve GPU support in simulations** " "([#1555](https://github.com/adap/flower/pull/1555))" msgstr "" -#: ../../source/ref-changelog.md:273 +#: ../../source/ref-changelog.md:424 msgid "" "The Ray-based Virtual Client Engine (`start_simulation`) has been updated" " to improve GPU support. The update includes some of the hard-earned " @@ -6814,45 +12167,45 @@ msgid "" "defaults make running GPU-based simulations substantially more robust." 
msgstr "" -#: ../../source/ref-changelog.md:275 +#: ../../source/ref-changelog.md:426 msgid "" "**Improve GPU support in Jupyter Notebook tutorials** " "([#1527](https://github.com/adap/flower/pull/1527), " "[#1558](https://github.com/adap/flower/pull/1558))" msgstr "" -#: ../../source/ref-changelog.md:277 +#: ../../source/ref-changelog.md:428 msgid "" "Some users reported that Jupyter Notebooks have not always been easy to " "use on GPU instances. We listened and made improvements to all of our " "Jupyter notebooks! Check out the updated notebooks here:" msgstr "" -#: ../../source/ref-changelog.md:279 +#: ../../source/ref-changelog.md:430 msgid "" -"[An Introduction to Federated Learning](https://flower.dev/docs/framework" +"[An Introduction to Federated Learning](https://flower.ai/docs/framework" "/tutorial-get-started-with-flower-pytorch.html)" msgstr "" -#: ../../source/ref-changelog.md:280 +#: ../../source/ref-changelog.md:431 msgid "" -"[Strategies in Federated Learning](https://flower.dev/docs/framework" +"[Strategies in Federated Learning](https://flower.ai/docs/framework" "/tutorial-use-a-federated-learning-strategy-pytorch.html)" msgstr "" -#: ../../source/ref-changelog.md:281 +#: ../../source/ref-changelog.md:432 msgid "" -"[Building a Strategy](https://flower.dev/docs/framework/tutorial-build-a" +"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" "-strategy-from-scratch-pytorch.html)" msgstr "" -#: ../../source/ref-changelog.md:282 +#: ../../source/ref-changelog.md:433 msgid "" -"[Client and NumPyClient](https://flower.dev/docs/framework/tutorial-" +"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" "customize-the-client-pytorch.html)" msgstr "" -#: ../../source/ref-changelog.md:284 +#: ../../source/ref-changelog.md:435 msgid "" "**Introduce optional telemetry** " "([#1533](https://github.com/adap/flower/pull/1533), " @@ -6860,7 +12213,7 @@ msgid "" "[#1584](https://github.com/adap/flower/pull/1584))" msgstr "" 
-#: ../../source/ref-changelog.md:286 +#: ../../source/ref-changelog.md:437 msgid "" "After a [request for " "feedback](https://github.com/adap/flower/issues/1534) from the community," @@ -6870,15 +12223,15 @@ msgid "" "used and what challenges users might face." msgstr "" -#: ../../source/ref-changelog.md:288 +#: ../../source/ref-changelog.md:439 msgid "" "**Flower is a friendly framework for collaborative AI and data science.**" " Staying true to this statement, Flower makes it easy to disable " "telemetry for users who do not want to share anonymous usage metrics. " -"[Read more.](https://flower.dev/docs/telemetry.html)." +"[Read more.](https://flower.ai/docs/telemetry.html)." msgstr "" -#: ../../source/ref-changelog.md:290 +#: ../../source/ref-changelog.md:441 msgid "" "**Introduce (experimental) Driver API** " "([#1520](https://github.com/adap/flower/pull/1520), " @@ -6890,7 +12243,7 @@ msgid "" "[#1567](https://github.com/adap/flower/pull/1567))" msgstr "" -#: ../../source/ref-changelog.md:292 +#: ../../source/ref-changelog.md:443 msgid "" "Flower now has a new (experimental) Driver API which will enable fully " "programmable, async, and multi-tenant Federated Learning and Federated " @@ -6899,7 +12252,7 @@ msgid "" "and you can start building those things now, too." msgstr "" -#: ../../source/ref-changelog.md:294 +#: ../../source/ref-changelog.md:445 msgid "" "The Driver API also enables a new execution mode in which the server runs" " indefinitely. Multiple individual workloads can run concurrently and " @@ -6907,58 +12260,58 @@ msgid "" "especially useful for users who want to deploy Flower in production." msgstr "" -#: ../../source/ref-changelog.md:296 +#: ../../source/ref-changelog.md:447 msgid "" "To learn more, check out the `mt-pytorch` code example. We look forward " "to you feedback!" 
msgstr "" -#: ../../source/ref-changelog.md:298 +#: ../../source/ref-changelog.md:449 msgid "" "Please note: *The Driver API is still experimental and will likely change" " significantly over time.*" msgstr "" -#: ../../source/ref-changelog.md:300 +#: ../../source/ref-changelog.md:451 msgid "" "**Add new Federated Analytics with Pandas example** " "([#1469](https://github.com/adap/flower/pull/1469), " "[#1535](https://github.com/adap/flower/pull/1535))" msgstr "" -#: ../../source/ref-changelog.md:302 +#: ../../source/ref-changelog.md:453 msgid "" "A new code example (`quickstart_pandas`) demonstrates federated analytics" " with Pandas and Flower. You can find it here: " "[quickstart_pandas](https://github.com/adap/flower/tree/main/examples/quickstart_pandas)." msgstr "" -#: ../../source/ref-changelog.md:304 +#: ../../source/ref-changelog.md:455 msgid "" "**Add new strategies: Krum and MultiKrum** " "([#1481](https://github.com/adap/flower/pull/1481))" msgstr "" -#: ../../source/ref-changelog.md:306 +#: ../../source/ref-changelog.md:457 msgid "" "Edoardo, a computer science student at the Sapienza University of Rome, " "contributed a new `Krum` strategy that enables users to easily use Krum " "and MultiKrum in their workloads." msgstr "" -#: ../../source/ref-changelog.md:308 +#: ../../source/ref-changelog.md:459 msgid "" "**Update C++ example to be compatible with Flower v1.2.0** " "([#1495](https://github.com/adap/flower/pull/1495))" msgstr "" -#: ../../source/ref-changelog.md:310 +#: ../../source/ref-changelog.md:461 msgid "" "The C++ code example has received a substantial update to make it " "compatible with the latest version of Flower." 
msgstr "" -#: ../../source/ref-changelog.md:312 +#: ../../source/ref-changelog.md:463 msgid "" "**General improvements** " "([#1491](https://github.com/adap/flower/pull/1491), " @@ -6976,7 +12329,7 @@ msgid "" "[#1566](https://github.com/adap/flower/pull/1566))" msgstr "" -#: ../../source/ref-changelog.md:316 +#: ../../source/ref-changelog.md:467 msgid "" "**Updated documentation** " "([#1494](https://github.com/adap/flower/pull/1494), " @@ -6990,24 +12343,24 @@ msgid "" "[#1515](https://github.com/adap/flower/pull/1515))" msgstr "" -#: ../../source/ref-changelog.md:320 +#: ../../source/ref-changelog.md:471 msgid "" "One highlight is the new [first time contributor " -"guide](https://flower.dev/docs/first-time-contributors.html): if you've " +"guide](https://flower.ai/docs/first-time-contributors.html): if you've " "never contributed on GitHub before, this is the perfect place to start!" msgstr "" -#: ../../source/ref-changelog.md:326 +#: ../../source/ref-changelog.md:477 msgid "v1.1.0 (2022-10-31)" msgstr "" -#: ../../source/ref-changelog.md:330 +#: ../../source/ref-changelog.md:481 msgid "" "We would like to give our **special thanks** to all the contributors who " "made the new version of Flower possible (in `git shortlog` order):" msgstr "" -#: ../../source/ref-changelog.md:332 +#: ../../source/ref-changelog.md:483 msgid "" "`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " "Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " @@ -7015,14 +12368,14 @@ msgid "" "`danielnugraha`, `edogab33`" msgstr "" -#: ../../source/ref-changelog.md:336 +#: ../../source/ref-changelog.md:487 msgid "" "**Introduce Differential Privacy wrappers (preview)** " "([#1357](https://github.com/adap/flower/pull/1357), " "[#1460](https://github.com/adap/flower/pull/1460))" msgstr "" -#: ../../source/ref-changelog.md:338 +#: ../../source/ref-changelog.md:489 msgid "" "The first (experimental) preview of pluggable Differential Privacy " "wrappers enables easy configuration and usage of differential privacy " @@ -7031,13 +12384,13 @@ msgid "" "over to the Flower docs, a new explainer goes into more detail." msgstr "" -#: ../../source/ref-changelog.md:340 +#: ../../source/ref-changelog.md:491 msgid "" "**New iOS CoreML code example** " "([#1289](https://github.com/adap/flower/pull/1289))" msgstr "" -#: ../../source/ref-changelog.md:342 +#: ../../source/ref-changelog.md:493 msgid "" "Flower goes iOS! A massive new code example shows how Flower clients can " "be built for iOS. The code example contains both Flower iOS SDK " @@ -7045,39 +12398,39 @@ msgid "" "on CoreML." msgstr "" -#: ../../source/ref-changelog.md:344 +#: ../../source/ref-changelog.md:495 msgid "" "**New FedMedian strategy** " "([#1461](https://github.com/adap/flower/pull/1461))" msgstr "" -#: ../../source/ref-changelog.md:346 +#: ../../source/ref-changelog.md:497 msgid "" "The new `FedMedian` strategy implements Federated Median (FedMedian) by " "[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." 
msgstr "" -#: ../../source/ref-changelog.md:348 +#: ../../source/ref-changelog.md:499 msgid "" "**Log** `Client` **exceptions in Virtual Client Engine** " "([#1493](https://github.com/adap/flower/pull/1493))" msgstr "" -#: ../../source/ref-changelog.md:350 +#: ../../source/ref-changelog.md:501 msgid "" "All `Client` exceptions happening in the VCE are now logged by default " "and not just exposed to the configured `Strategy` (via the `failures` " "argument)." msgstr "" -#: ../../source/ref-changelog.md:352 +#: ../../source/ref-changelog.md:503 msgid "" "**Improve Virtual Client Engine internals** " "([#1401](https://github.com/adap/flower/pull/1401), " "[#1453](https://github.com/adap/flower/pull/1453))" msgstr "" -#: ../../source/ref-changelog.md:354 +#: ../../source/ref-changelog.md:505 msgid "" "Some internals of the Virtual Client Engine have been revamped. The VCE " "now uses Ray 2.0 under the hood, the value type of the `client_resources`" @@ -7085,25 +12438,25 @@ msgid "" "allocated." msgstr "" -#: ../../source/ref-changelog.md:356 +#: ../../source/ref-changelog.md:507 msgid "" "**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " "Client Engine**" msgstr "" -#: ../../source/ref-changelog.md:358 +#: ../../source/ref-changelog.md:509 msgid "" "The Virtual Client Engine now has full support for optional `Client` (and" " `NumPyClient`) methods." msgstr "" -#: ../../source/ref-changelog.md:360 +#: ../../source/ref-changelog.md:511 msgid "" "**Provide type information to packages using** `flwr` " "([#1377](https://github.com/adap/flower/pull/1377))" msgstr "" -#: ../../source/ref-changelog.md:362 +#: ../../source/ref-changelog.md:513 msgid "" "The package `flwr` is now bundled with a `py.typed` file indicating that " "the package is typed. This enables typing support for projects or " @@ -7111,20 +12464,20 @@ msgid "" "static type checkers like `mypy`." 
msgstr "" -#: ../../source/ref-changelog.md:364 +#: ../../source/ref-changelog.md:515 msgid "" "**Updated code example** " "([#1344](https://github.com/adap/flower/pull/1344), " "[#1347](https://github.com/adap/flower/pull/1347))" msgstr "" -#: ../../source/ref-changelog.md:366 +#: ../../source/ref-changelog.md:517 msgid "" "The code examples covering scikit-learn and PyTorch Lightning have been " "updated to work with the latest version of Flower." msgstr "" -#: ../../source/ref-changelog.md:368 +#: ../../source/ref-changelog.md:519 msgid "" "**Updated documentation** " "([#1355](https://github.com/adap/flower/pull/1355), " @@ -7146,32 +12499,32 @@ msgid "" "[#1467](https://github.com/adap/flower/pull/1467))" msgstr "" -#: ../../source/ref-changelog.md:370 +#: ../../source/ref-changelog.md:521 msgid "" "There have been so many documentation updates that it doesn't even make " "sense to list them individually." msgstr "" -#: ../../source/ref-changelog.md:372 +#: ../../source/ref-changelog.md:523 msgid "" "**Restructured documentation** " "([#1387](https://github.com/adap/flower/pull/1387))" msgstr "" -#: ../../source/ref-changelog.md:374 +#: ../../source/ref-changelog.md:525 msgid "" "The documentation has been restructured to make it easier to navigate. " "This is just the first step in a larger effort to make the Flower " "documentation the best documentation of any project ever. Stay tuned!" msgstr "" -#: ../../source/ref-changelog.md:376 +#: ../../source/ref-changelog.md:527 msgid "" "**Open in Colab button** " "([#1389](https://github.com/adap/flower/pull/1389))" msgstr "" -#: ../../source/ref-changelog.md:378 +#: ../../source/ref-changelog.md:529 msgid "" "The four parts of the Flower Federated Learning Tutorial now come with a " "new `Open in Colab` button. No need to install anything on your local " @@ -7179,7 +12532,7 @@ msgid "" "only a single click away." 
msgstr "" -#: ../../source/ref-changelog.md:380 +#: ../../source/ref-changelog.md:531 msgid "" "**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," " [#1470](https://github.com/adap/flower/pull/1470), " @@ -7189,7 +12542,7 @@ msgid "" "[#1475](https://github.com/adap/flower/pull/1475))" msgstr "" -#: ../../source/ref-changelog.md:382 +#: ../../source/ref-changelog.md:533 msgid "" "The Flower Federated Learning Tutorial has two brand-new parts covering " "custom strategies (still WIP) and the distinction between `Client` and " @@ -7197,40 +12550,40 @@ msgid "" "(many small changes and fixes)." msgstr "" -#: ../../source/ref-changelog.md:388 +#: ../../source/ref-changelog.md:539 msgid "v1.0.0 (2022-07-28)" msgstr "" -#: ../../source/ref-changelog.md:390 +#: ../../source/ref-changelog.md:541 msgid "Highlights" msgstr "" -#: ../../source/ref-changelog.md:392 +#: ../../source/ref-changelog.md:543 msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" msgstr "" -#: ../../source/ref-changelog.md:393 +#: ../../source/ref-changelog.md:544 msgid "All `Client`/`NumPyClient` methods are now optional" msgstr "" -#: ../../source/ref-changelog.md:394 +#: ../../source/ref-changelog.md:545 msgid "Configurable `get_parameters`" msgstr "" -#: ../../source/ref-changelog.md:395 +#: ../../source/ref-changelog.md:546 msgid "" "Tons of small API cleanups resulting in a more coherent developer " "experience" msgstr "" -#: ../../source/ref-changelog.md:399 +#: ../../source/ref-changelog.md:550 msgid "" "We would like to give our **special thanks** to all the contributors who " "made Flower 1.0 possible (in reverse [GitHub " "Contributors](https://github.com/adap/flower/graphs/contributors) order):" msgstr "" -#: ../../source/ref-changelog.md:401 +#: ../../source/ref-changelog.md:552 msgid "" "[@rtaiello](https://github.com/rtaiello), " "[@g-pichler](https://github.com/g-pichler), [@rob-" @@ -7270,13 +12623,13 @@ msgid "" 
"[@danieljanes](https://github.com/danieljanes)." msgstr "" -#: ../../source/ref-changelog.md:405 +#: ../../source/ref-changelog.md:556 msgid "" "**All arguments must be passed as keyword arguments** " "([#1338](https://github.com/adap/flower/pull/1338))" msgstr "" -#: ../../source/ref-changelog.md:407 +#: ../../source/ref-changelog.md:558 msgid "" "Pass all arguments as keyword arguments, positional arguments are not " "longer supported. Code that uses positional arguments (e.g., " @@ -7286,14 +12639,14 @@ msgid "" "client=FlowerClient())`)." msgstr "" -#: ../../source/ref-changelog.md:409 +#: ../../source/ref-changelog.md:560 msgid "" "**Introduce configuration object** `ServerConfig` **in** `start_server` " "**and** `start_simulation` " "([#1317](https://github.com/adap/flower/pull/1317))" msgstr "" -#: ../../source/ref-changelog.md:411 +#: ../../source/ref-changelog.md:562 msgid "" "Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " "600.0}`, `start_server` and `start_simulation` now expect a configuration" @@ -7302,37 +12655,37 @@ msgid "" "safe code easier and the default parameters values more transparent." 
msgstr "" -#: ../../source/ref-changelog.md:413 +#: ../../source/ref-changelog.md:564 msgid "" "**Rename built-in strategy parameters for clarity** " "([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/ref-changelog.md:415 +#: ../../source/ref-changelog.md:566 msgid "" "The following built-in strategy parameters were renamed to improve " "readability and consistency with other API's:" msgstr "" -#: ../../source/ref-changelog.md:417 +#: ../../source/ref-changelog.md:568 msgid "`fraction_eval` --> `fraction_evaluate`" msgstr "" -#: ../../source/ref-changelog.md:418 +#: ../../source/ref-changelog.md:569 msgid "`min_eval_clients` --> `min_evaluate_clients`" msgstr "" -#: ../../source/ref-changelog.md:419 +#: ../../source/ref-changelog.md:570 msgid "`eval_fn` --> `evaluate_fn`" msgstr "" -#: ../../source/ref-changelog.md:421 +#: ../../source/ref-changelog.md:572 msgid "" "**Update default arguments of built-in strategies** " "([#1278](https://github.com/adap/flower/pull/1278))" msgstr "" -#: ../../source/ref-changelog.md:423 +#: ../../source/ref-changelog.md:574 msgid "" "All built-in strategies now use `fraction_fit=1.0` and " "`fraction_evaluate=1.0`, which means they select *all* currently " @@ -7341,29 +12694,29 @@ msgid "" "initializing the strategy in the following way:" msgstr "" -#: ../../source/ref-changelog.md:425 +#: ../../source/ref-changelog.md:576 msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" msgstr "" -#: ../../source/ref-changelog.md:427 +#: ../../source/ref-changelog.md:578 msgid "" "**Add** `server_round` **to** `Strategy.evaluate` " "([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/ref-changelog.md:429 +#: ../../source/ref-changelog.md:580 msgid "" "The `Strategy` method `evaluate` now receives the current round of " "federated learning/evaluation as the first parameter." 
msgstr "" -#: ../../source/ref-changelog.md:431 +#: ../../source/ref-changelog.md:582 msgid "" "**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " "([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/ref-changelog.md:433 +#: ../../source/ref-changelog.md:584 msgid "" "The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " "three parameters: (1) The current round of federated learning/evaluation " @@ -7371,13 +12724,13 @@ msgid "" "and (3) a config dictionary (`config`)." msgstr "" -#: ../../source/ref-changelog.md:435 +#: ../../source/ref-changelog.md:586 msgid "" "**Rename** `rnd` **to** `server_round` " "([#1321](https://github.com/adap/flower/pull/1321))" msgstr "" -#: ../../source/ref-changelog.md:437 +#: ../../source/ref-changelog.md:588 msgid "" "Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " "`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " @@ -7386,73 +12739,73 @@ msgid "" "has been renamed from `rnd` to `server_round`." msgstr "" -#: ../../source/ref-changelog.md:439 +#: ../../source/ref-changelog.md:590 msgid "" "**Move** `flwr.dataset` **to** `flwr_baselines` " "([#1273](https://github.com/adap/flower/pull/1273))" msgstr "" -#: ../../source/ref-changelog.md:441 +#: ../../source/ref-changelog.md:592 msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." msgstr "" -#: ../../source/ref-changelog.md:443 +#: ../../source/ref-changelog.md:594 msgid "" "**Remove experimental strategies** " "([#1280](https://github.com/adap/flower/pull/1280))" msgstr "" -#: ../../source/ref-changelog.md:445 +#: ../../source/ref-changelog.md:596 msgid "" "Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " "`FedFSv1`)." 
msgstr "" -#: ../../source/ref-changelog.md:447 +#: ../../source/ref-changelog.md:598 msgid "" "**Rename** `Weights` **to** `NDArrays` " "([#1258](https://github.com/adap/flower/pull/1258), " "[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/ref-changelog.md:449 +#: ../../source/ref-changelog.md:600 msgid "" "`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " "capture what this type is all about." msgstr "" -#: ../../source/ref-changelog.md:451 +#: ../../source/ref-changelog.md:602 msgid "" "**Remove antiquated** `force_final_distributed_eval` **from** " "`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " "[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/ref-changelog.md:453 +#: ../../source/ref-changelog.md:604 msgid "" "The `start_server` parameter `force_final_distributed_eval` has long been" " a historic artefact, in this release it is finally gone for good." msgstr "" -#: ../../source/ref-changelog.md:455 +#: ../../source/ref-changelog.md:606 msgid "" "**Make** `get_parameters` **configurable** " "([#1242](https://github.com/adap/flower/pull/1242))" msgstr "" -#: ../../source/ref-changelog.md:457 +#: ../../source/ref-changelog.md:608 msgid "" "The `get_parameters` method now accepts a configuration dictionary, just " "like `get_properties`, `fit`, and `evaluate`." msgstr "" -#: ../../source/ref-changelog.md:459 +#: ../../source/ref-changelog.md:610 msgid "" "**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " "**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -#: ../../source/ref-changelog.md:461 +#: ../../source/ref-changelog.md:612 msgid "" "The `start_simulation` function now accepts a configuration dictionary " "`config` instead of the `num_rounds` integer. This improves the " @@ -7460,26 +12813,26 @@ msgid "" "transitioning between the two easier." 
msgstr "" -#: ../../source/ref-changelog.md:465 +#: ../../source/ref-changelog.md:616 msgid "" "**Support Python 3.10** " "([#1320](https://github.com/adap/flower/pull/1320))" msgstr "" -#: ../../source/ref-changelog.md:467 +#: ../../source/ref-changelog.md:618 msgid "" "The previous Flower release introduced experimental support for Python " "3.10, this release declares Python 3.10 support as stable." msgstr "" -#: ../../source/ref-changelog.md:469 +#: ../../source/ref-changelog.md:620 msgid "" "**Make all** `Client` **and** `NumPyClient` **methods optional** " "([#1260](https://github.com/adap/flower/pull/1260), " "[#1277](https://github.com/adap/flower/pull/1277))" msgstr "" -#: ../../source/ref-changelog.md:471 +#: ../../source/ref-changelog.md:622 msgid "" "The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " "`fit`, and `evaluate` are all optional. This enables writing clients that" @@ -7487,13 +12840,13 @@ msgid "" "implement `evaluate` when using centralized evaluation!" msgstr "" -#: ../../source/ref-changelog.md:473 +#: ../../source/ref-changelog.md:624 msgid "" "**Enable passing a** `Server` **instance to** `start_simulation` " "([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -#: ../../source/ref-changelog.md:475 +#: ../../source/ref-changelog.md:626 msgid "" "Similar to `start_server`, `start_simulation` now accepts a full `Server`" " instance. This enables users to heavily customize the execution of " @@ -7501,7 +12854,7 @@ msgid "" " Virtual Client Engine." 
msgstr "" -#: ../../source/ref-changelog.md:477 +#: ../../source/ref-changelog.md:628 msgid "" "**Update code examples** " "([#1291](https://github.com/adap/flower/pull/1291), " @@ -7509,50 +12862,50 @@ msgid "" "[#1282](https://github.com/adap/flower/pull/1282))" msgstr "" -#: ../../source/ref-changelog.md:479 +#: ../../source/ref-changelog.md:630 msgid "" "Many code examples received small or even large maintenance updates, " "among them are" msgstr "" -#: ../../source/ref-changelog.md:481 +#: ../../source/ref-changelog.md:632 msgid "`scikit-learn`" msgstr "" -#: ../../source/ref-changelog.md:482 +#: ../../source/ref-changelog.md:633 msgid "`simulation_pytorch`" msgstr "" -#: ../../source/ref-changelog.md:483 +#: ../../source/ref-changelog.md:634 msgid "`quickstart_pytorch`" msgstr "" -#: ../../source/ref-changelog.md:484 +#: ../../source/ref-changelog.md:635 msgid "`quickstart_simulation`" msgstr "" -#: ../../source/ref-changelog.md:485 +#: ../../source/ref-changelog.md:636 msgid "`quickstart_tensorflow`" msgstr "" -#: ../../source/ref-changelog.md:486 +#: ../../source/ref-changelog.md:637 msgid "`advanced_tensorflow`" msgstr "" -#: ../../source/ref-changelog.md:488 +#: ../../source/ref-changelog.md:639 msgid "" "**Remove the obsolete simulation example** " "([#1328](https://github.com/adap/flower/pull/1328))" msgstr "" -#: ../../source/ref-changelog.md:490 +#: ../../source/ref-changelog.md:641 msgid "" "Removes the obsolete `simulation` example and renames " "`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " "naming of `simulation_pytorch`" msgstr "" -#: ../../source/ref-changelog.md:492 +#: ../../source/ref-changelog.md:643 msgid "" "**Update documentation** " "([#1223](https://github.com/adap/flower/pull/1223), " @@ -7567,7 +12920,7 @@ msgid "" "[#1307](https://github.com/adap/flower/pull/1307))" msgstr "" -#: ../../source/ref-changelog.md:494 +#: ../../source/ref-changelog.md:645 msgid "" "One substantial documentation update fixes 
multiple smaller rendering " "issues, makes titles more succinct to improve navigation, removes a " @@ -7577,24 +12930,24 @@ msgid "" "fixes a number of smaller details!" msgstr "" -#: ../../source/ref-changelog.md:496 ../../source/ref-changelog.md:551 -#: ../../source/ref-changelog.md:620 ../../source/ref-changelog.md:659 +#: ../../source/ref-changelog.md:647 ../../source/ref-changelog.md:702 +#: ../../source/ref-changelog.md:771 ../../source/ref-changelog.md:810 msgid "**Minor updates**" msgstr "" -#: ../../source/ref-changelog.md:498 +#: ../../source/ref-changelog.md:649 msgid "" "Add round number to fit and evaluate log messages " "([#1266](https://github.com/adap/flower/pull/1266))" msgstr "" -#: ../../source/ref-changelog.md:499 +#: ../../source/ref-changelog.md:650 msgid "" "Add secure gRPC connection to the `advanced_tensorflow` code example " "([#847](https://github.com/adap/flower/pull/847))" msgstr "" -#: ../../source/ref-changelog.md:500 +#: ../../source/ref-changelog.md:651 msgid "" "Update developer tooling " "([#1231](https://github.com/adap/flower/pull/1231), " @@ -7603,7 +12956,7 @@ msgid "" "[#1310](https://github.com/adap/flower/pull/1310))" msgstr "" -#: ../../source/ref-changelog.md:501 +#: ../../source/ref-changelog.md:652 msgid "" "Rename ProtoBuf messages to improve consistency " "([#1214](https://github.com/adap/flower/pull/1214), " @@ -7611,11 +12964,11 @@ msgid "" "[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/ref-changelog.md:503 +#: ../../source/ref-changelog.md:654 msgid "v0.19.0 (2022-05-18)" msgstr "" -#: ../../source/ref-changelog.md:507 +#: ../../source/ref-changelog.md:658 msgid "" "**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " "([#919](https://github.com/adap/flower/pull/919), " @@ -7623,50 +12976,50 @@ msgid "" "[#914](https://github.com/adap/flower/pull/914))" msgstr "" -#: ../../source/ref-changelog.md:509 +#: ../../source/ref-changelog.md:660 msgid "" "The first preview release of 
Flower Baselines has arrived! We're " "kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " "FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " -"to use [Flower Baselines](https://flower.dev/docs/using-baselines.html). " +"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " "With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.dev/docs/contributing-" +"[contribute their own baselines](https://flower.ai/docs/contributing-" "baselines.html)." msgstr "" -#: ../../source/ref-changelog.md:511 +#: ../../source/ref-changelog.md:662 msgid "" "**C++ client SDK (preview) and code example** " "([#1111](https://github.com/adap/flower/pull/1111))" msgstr "" -#: ../../source/ref-changelog.md:513 +#: ../../source/ref-changelog.md:664 msgid "" "Preview support for Flower clients written in C++. The C++ preview " "includes a Flower client SDK and a quickstart code example that " "demonstrates a simple C++ client using the SDK." msgstr "" -#: ../../source/ref-changelog.md:515 +#: ../../source/ref-changelog.md:666 msgid "" "**Add experimental support for Python 3.10 and Python 3.11** " "([#1135](https://github.com/adap/flower/pull/1135))" msgstr "" -#: ../../source/ref-changelog.md:517 +#: ../../source/ref-changelog.md:668 msgid "" "Python 3.10 is the latest stable release of Python and Python 3.11 is due" " to be released in October. This Flower release adds experimental support" " for both Python versions." msgstr "" -#: ../../source/ref-changelog.md:519 +#: ../../source/ref-changelog.md:670 msgid "" "**Aggregate custom metrics through user-provided functions** " "([#1144](https://github.com/adap/flower/pull/1144))" msgstr "" -#: ../../source/ref-changelog.md:521 +#: ../../source/ref-changelog.md:672 msgid "" "Custom metrics (e.g., `accuracy`) can now be aggregated without having to" " customize the strategy. 
Built-in strategies support two new arguments, " @@ -7674,13 +13027,13 @@ msgid "" "allow passing custom metric aggregation functions." msgstr "" -#: ../../source/ref-changelog.md:523 +#: ../../source/ref-changelog.md:674 msgid "" "**User-configurable round timeout** " "([#1162](https://github.com/adap/flower/pull/1162))" msgstr "" -#: ../../source/ref-changelog.md:525 +#: ../../source/ref-changelog.md:676 msgid "" "A new configuration value allows the round timeout to be set for " "`start_server` and `start_simulation`. If the `config` dictionary " @@ -7689,14 +13042,14 @@ msgid "" "connection." msgstr "" -#: ../../source/ref-changelog.md:527 +#: ../../source/ref-changelog.md:678 msgid "" "**Enable both federated evaluation and centralized evaluation to be used " "at the same time in all built-in strategies** " "([#1091](https://github.com/adap/flower/pull/1091))" msgstr "" -#: ../../source/ref-changelog.md:529 +#: ../../source/ref-changelog.md:680 msgid "" "Built-in strategies can now perform both federated evaluation (i.e., " "client-side) and centralized evaluation (i.e., server-side) in the same " @@ -7704,155 +13057,155 @@ msgid "" " `0.0`." 
msgstr "" -#: ../../source/ref-changelog.md:531 +#: ../../source/ref-changelog.md:682 msgid "" "**Two new Jupyter Notebook tutorials** " "([#1141](https://github.com/adap/flower/pull/1141))" msgstr "" -#: ../../source/ref-changelog.md:533 +#: ../../source/ref-changelog.md:684 msgid "" "Two Jupyter Notebook tutorials (compatible with Google Colab) explain " "basic and intermediate Flower features:" msgstr "" -#: ../../source/ref-changelog.md:535 +#: ../../source/ref-changelog.md:686 msgid "" "*An Introduction to Federated Learning*: [Open in " "Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" "-Intro-to-FL-PyTorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:537 +#: ../../source/ref-changelog.md:688 msgid "" "*Using Strategies in Federated Learning*: [Open in " "Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" "-Strategies-in-FL-PyTorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:539 +#: ../../source/ref-changelog.md:690 msgid "" "**New FedAvgM strategy (Federated Averaging with Server Momentum)** " "([#1076](https://github.com/adap/flower/pull/1076))" msgstr "" -#: ../../source/ref-changelog.md:541 +#: ../../source/ref-changelog.md:692 msgid "" "The new `FedAvgM` strategy implements Federated Averaging with Server " "Momentum \\[Hsu et al., 2019\\]." msgstr "" -#: ../../source/ref-changelog.md:543 +#: ../../source/ref-changelog.md:694 msgid "" "**New advanced PyTorch code example** " "([#1007](https://github.com/adap/flower/pull/1007))" msgstr "" -#: ../../source/ref-changelog.md:545 +#: ../../source/ref-changelog.md:696 msgid "" "A new code example (`advanced_pytorch`) demonstrates advanced Flower " "concepts with PyTorch." 
msgstr "" -#: ../../source/ref-changelog.md:547 +#: ../../source/ref-changelog.md:698 msgid "" "**New JAX code example** " "([#906](https://github.com/adap/flower/pull/906), " "[#1143](https://github.com/adap/flower/pull/1143))" msgstr "" -#: ../../source/ref-changelog.md:549 +#: ../../source/ref-changelog.md:700 msgid "" "A new code example (`jax_from_centralized_to_federated`) shows federated " "learning with JAX and Flower." msgstr "" -#: ../../source/ref-changelog.md:553 +#: ../../source/ref-changelog.md:704 msgid "" "New option to keep Ray running if Ray was already initialized in " "`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" msgstr "" -#: ../../source/ref-changelog.md:554 +#: ../../source/ref-changelog.md:705 msgid "" "Add support for custom `ClientManager` as a `start_simulation` parameter " "([#1171](https://github.com/adap/flower/pull/1171))" msgstr "" -#: ../../source/ref-changelog.md:555 +#: ../../source/ref-changelog.md:706 msgid "" "New documentation for [implementing " -"strategies](https://flower.dev/docs/framework/how-to-implement-" +"strategies](https://flower.ai/docs/framework/how-to-implement-" "strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " "[#1175](https://github.com/adap/flower/pull/1175))" msgstr "" -#: ../../source/ref-changelog.md:556 +#: ../../source/ref-changelog.md:707 msgid "" "New mobile-friendly documentation theme " "([#1174](https://github.com/adap/flower/pull/1174))" msgstr "" -#: ../../source/ref-changelog.md:557 +#: ../../source/ref-changelog.md:708 msgid "" "Limit version range for (optional) `ray` dependency to include only " "compatible releases (`>=1.9.2,<1.12.0`) " "([#1205](https://github.com/adap/flower/pull/1205))" msgstr "" -#: ../../source/ref-changelog.md:561 +#: ../../source/ref-changelog.md:712 msgid "" "**Remove deprecated support for Python 3.6** " "([#871](https://github.com/adap/flower/pull/871))" msgstr "" -#: ../../source/ref-changelog.md:562 +#: 
../../source/ref-changelog.md:713 msgid "" "**Remove deprecated KerasClient** " "([#857](https://github.com/adap/flower/pull/857))" msgstr "" -#: ../../source/ref-changelog.md:563 +#: ../../source/ref-changelog.md:714 msgid "" "**Remove deprecated no-op extra installs** " "([#973](https://github.com/adap/flower/pull/973))" msgstr "" -#: ../../source/ref-changelog.md:564 +#: ../../source/ref-changelog.md:715 msgid "" "**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " "([#869](https://github.com/adap/flower/pull/869))" msgstr "" -#: ../../source/ref-changelog.md:565 +#: ../../source/ref-changelog.md:716 msgid "" "**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " "([#1107](https://github.com/adap/flower/pull/1107))" msgstr "" -#: ../../source/ref-changelog.md:566 +#: ../../source/ref-changelog.md:717 msgid "" "**Remove deprecated DefaultStrategy strategy** " "([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/ref-changelog.md:567 +#: ../../source/ref-changelog.md:718 msgid "" "**Remove deprecated support for eval_fn accuracy return value** " "([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/ref-changelog.md:568 +#: ../../source/ref-changelog.md:719 msgid "" "**Remove deprecated support for passing initial parameters as NumPy " "ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/ref-changelog.md:570 +#: ../../source/ref-changelog.md:721 msgid "v0.18.0 (2022-02-28)" msgstr "" -#: ../../source/ref-changelog.md:574 +#: ../../source/ref-changelog.md:725 msgid "" "**Improved Virtual Client Engine compatibility with Jupyter Notebook / " "Google Colab** ([#866](https://github.com/adap/flower/pull/866), " @@ -7861,7 +13214,7 @@ msgid "" "[#1036](https://github.com/adap/flower/pull/1036))" msgstr "" -#: ../../source/ref-changelog.md:576 +#: ../../source/ref-changelog.md:727 msgid "" "Simulations (using the Virtual Client Engine through 
`start_simulation`) " "now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " @@ -7869,38 +13222,38 @@ msgid "" "flwr[simulation]`)." msgstr "" -#: ../../source/ref-changelog.md:578 +#: ../../source/ref-changelog.md:729 msgid "" "**New Jupyter Notebook code example** " "([#833](https://github.com/adap/flower/pull/833))" msgstr "" -#: ../../source/ref-changelog.md:580 +#: ../../source/ref-changelog.md:731 msgid "" "A new code example (`quickstart_simulation`) demonstrates Flower " "simulations using the Virtual Client Engine through Jupyter Notebook " "(incl. Google Colab)." msgstr "" -#: ../../source/ref-changelog.md:582 +#: ../../source/ref-changelog.md:733 msgid "" "**Client properties (feature preview)** " "([#795](https://github.com/adap/flower/pull/795))" msgstr "" -#: ../../source/ref-changelog.md:584 +#: ../../source/ref-changelog.md:735 msgid "" "Clients can implement a new method `get_properties` to enable server-side" " strategies to query client properties." msgstr "" -#: ../../source/ref-changelog.md:586 +#: ../../source/ref-changelog.md:737 msgid "" "**Experimental Android support with TFLite** " "([#865](https://github.com/adap/flower/pull/865))" msgstr "" -#: ../../source/ref-changelog.md:588 +#: ../../source/ref-changelog.md:739 msgid "" "Android support has finally arrived in `main`! Flower is both client-" "agnostic and framework-agnostic by design. One can integrate arbitrary " @@ -7908,7 +13261,7 @@ msgid "" "become a lot easier." msgstr "" -#: ../../source/ref-changelog.md:590 +#: ../../source/ref-changelog.md:741 msgid "" "The example uses TFLite on the client side, along with a new " "`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " @@ -7917,13 +13270,13 @@ msgid "" " functionality from `FedAvgAndroid`." 
msgstr "" -#: ../../source/ref-changelog.md:592 +#: ../../source/ref-changelog.md:743 msgid "" "**Make gRPC keepalive time user-configurable and decrease default " "keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" msgstr "" -#: ../../source/ref-changelog.md:594 +#: ../../source/ref-changelog.md:745 msgid "" "The default gRPC keepalive time has been reduced to increase the " "compatibility of Flower with more cloud environments (for example, " @@ -7931,31 +13284,31 @@ msgid "" " gRPC stack based on specific requirements." msgstr "" -#: ../../source/ref-changelog.md:596 +#: ../../source/ref-changelog.md:747 msgid "" "**New differential privacy example using Opacus and PyTorch** " "([#805](https://github.com/adap/flower/pull/805))" msgstr "" -#: ../../source/ref-changelog.md:598 +#: ../../source/ref-changelog.md:749 msgid "" "A new code example (`opacus`) demonstrates differentially-private " "federated learning with Opacus, PyTorch, and Flower." msgstr "" -#: ../../source/ref-changelog.md:600 +#: ../../source/ref-changelog.md:751 msgid "" "**New Hugging Face Transformers code example** " "([#863](https://github.com/adap/flower/pull/863))" msgstr "" -#: ../../source/ref-changelog.md:602 +#: ../../source/ref-changelog.md:753 msgid "" "A new code example (`quickstart_huggingface`) demonstrates usage of " "Hugging Face Transformers with Flower." msgstr "" -#: ../../source/ref-changelog.md:604 +#: ../../source/ref-changelog.md:755 msgid "" "**New MLCube code example** " "([#779](https://github.com/adap/flower/pull/779), " @@ -7964,13 +13317,13 @@ msgid "" "[#1090](https://github.com/adap/flower/pull/1090))" msgstr "" -#: ../../source/ref-changelog.md:606 +#: ../../source/ref-changelog.md:757 msgid "" "A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " "with Flower." 
msgstr "" -#: ../../source/ref-changelog.md:608 +#: ../../source/ref-changelog.md:759 msgid "" "**SSL-enabled server and client** " "([#842](https://github.com/adap/flower/pull/842), " @@ -7981,33 +13334,33 @@ msgid "" "[#994](https://github.com/adap/flower/pull/994))" msgstr "" -#: ../../source/ref-changelog.md:610 +#: ../../source/ref-changelog.md:761 msgid "" "SSL enables secure encrypted connections between clients and servers. " "This release open-sources the Flower secure gRPC implementation to make " "encrypted communication channels accessible to all Flower users." msgstr "" -#: ../../source/ref-changelog.md:612 +#: ../../source/ref-changelog.md:763 msgid "" "**Updated** `FedAdam` **and** `FedYogi` **strategies** " "([#885](https://github.com/adap/flower/pull/885), " "[#895](https://github.com/adap/flower/pull/895))" msgstr "" -#: ../../source/ref-changelog.md:614 +#: ../../source/ref-changelog.md:765 msgid "" "`FedAdam` and `FedAdam` match the latest version of the Adaptive " "Federated Optimization paper." msgstr "" -#: ../../source/ref-changelog.md:616 +#: ../../source/ref-changelog.md:767 msgid "" "**Initialize** `start_simulation` **with a list of client IDs** " "([#860](https://github.com/adap/flower/pull/860))" msgstr "" -#: ../../source/ref-changelog.md:618 +#: ../../source/ref-changelog.md:769 msgid "" "`start_simulation` can now be called with a list of client IDs " "(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " @@ -8016,55 +13369,55 @@ msgid "" "identifiers." 
msgstr "" -#: ../../source/ref-changelog.md:622 +#: ../../source/ref-changelog.md:773 msgid "" "Update `num_examples` calculation in PyTorch code examples in " "([#909](https://github.com/adap/flower/pull/909))" msgstr "" -#: ../../source/ref-changelog.md:623 +#: ../../source/ref-changelog.md:774 msgid "" "Expose Flower version through `flwr.__version__` " "([#952](https://github.com/adap/flower/pull/952))" msgstr "" -#: ../../source/ref-changelog.md:624 +#: ../../source/ref-changelog.md:775 msgid "" "`start_server` in `app.py` now returns a `History` object containing " "metrics from training ([#974](https://github.com/adap/flower/pull/974))" msgstr "" -#: ../../source/ref-changelog.md:625 +#: ../../source/ref-changelog.md:776 msgid "" "Make `max_workers` (used by `ThreadPoolExecutor`) configurable " "([#978](https://github.com/adap/flower/pull/978))" msgstr "" -#: ../../source/ref-changelog.md:626 +#: ../../source/ref-changelog.md:777 msgid "" "Increase sleep time after server start to three seconds in all code " "examples ([#1086](https://github.com/adap/flower/pull/1086))" msgstr "" -#: ../../source/ref-changelog.md:627 +#: ../../source/ref-changelog.md:778 msgid "" "Added a new FAQ section to the documentation " "([#948](https://github.com/adap/flower/pull/948))" msgstr "" -#: ../../source/ref-changelog.md:628 +#: ../../source/ref-changelog.md:779 msgid "" "And many more under-the-hood changes, library updates, documentation " "changes, and tooling improvements!" 
msgstr "" -#: ../../source/ref-changelog.md:632 +#: ../../source/ref-changelog.md:783 msgid "" "**Removed** `flwr_example` **and** `flwr_experimental` **from release " "build** ([#869](https://github.com/adap/flower/pull/869))" msgstr "" -#: ../../source/ref-changelog.md:634 +#: ../../source/ref-changelog.md:785 msgid "" "The packages `flwr_example` and `flwr_experimental` have been deprecated " "since Flower 0.12.0 and they are not longer included in Flower release " @@ -8073,11 +13426,11 @@ msgid "" "an upcoming release." msgstr "" -#: ../../source/ref-changelog.md:636 +#: ../../source/ref-changelog.md:787 msgid "v0.17.0 (2021-09-24)" msgstr "" -#: ../../source/ref-changelog.md:640 +#: ../../source/ref-changelog.md:791 msgid "" "**Experimental virtual client engine** " "([#781](https://github.com/adap/flower/pull/781) " @@ -8085,7 +13438,7 @@ msgid "" "[#791](https://github.com/adap/flower/pull/791))" msgstr "" -#: ../../source/ref-changelog.md:642 +#: ../../source/ref-changelog.md:793 msgid "" "One of Flower's goals is to enable research at scale. This release " "enables a first (experimental) peek at a major new feature, codenamed the" @@ -8095,7 +13448,7 @@ msgid "" "code examples called `quickstart_simulation` and `simulation_pytorch`." msgstr "" -#: ../../source/ref-changelog.md:644 +#: ../../source/ref-changelog.md:795 msgid "" "The feature is still experimental, so there's no stability guarantee for " "the API. It's also not quite ready for prime time and comes with a few " @@ -8103,86 +13456,86 @@ msgid "" "out and share their thoughts." msgstr "" -#: ../../source/ref-changelog.md:646 +#: ../../source/ref-changelog.md:797 msgid "" "**New built-in strategies** " "([#828](https://github.com/adap/flower/pull/828) " "[#822](https://github.com/adap/flower/pull/822))" msgstr "" -#: ../../source/ref-changelog.md:648 +#: ../../source/ref-changelog.md:799 msgid "" "FedYogi - Federated learning strategy using Yogi on server-side. 
" "Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -#: ../../source/ref-changelog.md:649 +#: ../../source/ref-changelog.md:800 msgid "" "FedAdam - Federated learning strategy using Adam on server-side. " "Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -#: ../../source/ref-changelog.md:651 +#: ../../source/ref-changelog.md:802 msgid "" "**New PyTorch Lightning code example** " "([#617](https://github.com/adap/flower/pull/617))" msgstr "" -#: ../../source/ref-changelog.md:653 +#: ../../source/ref-changelog.md:804 msgid "" "**New Variational Auto-Encoder code example** " "([#752](https://github.com/adap/flower/pull/752))" msgstr "" -#: ../../source/ref-changelog.md:655 +#: ../../source/ref-changelog.md:806 msgid "" "**New scikit-learn code example** " "([#748](https://github.com/adap/flower/pull/748))" msgstr "" -#: ../../source/ref-changelog.md:657 +#: ../../source/ref-changelog.md:808 msgid "" "**New experimental TensorBoard strategy** " "([#789](https://github.com/adap/flower/pull/789))" msgstr "" -#: ../../source/ref-changelog.md:661 +#: ../../source/ref-changelog.md:812 msgid "" "Improved advanced TensorFlow code example " "([#769](https://github.com/adap/flower/pull/769))" msgstr "" -#: ../../source/ref-changelog.md:662 +#: ../../source/ref-changelog.md:813 msgid "" "Warning when `min_available_clients` is misconfigured " "([#830](https://github.com/adap/flower/pull/830))" msgstr "" -#: ../../source/ref-changelog.md:663 +#: ../../source/ref-changelog.md:814 msgid "" "Improved gRPC server docs " "([#841](https://github.com/adap/flower/pull/841))" msgstr "" -#: ../../source/ref-changelog.md:664 +#: ../../source/ref-changelog.md:815 msgid "" "Improved error message in `NumPyClient` " "([#851](https://github.com/adap/flower/pull/851))" msgstr "" -#: ../../source/ref-changelog.md:665 +#: ../../source/ref-changelog.md:816 msgid "" "Improved PyTorch quickstart code example " "([#852](https://github.com/adap/flower/pull/852))" 
msgstr "" -#: ../../source/ref-changelog.md:669 +#: ../../source/ref-changelog.md:820 msgid "" "**Disabled final distributed evaluation** " "([#800](https://github.com/adap/flower/pull/800))" msgstr "" -#: ../../source/ref-changelog.md:671 +#: ../../source/ref-changelog.md:822 msgid "" "Prior behaviour was to perform a final round of distributed evaluation on" " all connected clients, which is often not required (e.g., when using " @@ -8190,13 +13543,13 @@ msgid "" "`force_final_distributed_eval=True` to `start_server`." msgstr "" -#: ../../source/ref-changelog.md:673 +#: ../../source/ref-changelog.md:824 msgid "" "**Renamed q-FedAvg strategy** " "([#802](https://github.com/adap/flower/pull/802))" msgstr "" -#: ../../source/ref-changelog.md:675 +#: ../../source/ref-changelog.md:826 msgid "" "The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " "the notation given in the original paper (q-FFL is the optimization " @@ -8205,14 +13558,14 @@ msgid "" " (it will be removed in a future release)." msgstr "" -#: ../../source/ref-changelog.md:677 +#: ../../source/ref-changelog.md:828 msgid "" "**Deprecated and renamed code example** `simulation_pytorch` **to** " "`simulation_pytorch_legacy` " "([#791](https://github.com/adap/flower/pull/791))" msgstr "" -#: ../../source/ref-changelog.md:679 +#: ../../source/ref-changelog.md:830 msgid "" "This example has been replaced by a new example. The new example is based" " on the experimental virtual client engine, which will become the new " @@ -8221,31 +13574,27 @@ msgid "" "removed in the future." 
msgstr "" -#: ../../source/ref-changelog.md:681 +#: ../../source/ref-changelog.md:832 msgid "v0.16.0 (2021-05-11)" msgstr "" -#: ../../source/ref-changelog.md:685 +#: ../../source/ref-changelog.md:836 msgid "" "**New built-in strategies** " "([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/ref-changelog.md:687 +#: ../../source/ref-changelog.md:838 msgid "(abstract) FedOpt" msgstr "" -#: ../../source/ref-changelog.md:688 -msgid "FedAdagrad" -msgstr "" - -#: ../../source/ref-changelog.md:690 +#: ../../source/ref-changelog.md:841 msgid "" "**Custom metrics for server and strategies** " "([#717](https://github.com/adap/flower/pull/717))" msgstr "" -#: ../../source/ref-changelog.md:692 +#: ../../source/ref-changelog.md:843 msgid "" "The Flower server is now fully task-agnostic, all remaining instances of " "task-specific metrics (such as `accuracy`) have been replaced by custom " @@ -8254,7 +13603,7 @@ msgid "" "release, custom metrics replace task-specific metrics on the server." msgstr "" -#: ../../source/ref-changelog.md:694 +#: ../../source/ref-changelog.md:845 msgid "" "Custom metric dictionaries are now used in two user-facing APIs: they are" " returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " @@ -8264,7 +13613,7 @@ msgid "" "track of." msgstr "" -#: ../../source/ref-changelog.md:696 +#: ../../source/ref-changelog.md:847 msgid "" "Stratey implementations should migrate their `aggregate_fit` and " "`aggregate_evaluate` methods to the new return type (e.g., by simply " @@ -8272,19 +13621,19 @@ msgid "" " from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." msgstr "" -#: ../../source/ref-changelog.md:698 +#: ../../source/ref-changelog.md:849 msgid "" "Flower 0.15-style return types are deprecated (but still supported), " "compatibility will be removed in a future release." 
msgstr "" -#: ../../source/ref-changelog.md:700 +#: ../../source/ref-changelog.md:851 msgid "" "**Migration warnings for deprecated functionality** " "([#690](https://github.com/adap/flower/pull/690))" msgstr "" -#: ../../source/ref-changelog.md:702 +#: ../../source/ref-changelog.md:853 msgid "" "Earlier versions of Flower were often migrated to new APIs, while " "maintaining compatibility with legacy APIs. This release introduces " @@ -8293,7 +13642,7 @@ msgid "" "recent APIs, thus easing the transition from one release to another." msgstr "" -#: ../../source/ref-changelog.md:704 +#: ../../source/ref-changelog.md:855 msgid "" "Improved docs and docstrings " "([#691](https://github.com/adap/flower/pull/691) " @@ -8301,11 +13650,11 @@ msgid "" "[#713](https://github.com/adap/flower/pull/713))" msgstr "" -#: ../../source/ref-changelog.md:706 +#: ../../source/ref-changelog.md:857 msgid "MXNet example and documentation" msgstr "" -#: ../../source/ref-changelog.md:708 +#: ../../source/ref-changelog.md:859 msgid "" "FedBN implementation in example PyTorch: From Centralized To Federated " "([#696](https://github.com/adap/flower/pull/696) " @@ -8313,13 +13662,13 @@ msgid "" "[#705](https://github.com/adap/flower/pull/705))" msgstr "" -#: ../../source/ref-changelog.md:712 +#: ../../source/ref-changelog.md:863 msgid "" "**Serialization-agnostic server** " "([#721](https://github.com/adap/flower/pull/721))" msgstr "" -#: ../../source/ref-changelog.md:714 +#: ../../source/ref-changelog.md:865 msgid "" "The Flower server is now fully serialization-agnostic. Prior usage of " "class `Weights` (which represents parameters as deserialized NumPy " @@ -8330,7 +13679,7 @@ msgid "" "serialization/deserialization)." msgstr "" -#: ../../source/ref-changelog.md:716 +#: ../../source/ref-changelog.md:867 msgid "" "Built-in strategies implement this approach by handling serialization and" " deserialization to/from `Weights` internally. 
Custom/3rd-party Strategy " @@ -8340,31 +13689,31 @@ msgid "" " easily migrate to the new format." msgstr "" -#: ../../source/ref-changelog.md:718 +#: ../../source/ref-changelog.md:869 msgid "" "Deprecated `flwr.server.Server.evaluate`, use " "`flwr.server.Server.evaluate_round` instead " "([#717](https://github.com/adap/flower/pull/717))" msgstr "" -#: ../../source/ref-changelog.md:720 +#: ../../source/ref-changelog.md:871 msgid "v0.15.0 (2021-03-12)" msgstr "" -#: ../../source/ref-changelog.md:724 +#: ../../source/ref-changelog.md:875 msgid "" "**Server-side parameter initialization** " "([#658](https://github.com/adap/flower/pull/658))" msgstr "" -#: ../../source/ref-changelog.md:726 +#: ../../source/ref-changelog.md:877 msgid "" "Model parameters can now be initialized on the server-side. Server-side " "parameter initialization works via a new `Strategy` method called " "`initialize_parameters`." msgstr "" -#: ../../source/ref-changelog.md:728 +#: ../../source/ref-changelog.md:879 msgid "" "Built-in strategies support a new constructor argument called " "`initial_parameters` to set the initial parameters. Built-in strategies " @@ -8372,7 +13721,7 @@ msgid "" "delete them to free the memory afterwards." msgstr "" -#: ../../source/ref-changelog.md:747 +#: ../../source/ref-changelog.md:898 msgid "" "If no initial parameters are provided to the strategy, the server will " "continue to use the current behaviour (namely, it will ask one of the " @@ -8380,21 +13729,21 @@ msgid "" "parameters)." 
msgstr "" -#: ../../source/ref-changelog.md:749 +#: ../../source/ref-changelog.md:900 msgid "Deprecations" msgstr "" -#: ../../source/ref-changelog.md:751 +#: ../../source/ref-changelog.md:902 msgid "" "Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " "`flwr.server.strategy.FedAvg`, which is equivalent)" msgstr "" -#: ../../source/ref-changelog.md:753 +#: ../../source/ref-changelog.md:904 msgid "v0.14.0 (2021-02-18)" msgstr "" -#: ../../source/ref-changelog.md:757 +#: ../../source/ref-changelog.md:908 msgid "" "**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " "([#610](https://github.com/adap/flower/pull/610) " @@ -8402,7 +13751,7 @@ msgid "" "[#633](https://github.com/adap/flower/pull/633))" msgstr "" -#: ../../source/ref-changelog.md:759 +#: ../../source/ref-changelog.md:910 msgid "" "Clients can now return an additional dictionary mapping `str` keys to " "values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " @@ -8410,7 +13759,7 @@ msgid "" "and make use of them on the server side!" msgstr "" -#: ../../source/ref-changelog.md:761 +#: ../../source/ref-changelog.md:912 msgid "" "This improvement also allowed for more consistent return types between " "`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " @@ -8418,7 +13767,7 @@ msgid "" "holding arbitrary problem-specific values like accuracy." msgstr "" -#: ../../source/ref-changelog.md:763 +#: ../../source/ref-changelog.md:914 msgid "" "In case you wondered: this feature is compatible with existing projects, " "the additional dictionary return value is optional. New code should " @@ -8428,19 +13777,19 @@ msgid "" "details." 
msgstr "" -#: ../../source/ref-changelog.md:765 +#: ../../source/ref-changelog.md:916 msgid "" "*Code example:* note the additional dictionary return values in both " "`FlwrClient.fit` and `FlwrClient.evaluate`:" msgstr "" -#: ../../source/ref-changelog.md:780 +#: ../../source/ref-changelog.md:931 msgid "" "**Generalized** `config` **argument in** `Client.fit` **and** " "`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" msgstr "" -#: ../../source/ref-changelog.md:782 +#: ../../source/ref-changelog.md:933 msgid "" "The `config` argument used to be of type `Dict[str, str]`, which means " "that dictionary values were expected to be strings. The new release " @@ -8448,58 +13797,58 @@ msgid "" "`bytes`, `float`, `int`, `str`." msgstr "" -#: ../../source/ref-changelog.md:784 +#: ../../source/ref-changelog.md:935 msgid "" "This means one can now pass almost arbitrary values to `fit`/`evaluate` " "using the `config` dictionary. Yay, no more `str(epochs)` on the server-" "side and `int(config[\"epochs\"])` on the client side!" 
msgstr "" -#: ../../source/ref-changelog.md:786 +#: ../../source/ref-changelog.md:937 msgid "" "*Code example:* note that the `config` dictionary now contains non-`str` " "values in both `Client.fit` and `Client.evaluate`:" msgstr "" -#: ../../source/ref-changelog.md:803 +#: ../../source/ref-changelog.md:954 msgid "v0.13.0 (2021-01-08)" msgstr "" -#: ../../source/ref-changelog.md:807 +#: ../../source/ref-changelog.md:958 msgid "" "New example: PyTorch From Centralized To Federated " "([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/ref-changelog.md:808 +#: ../../source/ref-changelog.md:959 msgid "Improved documentation" msgstr "" -#: ../../source/ref-changelog.md:809 +#: ../../source/ref-changelog.md:960 msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" msgstr "" -#: ../../source/ref-changelog.md:810 +#: ../../source/ref-changelog.md:961 msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" msgstr "" -#: ../../source/ref-changelog.md:811 +#: ../../source/ref-changelog.md:962 msgid "" "Updated examples documentation " "([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/ref-changelog.md:812 +#: ../../source/ref-changelog.md:963 msgid "" "Removed obsolete documentation " "([#548](https://github.com/adap/flower/pull/548))" msgstr "" -#: ../../source/ref-changelog.md:814 +#: ../../source/ref-changelog.md:965 msgid "Bugfix:" msgstr "" -#: ../../source/ref-changelog.md:816 +#: ../../source/ref-changelog.md:967 msgid "" "`Server.fit` does not disconnect clients when finished, disconnecting the" " clients is now handled in `flwr.server.start_server` " @@ -8507,28 +13856,28 @@ msgid "" "[#540](https://github.com/adap/flower/issues/540))." 
msgstr "" -#: ../../source/ref-changelog.md:818 +#: ../../source/ref-changelog.md:969 msgid "v0.12.0 (2020-12-07)" msgstr "" -#: ../../source/ref-changelog.md:820 ../../source/ref-changelog.md:836 +#: ../../source/ref-changelog.md:971 ../../source/ref-changelog.md:987 msgid "Important changes:" msgstr "" -#: ../../source/ref-changelog.md:822 +#: ../../source/ref-changelog.md:973 msgid "" "Added an example for embedded devices " "([#507](https://github.com/adap/flower/pull/507))" msgstr "" -#: ../../source/ref-changelog.md:823 +#: ../../source/ref-changelog.md:974 msgid "" "Added a new NumPyClient (in addition to the existing KerasClient) " "([#504](https://github.com/adap/flower/pull/504) " "[#508](https://github.com/adap/flower/pull/508))" msgstr "" -#: ../../source/ref-changelog.md:824 +#: ../../source/ref-changelog.md:975 msgid "" "Deprecated `flwr_example` package and started to migrate examples into " "the top-level `examples` directory " @@ -8536,15 +13885,15 @@ msgid "" "[#512](https://github.com/adap/flower/pull/512))" msgstr "" -#: ../../source/ref-changelog.md:826 +#: ../../source/ref-changelog.md:977 msgid "v0.11.0 (2020-11-30)" msgstr "" -#: ../../source/ref-changelog.md:828 +#: ../../source/ref-changelog.md:979 msgid "Incompatible changes:" msgstr "" -#: ../../source/ref-changelog.md:830 +#: ../../source/ref-changelog.md:981 msgid "" "Renamed strategy methods " "([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " @@ -8554,48 +13903,48 @@ msgid "" "migrate rename the following `Strategy` methods accordingly:" msgstr "" -#: ../../source/ref-changelog.md:831 +#: ../../source/ref-changelog.md:982 msgid "`on_configure_evaluate` => `configure_evaluate`" msgstr "" -#: ../../source/ref-changelog.md:832 +#: ../../source/ref-changelog.md:983 msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" msgstr "" -#: ../../source/ref-changelog.md:833 +#: ../../source/ref-changelog.md:984 msgid "`on_configure_fit` => `configure_fit`" msgstr "" -#: 
../../source/ref-changelog.md:834 +#: ../../source/ref-changelog.md:985 msgid "`on_aggregate_fit` => `aggregate_fit`" msgstr "" -#: ../../source/ref-changelog.md:838 +#: ../../source/ref-changelog.md:989 msgid "" "Deprecated `DefaultStrategy` " "([#479](https://github.com/adap/flower/pull/479)). To migrate use " "`FedAvg` instead." msgstr "" -#: ../../source/ref-changelog.md:839 +#: ../../source/ref-changelog.md:990 msgid "" "Simplified examples and baselines " "([#484](https://github.com/adap/flower/pull/484))." msgstr "" -#: ../../source/ref-changelog.md:840 +#: ../../source/ref-changelog.md:991 msgid "" "Removed presently unused `on_conclude_round` from strategy interface " "([#483](https://github.com/adap/flower/pull/483))." msgstr "" -#: ../../source/ref-changelog.md:841 +#: ../../source/ref-changelog.md:992 msgid "" "Set minimal Python version to 3.6.1 instead of 3.6.9 " "([#471](https://github.com/adap/flower/pull/471))." msgstr "" -#: ../../source/ref-changelog.md:842 +#: ../../source/ref-changelog.md:993 msgid "" "Improved `Strategy` docstrings " "([#470](https://github.com/adap/flower/pull/470))." 
@@ -8645,13 +13994,13 @@ msgstr "" #: ../../source/ref-example-projects.rst:26 msgid "" -"`Quickstart TensorFlow (Tutorial) `_" msgstr "" #: ../../source/ref-example-projects.rst:27 msgid "" -"`Quickstart TensorFlow (Blog Post) `_" msgstr "" @@ -8674,7 +14023,7 @@ msgstr "" #: ../../source/ref-example-projects.rst:37 msgid "" -"`Quickstart PyTorch (Tutorial) `_" msgstr "" @@ -8698,7 +14047,7 @@ msgstr "" #: ../../source/ref-example-projects.rst:46 msgid "" "`PyTorch: From Centralized To Federated (Tutorial) " -"`_" msgstr "" @@ -8721,7 +14070,7 @@ msgstr "" #: ../../source/ref-example-projects.rst:55 msgid "" "`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " -"`_" +"`_" msgstr "" #: ../../source/ref-example-projects.rst:60 @@ -8908,7 +14257,7 @@ msgstr "" #: ../../source/ref-faq.rst:15 msgid "" "Find the `blog post about federated learning on embedded device here " -"`_" +"`_" " and the corresponding `GitHub code example " "`_." msgstr "" @@ -8920,18 +14269,18 @@ msgstr "" #: ../../source/ref-faq.rst:19 msgid "" "Yes, it does. Please take a look at our `blog post " -"`_ or check out the code examples:" msgstr "" #: ../../source/ref-faq.rst:21 msgid "" -"`Android Kotlin example `_" msgstr "" #: ../../source/ref-faq.rst:22 -msgid "`Android Java example `_" +msgid "`Android Java example `_" msgstr "" #: ../../source/ref-faq.rst @@ -9117,7 +14466,7 @@ msgstr "" msgid "" "You may delete the source ID at any time. If you wish for all events " "logged under a specific source ID to be deleted, you can send a deletion " -"request mentioning the source ID to `telemetry@flower.dev`. All events " +"request mentioning the source ID to `telemetry@flower.ai`. All events " "related to that source ID will then be permanently deleted." msgstr "" @@ -9165,8 +14514,8 @@ msgstr "" msgid "" "We want to hear from you. 
If you have any feedback or ideas on how to " "improve the way we handle anonymous usage metrics, reach out to us via " -"[Slack](https://flower.dev/join-slack/) (channel `#telemetry`) or email " -"(`telemetry@flower.dev`)." +"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " +"(`telemetry@flower.ai`)." msgstr "" #: ../../source/tutorial-quickstart-android.rst:-1 @@ -9388,7 +14737,7 @@ msgstr "" msgid "" "First of all, for running the Flower Python server, it is recommended to " "create a virtual environment and run everything within a `virtualenv " -"`_. For the Flower " +"`_. For the Flower " "client implementation in iOS, it is recommended to use Xcode as our IDE." msgstr "" @@ -9486,7 +14835,7 @@ msgid "" msgstr "" #: ../../source/tutorial-quickstart-ios.rst:131 -#: ../../source/tutorial-quickstart-mxnet.rst:226 +#: ../../source/tutorial-quickstart-mxnet.rst:228 #: ../../source/tutorial-quickstart-pytorch.rst:205 #: ../../source/tutorial-quickstart-tensorflow.rst:100 msgid "" @@ -9496,7 +14845,7 @@ msgid "" msgstr "" #: ../../source/tutorial-quickstart-ios.rst:142 -#: ../../source/tutorial-quickstart-mxnet.rst:237 +#: ../../source/tutorial-quickstart-mxnet.rst:239 #: ../../source/tutorial-quickstart-pytorch.rst:216 #: ../../source/tutorial-quickstart-scikitlearn.rst:215 #: ../../source/tutorial-quickstart-tensorflow.rst:112 @@ -9506,6 +14855,7 @@ msgstr "" #: ../../source/tutorial-quickstart-ios.rst:144 #: ../../source/tutorial-quickstart-pytorch.rst:218 #: ../../source/tutorial-quickstart-tensorflow.rst:114 +#: ../../source/tutorial-quickstart-xgboost.rst:525 msgid "" "With both client and server ready, we can now run everything and see " "federated learning in action. 
FL systems usually have a server and " @@ -9550,21 +14900,29 @@ msgstr "" msgid "Quickstart MXNet" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:10 +#: ../../source/tutorial-quickstart-mxnet.rst:7 +msgid "" +"MXNet is no longer maintained and has been moved into `Attic " +"`_. As a result, we would " +"encourage you to use other ML frameworks alongise Flower, for example, " +"PyTorch. This tutorial might be removed in future versions of Flower." +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:12 msgid "" "In this tutorial, we will learn how to train a :code:`Sequential` model " "on MNIST using Flower and MXNet." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:12 +#: ../../source/tutorial-quickstart-mxnet.rst:14 #: ../../source/tutorial-quickstart-scikitlearn.rst:12 msgid "" "It is recommended to create a virtual environment and run everything " -"within this `virtualenv `_." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:16 +#: ../../source/tutorial-quickstart-mxnet.rst:18 #: ../../source/tutorial-quickstart-scikitlearn.rst:16 msgid "" "*Clients* are responsible for generating individual model parameter " @@ -9575,18 +14933,18 @@ msgid "" "called a *round*." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:20 +#: ../../source/tutorial-quickstart-mxnet.rst:22 #: ../../source/tutorial-quickstart-scikitlearn.rst:20 msgid "" "Now that we have a rough idea of what is going on, let's get started. We " "first need to install Flower. You can do this by running:" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:26 +#: ../../source/tutorial-quickstart-mxnet.rst:28 msgid "Since we want to use MXNet, let's go ahead and install it:" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:36 +#: ../../source/tutorial-quickstart-mxnet.rst:38 msgid "" "Now that we have all our dependencies installed, let's run a simple " "distributed training with two clients and one server. 
Our training " @@ -9595,53 +14953,53 @@ msgid "" "`_." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:38 +#: ../../source/tutorial-quickstart-mxnet.rst:40 msgid "" "In a file called :code:`client.py`, import Flower and MXNet related " "packages:" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:53 +#: ../../source/tutorial-quickstart-mxnet.rst:55 msgid "In addition, define the device allocation in MXNet with:" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:59 +#: ../../source/tutorial-quickstart-mxnet.rst:61 msgid "" "We use MXNet to load MNIST, a popular image classification dataset of " "handwritten digits for machine learning. The MXNet utility " ":code:`mx.test_utils.get_mnist()` downloads the training and test data." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:73 +#: ../../source/tutorial-quickstart-mxnet.rst:75 msgid "" "Define the training and loss with MXNet. We train the model by looping " "over the dataset, measure the corresponding loss, and optimize it." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:111 +#: ../../source/tutorial-quickstart-mxnet.rst:113 msgid "" "Next, we define the validation of our machine learning model. We loop " "over the test set and measure both loss and accuracy on the test set." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:135 +#: ../../source/tutorial-quickstart-mxnet.rst:137 msgid "" "After defining the training and testing of a MXNet machine learning " "model, we use these functions to implement a Flower client." 
msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:137 +#: ../../source/tutorial-quickstart-mxnet.rst:139 msgid "Our Flower clients will use a simple :code:`Sequential` model:" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:156 +#: ../../source/tutorial-quickstart-mxnet.rst:158 msgid "" "After loading the dataset with :code:`load_data()` we perform one forward" " propagation to initialize the model and model parameters with " ":code:`model(init)`. Next, we implement a Flower client." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:158 +#: ../../source/tutorial-quickstart-mxnet.rst:160 #: ../../source/tutorial-quickstart-pytorch.rst:144 #: ../../source/tutorial-quickstart-tensorflow.rst:54 msgid "" @@ -9652,7 +15010,7 @@ msgid "" "your code (i.e., to train the neural network we defined earlier)." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:164 +#: ../../source/tutorial-quickstart-mxnet.rst:166 msgid "" "Flower provides a convenience class called :code:`NumPyClient` which " "makes it easier to implement the :code:`Client` interface when your " @@ -9661,19 +15019,19 @@ msgid "" "though):" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:170 +#: ../../source/tutorial-quickstart-mxnet.rst:172 #: ../../source/tutorial-quickstart-pytorch.rst:156 #: ../../source/tutorial-quickstart-scikitlearn.rst:109 msgid "return the model weight as a list of NumPy ndarrays" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:171 +#: ../../source/tutorial-quickstart-mxnet.rst:173 #: ../../source/tutorial-quickstart-pytorch.rst:157 #: ../../source/tutorial-quickstart-scikitlearn.rst:111 msgid ":code:`set_parameters` (optional)" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:172 +#: ../../source/tutorial-quickstart-mxnet.rst:174 #: ../../source/tutorial-quickstart-pytorch.rst:158 #: ../../source/tutorial-quickstart-scikitlearn.rst:111 msgid "" @@ -9681,42 +15039,41 @@ msgid "" "server" msgstr "" -#: 
../../source/tutorial-quickstart-mxnet.rst:174 +#: ../../source/tutorial-quickstart-mxnet.rst:176 #: ../../source/tutorial-quickstart-pytorch.rst:160 #: ../../source/tutorial-quickstart-scikitlearn.rst:114 msgid "set the local model weights" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:175 +#: ../../source/tutorial-quickstart-mxnet.rst:177 #: ../../source/tutorial-quickstart-pytorch.rst:161 #: ../../source/tutorial-quickstart-scikitlearn.rst:115 msgid "train the local model" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:176 +#: ../../source/tutorial-quickstart-mxnet.rst:178 #: ../../source/tutorial-quickstart-pytorch.rst:162 #: ../../source/tutorial-quickstart-scikitlearn.rst:116 msgid "receive the updated local model weights" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:178 +#: ../../source/tutorial-quickstart-mxnet.rst:180 #: ../../source/tutorial-quickstart-pytorch.rst:164 #: ../../source/tutorial-quickstart-scikitlearn.rst:118 msgid "test the local model" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:180 +#: ../../source/tutorial-quickstart-mxnet.rst:182 msgid "They can be implemented in the following way:" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:210 +#: ../../source/tutorial-quickstart-mxnet.rst:212 msgid "" "We can now create an instance of our class :code:`MNISTClient` and add " "one line to actually run this client:" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:217 -#: ../../source/tutorial-quickstart-scikitlearn.rst:150 +#: ../../source/tutorial-quickstart-mxnet.rst:219 msgid "" "That's it for the client. We only have to implement :code:`Client` or " ":code:`NumPyClient` and call :code:`fl.client.start_client()` or " @@ -9728,39 +15085,42 @@ msgid "" "that needs to change is the :code:`server_address` we pass to the client." 
msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:239 +#: ../../source/tutorial-quickstart-mxnet.rst:241 msgid "" "With both client and server ready, we can now run everything and see " "federated learning in action. Federated learning systems usually have a " "server and multiple clients. We therefore have to start the server first:" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:247 +#: ../../source/tutorial-quickstart-mxnet.rst:249 #: ../../source/tutorial-quickstart-pytorch.rst:226 #: ../../source/tutorial-quickstart-scikitlearn.rst:224 #: ../../source/tutorial-quickstart-tensorflow.rst:122 +#: ../../source/tutorial-quickstart-xgboost.rst:533 msgid "" "Once the server is running we can start the clients in different " "terminals. Open a new terminal and start the first client:" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:254 +#: ../../source/tutorial-quickstart-mxnet.rst:256 #: ../../source/tutorial-quickstart-pytorch.rst:233 #: ../../source/tutorial-quickstart-scikitlearn.rst:231 #: ../../source/tutorial-quickstart-tensorflow.rst:129 +#: ../../source/tutorial-quickstart-xgboost.rst:540 msgid "Open another terminal and start the second client:" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:260 +#: ../../source/tutorial-quickstart-mxnet.rst:262 #: ../../source/tutorial-quickstart-pytorch.rst:239 #: ../../source/tutorial-quickstart-scikitlearn.rst:237 +#: ../../source/tutorial-quickstart-xgboost.rst:546 msgid "" "Each client will have its own dataset. You should now see how the " "training does in the very first terminal (the one that started the " "server):" msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:292 +#: ../../source/tutorial-quickstart-mxnet.rst:294 msgid "" "Congratulations! You've successfully built and run your first federated " "learning system. 
The full `source code " @@ -9803,9 +15163,10 @@ msgid "" msgstr "" #: ../../source/tutorial-quickstart-pytorch.rst:15 +#: ../../source/tutorial-quickstart-xgboost.rst:39 msgid "" "First of all, it is recommended to create a virtual environment and run " -"everything within a `virtualenv `_." msgstr "" @@ -9896,10 +15257,11 @@ msgstr "" #: ../../source/tutorial-quickstart-tensorflow.rst:90 msgid "" "That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()` or " -":code:`fl.client.start_numpy_client()`. The string :code:`\"[::]:8080\"` " -"tells the client which server to connect to. In our case we can run the " -"server and the client on the same machine, therefore we use " +":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " +"implement a client of type :code:`NumPyClient` you'll need to first call " +"its :code:`to_client()` method. The string :code:`\"[::]:8080\"` tells " +"the client which server to connect to. In our case we can run the server " +"and the client on the same machine, therefore we use " ":code:`\"[::]:8080\"`. If we run a truly federated workload with the " "server and clients running on different machines, all that needs to " "change is the :code:`server_address` we point the client at." @@ -10075,6 +15437,19 @@ msgid "" "one line to actually run this client:" msgstr "" +#: ../../source/tutorial-quickstart-scikitlearn.rst:150 +msgid "" +"That's it for the client. We only have to implement :code:`Client` or " +":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " +"implement a client of type :code:`NumPyClient` you'll need to first call " +"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" +" the client which server to connect to. In our case we can run the server" +" and the client on the same machine, therefore we use " +":code:`\"0.0.0.0:8080\"`. 
If we run a truly federated workload with the " +"server and clients running on different machines, all that needs to " +"change is the :code:`server_address` we pass to the client." +msgstr "" + #: ../../source/tutorial-quickstart-scikitlearn.rst:159 msgid "" "The following Flower server is a little bit more advanced and returns an " @@ -10202,17 +15577,496 @@ msgstr "" msgid "Quickstart XGBoost" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:10 +#: ../../source/tutorial-quickstart-xgboost.rst:14 +msgid "Federated XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:16 msgid "" -"Let's build a horizontal federated learning system using XGBoost and " -"Flower!" +"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " +"implementation of gradient-boosted decision tree (**GBDT**), that " +"maximises the computational boundaries for boosted tree methods. It's " +"primarily designed to enhance both the performance and computational " +"speed of machine learning models. In XGBoost, trees are constructed " +"concurrently, unlike the sequential approach taken by GBDT." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:12 +#: ../../source/tutorial-quickstart-xgboost.rst:20 msgid "" -"Please refer to the `full code example " -"`_ to learn more." +"Often, for tabular data on medium-sized datasets with fewer than 10k " +"training examples, XGBoost surpasses the results of deep learning " +"techniques." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:23 +msgid "Why federated XGBoost?" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:25 +msgid "" +"Indeed, as the demand for data privacy and decentralized learning grows, " +"there's an increasing requirement to implement federated XGBoost systems " +"for specialised applications, like survival analysis and financial fraud " +"detection." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:27 +msgid "" +"Federated learning ensures that raw data remains on the local device, " +"making it an attractive approach for sensitive domains where data " +"security and privacy are paramount. Given the robustness and efficiency " +"of XGBoost, combining it with federated learning offers a promising " +"solution for these specific challenges." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:30 +msgid "" +"In this tutorial we will learn how to train a federated XGBoost model on " +"HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " +"example (`full code xgboost-quickstart " +"`_)" +" with two *clients* and one *server* to demonstrate how federated XGBoost" +" works, and then we dive into a more complex example (`full code xgboost-" +"comprehensive `_) to run various experiments." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:37 +msgid "Environment Setup" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:41 +msgid "" +"We first need to install Flower and Flower Datasets. You can do this by " +"running :" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:47 +msgid "" +"Since we want to use :code:`xgboost` package to build up XGBoost trees, " +"let's go ahead and install :code:`xgboost`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:57 +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. Now that we have all our " +"dependencies installed, let's run a simple distributed training with two " +"clients and one server." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:60 +msgid "" +"In a file called :code:`client.py`, import xgboost, Flower, Flower " +"Datasets and other related functions:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:87 +msgid "Dataset partition and hyper-parameter selection" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:89 +msgid "" +"Prior to local training, we require loading the HIGGS dataset from Flower" +" Datasets and conduct data partitioning for FL:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:102 +msgid "" +"In this example, we split the dataset into two partitions with uniform " +"distribution (:code:`IidPartitioner(num_partitions=2)`). Then, we load " +"the partition for the given client based on :code:`node_id`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:121 +msgid "" +"After that, we do train/test splitting on the given partition (client's " +"local data), and transform data format for :code:`xgboost` package." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:134 +msgid "" +"The functions of :code:`train_test_split` and " +":code:`transform_dataset_to_dmatrix` are defined as below:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:158 +msgid "Finally, we define the hyper-parameters used for XGBoost training." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:174 +msgid "" +"The :code:`num_local_round` represents the number of iterations for local" +" tree boost. We use CPU for the training in default. One can shift it to " +"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " +"evaluation metric." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:181 +msgid "Flower client definition for XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:183 +msgid "" +"After loading the dataset we define the Flower client. 
We follow the " +"general rule to define :code:`XgbClient` class inherited from " +":code:`fl.client.Client`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:193 +msgid "" +"The :code:`self.bst` is used to keep the Booster objects that remain " +"consistent across rounds, allowing them to store predictions from trees " +"integrated in earlier rounds and maintain other essential data structures" +" for training." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:196 +msgid "" +"Then, we override :code:`get_parameters`, :code:`fit` and " +":code:`evaluate` methods insides :code:`XgbClient` class as follows." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:210 +msgid "" +"Unlike neural network training, XGBoost trees are not started from a " +"specified random weights. In this case, we do not use " +":code:`get_parameters` and :code:`set_parameters` to initialise model " +"parameters for XGBoost. As a result, let's return an empty tensor in " +":code:`get_parameters` when it is called by the server at the first " +"round." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:251 +msgid "" +"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " +"up the first set of trees. the returned Booster object and config are " +"stored in :code:`self.bst` and :code:`self.config`, respectively. From " +"the second round, we load the global model sent from server to " +":code:`self.bst`, and then update model weights on local training data " +"with function :code:`local_boost` as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:269 +msgid "" +"Given :code:`num_local_round`, we update trees by calling " +":code:`self.bst.update` method. After training, the last " +":code:`N=num_local_round` trees will be extracted to send to the server." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:291 +msgid "" +"In :code:`evaluate`, we call :code:`self.bst.eval_set` function to " +"conduct evaluation on valid set. The AUC value will be returned." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:294 +msgid "" +"Now, we can create an instance of our class :code:`XgbClient` and add one" +" line to actually run this client:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:300 +msgid "" +"That's it for the client. We only have to implement :code:`Client`and " +"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " +"tells the client which server to connect to. In our case we can run the " +"server and the client on the same machine, therefore we use " +":code:`\"[::]:8080\"`. If we run a truly federated workload with the " +"server and clients running on different machines, all that needs to " +"change is the :code:`server_address` we point the client at." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:311 +msgid "" +"These updates are then sent to the *server* which will aggregate them to " +"produce a better model. Finally, the *server* sends this improved version" +" of the model back to each *client* to finish a complete FL round." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:314 +msgid "" +"In a file named :code:`server.py`, import Flower and FedXgbBagging from " +":code:`flwr.server.strategy`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:316 +msgid "We first define a strategy for XGBoost bagging aggregation." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:339 +msgid "" +"We use two clients for this example. An " +":code:`evaluate_metrics_aggregation` function is defined to collect and " +"wighted average the AUC values from clients." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:342 +msgid "Then, we start the server:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:354 +msgid "Tree-based bagging aggregation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:356 +msgid "" +"You must be curious about how bagging aggregation works. Let's look into " +"the details." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:358 +msgid "" +"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " +":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." +" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " +"and :code:`evaluate` methods as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:454 +msgid "" +"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " +"trees by calling :code:`aggregate()` function:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:513 +msgid "" +"In this function, we first fetch the number of trees and the number of " +"parallel trees for the current and previous model by calling " +":code:`_get_tree_nums`. Then, the fetched information will be aggregated." +" After that, the trees (containing model weights) are aggregated to " +"generate a new tree model." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:518 +msgid "" +"After traversal of all clients' models, a new global model is generated, " +"followed by the serialisation, and sending back to each client." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:523 +msgid "Launch Federated XGBoost!" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:585 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"XGBoost system. The AUC values can be checked in " +":code:`metrics_distributed`. One can see that the average AUC increases " +"over FL rounds." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:590 +msgid "" +"The full `source code `_ for this example can be found in :code:`examples" +"/xgboost-quickstart`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:594 +msgid "Comprehensive Federated XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:596 +msgid "" +"Now that you have known how federated XGBoost work with Flower, it's time" +" to run some more comprehensive experiments by customising the " +"experimental settings. In the xgboost-comprehensive example (`full code " +"`_), we provide more options to define various experimental" +" setups, including aggregation strategies, data partitioning and " +"centralised/distributed evaluation. We also support `Flower simulation " +"`_ making " +"it easy to simulate large client cohorts in a resource-aware manner. " +"Let's take a look!" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:603 +msgid "Cyclic training" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:605 +msgid "" +"In addition to bagging aggregation, we offer a cyclic training scheme, " +"which performs FL in a client-by-client fashion. Instead of aggregating " +"multiple clients, there is only one single client participating in the " +"training per round in the cyclic training scenario. The trained local " +"XGBoost trees will be passed to the next client as an initialised model " +"for next round's boosting." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:609 +msgid "" +"To do this, we first customise a :code:`ClientManager` in " +":code:`server_utils.py`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:649 +msgid "" +"The customised :code:`ClientManager` samples all available clients in " +"each FL round based on the order of connection to the server. 
Then, we " +"define a new strategy :code:`FedXgbCyclic` in " +":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " +"select only one client in given round and pass the received model to next" +" client." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:690 +msgid "" +"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " +"Instead, we just make a copy of the received client model as global model" +" by overriding :code:`aggregate_fit`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:693 +msgid "" +"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" +" methods ensure the clients to be sequentially selected given FL round:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:757 +msgid "Customised data partitioning" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:759 +msgid "" +"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" +" to instantiate the data partitioner based on the given " +":code:`num_partitions` and :code:`partitioner_type`. Currently, we " +"provide four supported partitioner type to simulate the uniformity/non-" +"uniformity in data quantity (uniform, linear, square, exponential)." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:790 +msgid "Customised centralised/distributed evaluation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:792 +msgid "" +"To facilitate centralised evaluation, we define a function in " +":code:`server_utils.py`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:824 +msgid "" +"This function returns a evaluation function which instantiates a " +":code:`Booster` object and loads the global model weights to it. The " +"evaluation is conducted by calling :code:`eval_set()` method, and the " +"tested AUC value is reported." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:827 +msgid "" +"As for distributed evaluation on the clients, it's same as the quick-" +"start example by overriding the :code:`evaluate()` method insides the " +":code:`XgbClient` class in :code:`client_utils.py`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:831 +msgid "Flower simulation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:832 +msgid "" +"We also provide an example code (:code:`sim.py`) to use the simulation " +"capabilities of Flower to simulate federated XGBoost training on either a" +" single machine or a cluster of machines." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:866 +msgid "" +"After importing all required packages, we define a :code:`main()` " +"function to perform the simulation process:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:921 +msgid "" +"We first load the dataset and perform data partitioning, and the pre-" +"processed data is stored in a :code:`list`. After the simulation begins, " +"the clients won't need to pre-process their partitions again." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:924 +msgid "Then, we define the strategies and other hyper-parameters:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:975 +msgid "" +"After that, we start the simulation by calling " +":code:`fl.simulation.start_simulation`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:995 +msgid "" +"One of key parameters for :code:`start_simulation` is :code:`client_fn` " +"which returns a function to construct a client. We define it as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1038 +msgid "Arguments parser" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1040 +msgid "" +"In :code:`utils.py`, we define the arguments parsers for clients, server " +"and simulation, allowing users to specify different experimental " +"settings. 
Let's first see the sever side:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1086 +msgid "" +"This allows user to specify training strategies / the number of total " +"clients / FL rounds / participating clients / clients for evaluation, and" +" evaluation fashion. Note that with :code:`--centralised-eval`, the sever" +" will do centralised evaluation and all functionalities for client " +"evaluation will be disabled." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1090 +msgid "Then, the argument parser on client side:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1144 +msgid "" +"This defines various options for client data partitioning. Besides, " +"clients also have an option to conduct evaluation on centralised test set" +" by setting :code:`--centralised-eval`, as well as an option to perform " +"scaled learning rate based on the number of clients by setting :code" +":`--scaled-lr`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1148 +msgid "We also have an argument parser for simulation:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1226 +msgid "This integrates all arguments for both client and server sides." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1229 +msgid "Example commands" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1231 +msgid "" +"To run a centralised evaluated experiment with bagging strategy on 5 " +"clients with exponential distribution for 50 rounds, we first start the " +"server as below:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1238 +msgid "Then, on each client terminal, we start the clients:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1244 +msgid "To run the same experiment with Flower simulation:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1250 +msgid "" +"The full `code `_ for this comprehensive example can be found in" +" :code:`examples/xgboost-comprehensive`." 
msgstr "" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 @@ -10223,10 +16077,10 @@ msgstr "" msgid "" "Welcome to the third part of the Flower federated learning tutorial. In " "previous parts of this tutorial, we introduced federated learning with " -"PyTorch and Flower (`part 1 `__) and we learned how strategies " "can be used to customize the execution on both the server and the clients" -" (`part 2 `__)." msgstr "" @@ -10234,7 +16088,7 @@ msgstr "" msgid "" "In this notebook, we'll continue to customize the federated learning " "system we built previously by creating a custom version of FedAvg (again," -" using `Flower `__ and `PyTorch " +" using `Flower `__ and `PyTorch " "`__)." msgstr "" @@ -10245,7 +16099,7 @@ msgstr "" msgid "" "`Star Flower on GitHub `__ ⭐️ and join " "the Flower community on Slack to connect, ask questions, and get help: " -"`Join Slack `__ 🌼 We'd love to hear from " +"`Join Slack `__ 🌼 We'd love to hear from " "you in the ``#introductions`` channel! And if anything is unclear, head " "over to the ``#questions`` channel." 
msgstr "" @@ -10291,7 +16145,6 @@ msgstr "" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:101 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:102 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:104 #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:101 msgid "" "It is possible to switch to a runtime that has GPU acceleration enabled " @@ -10390,17 +16243,17 @@ msgstr "" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:550 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:948 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:749 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:729 #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:715 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 msgid "" "Before you continue, make sure to join the Flower community on Slack: " -"`Join Slack `__" +"`Join Slack `__" msgstr "" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:552 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:950 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:751 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:731 #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:717 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 msgid "" @@ -10411,7 +16264,7 @@ msgstr "" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:554 msgid "" "The `Flower Federated Learning Tutorial - Part 4 " -"`__ introduces ``Client``, the flexible API underlying " "``NumPyClient``." msgstr "" @@ -10424,12 +16277,12 @@ msgstr "" msgid "" "Welcome to the fourth part of the Flower federated learning tutorial. 
In " "the previous parts of this tutorial, we introduced federated learning " -"with PyTorch and Flower (`part 1 `__), we learned how " "strategies can be used to customize the execution on both the server and " -"the clients (`part 2 `__), and we built our own " -"custom strategy from scratch (`part 3 `__)." msgstr "" @@ -10712,7 +16565,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:954 -msgid "`Read Flower Docs `__" +msgid "`Read Flower Docs `__" msgstr "" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:955 @@ -10724,12 +16577,12 @@ msgstr "" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:956 msgid "" "`Use Flower Baselines for your research " -"`__" +"`__" msgstr "" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:957 msgid "" -"`Watch Flower Summit 2023 videos `__" msgstr "" @@ -10744,10 +16597,11 @@ msgstr "" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 msgid "" -"In this notebook, we'll build a federated learning system using Flower " -"and PyTorch. In part 1, we use PyTorch for the model training pipeline " -"and data loading. In part 2, we continue to federate the PyTorch-based " -"pipeline using Flower." +"In this notebook, we'll build a federated learning system using Flower, " +"`Flower Datasets `__ and PyTorch. In " +"part 1, we use PyTorch for the model training pipeline and data loading. " +"In part 2, we continue to federate the PyTorch-based pipeline using " +"Flower." 
msgstr "" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:17 @@ -10763,23 +16617,38 @@ msgstr "" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:45 msgid "" "Next, we install the necessary packages for PyTorch (``torch`` and " -"``torchvision``) and Flower (``flwr``):" +"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " +"(``flwr``):" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:105 +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" +" GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:117 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:118 msgid "Loading the data" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:119 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:120 msgid "" "Federated learning can be applied to many different types of tasks across" " different domains. In this tutorial, we introduce federated learning by " "training a simple convolutional neural network (CNN) on the popular " "CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " -"distinguish between images from ten different classes:" +"distinguish between images from ten different classes: 'airplane', " +"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " +"'truck'." 
msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:150 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:131 msgid "" "We simulate having multiple datasets from multiple organizations (also " "called the \"cross-silo\" setting in federated learning) by splitting the" @@ -10790,22 +16659,22 @@ msgid "" "data is naturally partitioned)." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:152 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:133 msgid "" "Each organization will act as a client in the federated learning system. " "So having ten organizations participate in a federation means having ten " -"clients connected to the federated learning server:" +"clients connected to the federated learning server." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:172 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:144 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap the resulting partitions by creating a PyTorch ``DataLoader`` for " -"each of them:" +"Let's now create the Federated Dataset abstraction that from ``flwr-" +"datasets`` that partitions the CIFAR-10. We will create small training " +"and test set for each edge device and wrap each of them into a PyTorch " +"``DataLoader``:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:222 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:198 msgid "" "We now have a list of ten training sets and ten validation sets " "(``trainloaders`` and ``valloaders``) representing the data of ten " @@ -10817,13 +16686,13 @@ msgid "" "multiple partitions." 
msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:225 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:201 msgid "" "Let's take a look at the first batch of images and labels in the first " "training set (i.e., ``trainloaders[0]``) before we move on:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:264 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:240 msgid "" "The output above shows a random batch of images from the first " "``trainloader`` in our list of ten ``trainloaders``. It also prints the " @@ -10832,11 +16701,11 @@ msgid "" "batch of images." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:276 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:252 msgid "Step 1: Centralized Training with PyTorch" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:287 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:263 msgid "" "Next, we're going to use PyTorch to define a simple convolutional neural " "network. This introduction assumes basic familiarity with PyTorch, so it " @@ -10846,26 +16715,26 @@ msgid "" "`__." 
msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:299 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:275 msgid "Defining the model" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:301 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:277 msgid "" "We use the simple CNN described in the `PyTorch tutorial " "`__:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:338 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:314 msgid "Let's continue with the usual training and test functions:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:398 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:374 msgid "Training the model" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:400 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:376 msgid "" "We now have all the basic building blocks we need: a dataset, a model, a " "training function, and a test function. Let's put them together to train " @@ -10875,7 +16744,7 @@ msgid "" "models only on this internal data:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:430 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 msgid "" "Training the simple CNN on our CIFAR-10 split for 5 epochs should result " "in a test set accuracy of about 41%, which is not good, but at the same " @@ -10884,11 +16753,11 @@ msgid "" "sets the stage for what comes next - federated learning!" 
msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:442 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 msgid "Step 2: Federated Learning with Flower" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:444 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 msgid "" "Step 1 demonstrated a simple centralized training pipeline. All data was " "in one place (i.e., a single ``trainloader`` and a single ``valloader``)." @@ -10897,11 +16766,11 @@ msgid "" "organizations using federated learning." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:456 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 msgid "Updating model parameters" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:458 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 msgid "" "In federated learning, the server sends the global model parameters to " "the client, and the client updates the local model with the parameters " @@ -10912,7 +16781,7 @@ msgid "" "parameters)." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:460 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 msgid "" "We need two helper functions to update the local model with parameters " "received from the server and to get the updated model parameters from the" @@ -10920,7 +16789,7 @@ msgid "" "two functions do just that for the PyTorch model above." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:462 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 msgid "" "The details of how this works are not really important here (feel free to" " consult the PyTorch documentation if you want to learn more). 
In " @@ -10929,11 +16798,11 @@ msgid "" "ndarray's (which Flower knows how to serialize/deserialize):" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:490 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 msgid "Implementing a Flower client" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:492 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 msgid "" "With that out of the way, let's move on to the interesting part. " "Federated learning systems consist of a server and multiple clients. In " @@ -10943,39 +16812,39 @@ msgid "" "requires us to write less boilerplate." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:494 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 msgid "" "To implement the Flower client, we create a subclass of " "``flwr.client.NumPyClient`` and implement the three methods " "``get_parameters``, ``fit``, and ``evaluate``:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:496 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 msgid "``get_parameters``: Return the current local model parameters" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:497 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 msgid "" "``fit``: Receive model parameters from the server, train the model " "parameters on the local data, and return the (updated) model parameters " "to the server" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:498 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 msgid "" "``evaluate``: Receive model parameters from the server, evaluate the " "model parameters on the local data, and return the evaluation result to " "the server" msgstr "" -#: 
../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:500 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 msgid "" "We mentioned that our clients will use the previously defined PyTorch " "components for model training and evaluation. Let's see a simple Flower " "client implementation that brings everything together:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:537 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 msgid "" "Our class ``FlowerClient`` defines how local training/evaluation will be " "performed and allows Flower to call the local training/evaluation through" @@ -10990,11 +16859,11 @@ msgid "" "evaluation)." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:541 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:517 msgid "Using the Virtual Client Engine" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:543 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:519 msgid "" "In this notebook, we want to simulate a federated learning system with 10" " clients on a single machine. This means that the server and all 10 " @@ -11005,7 +16874,7 @@ msgid "" "clients participates in a single round of federated learning." 
msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:545 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:521 msgid "" "In addition to the regular capabilities where server and clients run on " "multiple machines, Flower, therefore, provides special simulation " @@ -11021,11 +16890,11 @@ msgid "" "different clients, as can be seen below:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:580 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:556 msgid "Starting the training" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:582 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:558 msgid "" "We now have the class ``FlowerClient`` which defines client-side " "training/evaluation and ``client_fn`` which allows Flower to create " @@ -11034,7 +16903,7 @@ msgid "" "actual simulation using ``flwr.simulation.start_simulation``." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:584 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:560 msgid "" "The function ``start_simulation`` accepts a number of arguments, amongst " "them the ``client_fn`` used to create ``FlowerClient`` instances, the " @@ -11044,7 +16913,7 @@ msgid "" "*Federated Averaging* (FedAvg)." 
msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:586 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:562 msgid "" "Flower has a number of built-in strategies, but we can also use our own " "strategy implementations to customize nearly all aspects of the federated" @@ -11054,15 +16923,15 @@ msgid "" "starts the simulation:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:608 msgid "Behind the scenes" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:610 msgid "So how does this work? How does Flower execute this simulation?" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:612 #, python-format msgid "" "When we call ``start_simulation``, we tell Flower that there are 10 " @@ -11072,7 +16941,7 @@ msgid "" "ahead and selects 10 random clients (i.e., 100% of 10)." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:614 msgid "" "Flower then asks the selected 10 clients to train the model. When the " "server receives the model parameter updates from the clients, it hands " @@ -11081,17 +16950,17 @@ msgid "" " then gets used in the next round of federated learning." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:646 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:626 msgid "Where's the accuracy?" 
msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:648 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628 msgid "" "You may have noticed that all metrics except for ``losses_distributed`` " "are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:650 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630 msgid "" "Flower can automatically aggregate losses returned by individual clients," " but it cannot do the same for metrics in the generic metrics dictionary " @@ -11101,7 +16970,7 @@ msgid "" "handle these automatically." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:652 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632 msgid "" "As users, we need to tell the framework how to handle/aggregate these " "custom metrics, and we do so by passing metric aggregation functions to " @@ -11111,19 +16980,19 @@ msgid "" "``evaluate_metrics_aggregation_fn``." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:654 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634 msgid "" "Let's create a simple weighted averaging function to aggregate the " "``accuracy`` metric we return from ``evaluate``:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:680 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:660 msgid "" "The only thing left to do is to tell the strategy to call this function " "whenever it receives evaluation metric dictionaries from the clients:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:717 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:697 msgid "" "We now have a full system that performs federated training and federated " "evaluation. 
It uses the ``weighted_average`` function to aggregate custom" @@ -11131,7 +17000,7 @@ msgid "" "all clients on the server side." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:719 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:699 msgid "" "The other two categories of metrics (``losses_centralized`` and " "``metrics_centralized``) are still empty because they only apply when " @@ -11139,12 +17008,12 @@ msgid "" "will cover centralized evaluation." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:731 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 msgid "Final remarks" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 msgid "" "Congratulations, you just trained a convolutional neural network, " "federated over 10 clients! With that, you understand the basics of " @@ -11154,7 +17023,7 @@ msgid "" "Transformers or speech with SpeechBrain." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:735 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:715 msgid "" "In the next notebook, we're going to cover some more advanced concepts. " "Want to customize your strategy? Initialize parameters on the server " @@ -11162,10 +17031,10 @@ msgid "" "all this and more in the next tutorial." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:753 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 msgid "" "The `Flower Federated Learning Tutorial - Part 2 " -"`__ goes into more depth about strategies and all " "the advanced things you can build with them." msgstr "" @@ -11178,7 +17047,7 @@ msgstr "" msgid "" "Welcome to the next part of the federated learning tutorial. 
In previous " "parts of this tutorial, we introduced federated learning with PyTorch and" -" Flower (`part 1 `__)." msgstr "" @@ -11186,7 +17055,7 @@ msgstr "" msgid "" "In this notebook, we'll begin to customize the federated learning system " "we built in the introductory notebook (again, using `Flower " -"`__ and `PyTorch `__)." +"`__ and `PyTorch `__)." msgstr "" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 @@ -11384,7 +17253,7 @@ msgstr "" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 msgid "" "The `Flower Federated Learning Tutorial - Part 3 " -"`__ shows how to build a fully custom ``Strategy`` " "from scratch." msgstr "" @@ -11412,7 +17281,7 @@ msgstr "" msgid "" "`Star Flower on GitHub `__ ⭐️ and join " "the open-source Flower community on Slack to connect, ask questions, and " -"get help: `Join Slack `__ 🌼 We'd love to " +"get help: `Join Slack `__ 🌼 We'd love to " "hear from you in the ``#introductions`` channel! And if anything is " "unclear, head over to the ``#questions`` channel." 
msgstr "" @@ -11439,7 +17308,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|ed6498a023f2477a9ccd57ee4514bda4|" +msgid "|31e4b1afa87c4b968327bbeafbf184d4|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 @@ -11454,7 +17323,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|5a4f742489ac4f819afefdd4dc9ab272|" +msgid "|c9d935b4284e4c389a33d86b33e07c0a|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 @@ -11475,7 +17344,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|3331c80cd05045f6a56524d8e3e76d0c|" +msgid "|00727b5faffb468f84dd1b03ded88638|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 @@ -11491,7 +17360,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|4987b26884ec4b2c8f06c1264bcebe60|" +msgid "|daf0cf0ff4c24fd29439af78416cf47b|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 @@ -11507,7 +17376,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|ec8ae2d778aa493a986eb2fa29c220e5|" +msgid "|9f093007080d471d94ca90d3e9fde9b6|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 @@ -11522,7 +17391,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|b8949d0669fe4f8eadc9a4932f4e9c57|" +msgid "|46a26e6150e0479fbd3dfd655f36eb13|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 @@ -11542,7 +17411,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|94ff30bdcd09443e8488b5f29932a541|" +msgid "|3daba297595c4c7fb845d90404a6179a|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 @@ -11557,7 +17426,7 @@ msgid "" msgstr 
"" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|48dccf1d6d0544bba8917d2783a47719|" +msgid "|5769874fa9c4455b80b2efda850d39d7|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 @@ -11697,7 +17566,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|0366618db96b4f329f0d4372d1150fde|" +msgid "|ba47ffb421814b0f8f9fa5719093d839|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 @@ -11721,7 +17590,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|ac80eddc76e6478081b1ca35eed029c0|" +msgid "|aeac5bf79cbf497082e979834717e01b|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 @@ -11745,7 +17614,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|1ac94140c317450e89678db133c7f3c2|" +msgid "|ce27ed4bbe95459dba016afc42486ba2|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 @@ -11768,7 +17637,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|f8850c6e96fc4430b55e53bba237a7c0|" +msgid "|ae94a7f71dda443cbec2385751427d41|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 @@ -11806,7 +17675,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|4a368fdd3fc34adabd20a46752a68582|" +msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 @@ -11904,7 +17773,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|40f69c17bb444652a7c8dfe577cd120e|" +msgid "|08cb60859b07461588fe44e55810b050|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 @@ -11928,8 +17797,585 @@ msgstr "" #: 
../../source/tutorial-series-what-is-federated-learning.ipynb:373 msgid "" "The `Flower Federated Learning Tutorial - Part 1 " -"`__ shows how to build a simple federated learning system " "with PyTorch and Flower." msgstr "" +#~ msgid "" +#~ "Configuring and setting up the " +#~ ":code:`Dockerfile` as well the configuration" +#~ " for the devcontainer can be a " +#~ "bit more involved. The good thing " +#~ "is you want have to do it. " +#~ "Usually it should be enough to " +#~ "install Docker on your system and " +#~ "ensure its available on your command " +#~ "line. Additionally, install the `VSCode " +#~ "Containers Extension `_." +#~ msgstr "" + +#~ msgid "" +#~ "``flwr = { path = " +#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\" }`` " +#~ "(without extras)" +#~ msgstr "" + +#~ msgid "" +#~ "``flwr = { path = " +#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\", extras =" +#~ " [\"simulation\"] }`` (with extras)" +#~ msgstr "" + +#~ msgid "Upload the whl (e.g., ``flwr-1.7.0-py3-none-any.whl``)" +#~ msgstr "" + +#~ msgid "" +#~ "Change ``!pip install -q 'flwr[simulation]'" +#~ " torch torchvision matplotlib`` to ``!pip" +#~ " install -q 'flwr-1.7.0-py3-none-" +#~ "any.whl[simulation]' torch torchvision matplotlib``" +#~ msgstr "" + +#~ msgid "Before the release" +#~ msgstr "" + +#~ msgid "" +#~ "Update the changelog (``changelog.md``) with" +#~ " all relevant changes that happened " +#~ "after the last release. If the " +#~ "last release was tagged ``v1.2.0``, you" +#~ " can use the following URL to " +#~ "see all commits that got merged " +#~ "into ``main`` since then:" +#~ msgstr "" + +#~ msgid "" +#~ "`GitHub: Compare v1.2.0...main " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "Thank the authors who contributed since" +#~ " the last release. 
This can be " +#~ "done by running the ``./dev/add-" +#~ "shortlog.sh`` convenience script (it can " +#~ "be ran multiple times and will " +#~ "update the names in the list if" +#~ " new contributors were added in the" +#~ " meantime)." +#~ msgstr "" + +#~ msgid "" +#~ "Update the ``changelog.md`` section header " +#~ "``Unreleased`` to contain the version " +#~ "number and date for the release " +#~ "you are building. Create a pull " +#~ "request with the change." +#~ msgstr "" + +#~ msgid "" +#~ "Tag the release commit with the " +#~ "version number as soon as the PR" +#~ " is merged: ``git tag v0.12.3``, then" +#~ " ``git push --tags``. This will " +#~ "create a draft release on GitHub " +#~ "containing the correct artifacts and the" +#~ " relevant part of the changelog." +#~ msgstr "" + +#~ msgid "" +#~ "Note that, in order to build the" +#~ " documentation locally (with ``poetry run" +#~ " make html``, like described below), " +#~ "`Pandoc _` needs " +#~ "to be installed on the system." +#~ msgstr "" + +#~ msgid "" +#~ "If you're familiar with how contributing" +#~ " on GitHub works, you can directly" +#~ " checkout our `getting started guide " +#~ "for contributors `_ and examples " +#~ "of `good first contributions " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "This will create a `flower/` (or " +#~ "the name of your fork if you " +#~ "renamed it) folder in the current " +#~ "working directory." +#~ msgstr "" + +#~ msgid "Otherwise you can always find this option in the `Branches` page." 
+#~ msgstr "" + +#~ msgid "" +#~ "Once you click the `Compare & pull" +#~ " request` button, you should see " +#~ "something similar to this:" +#~ msgstr "" + +#~ msgid "Find the source file in `doc/source`" +#~ msgstr "" + +#~ msgid "" +#~ "Make the change in the `.rst` file" +#~ " (beware, the dashes under the title" +#~ " should be the same length as " +#~ "the title itself)" +#~ msgstr "" + +#~ msgid "Change the file name to `save-progress.rst`" +#~ msgstr "" + +#~ msgid "Add a redirect rule to `doc/source/conf.py`" +#~ msgstr "" + +#~ msgid "" +#~ "This will cause a redirect from " +#~ "`saving-progress.html` to `save-progress.html`," +#~ " old links will continue to work." +#~ msgstr "" + +#~ msgid "" +#~ "For the lateral navigation bar to " +#~ "work properly, it is very important " +#~ "to update the `index.rst` file as " +#~ "well. This is where we define the" +#~ " whole arborescence of the navbar." +#~ msgstr "" + +#~ msgid "Find and modify the file name in `index.rst`" +#~ msgstr "" + +#~ msgid "Add CI job to deploy the staging system when the `main` branch changes" +#~ msgstr "" + +#~ msgid "`Python 3.7 `_ or above" +#~ msgstr "" + +#~ msgid "" +#~ "First, clone the `Flower repository " +#~ "`_ from GitHub::" +#~ msgstr "" + +#~ msgid "" +#~ "Second, create a virtual environment " +#~ "(and activate it). 
If you chose to" +#~ " use :code:`pyenv` (with the :code" +#~ ":`pyenv-virtualenv` plugin) and already " +#~ "have it installed , you can use" +#~ " the following convenience script (by " +#~ "default it will use :code:`Python " +#~ "3.8.17`, but you can change it by" +#~ " providing a specific :code:``)::" +#~ msgstr "" + +#~ msgid "" +#~ "If you don't have :code:`pyenv` " +#~ "installed, you can use the following " +#~ "script that will install pyenv, set " +#~ "it up and create the virtual " +#~ "environment (with :code:`Python 3.8.17` by " +#~ "default)::" +#~ msgstr "" + +#~ msgid "" +#~ "Third, install the Flower package in " +#~ "development mode (think :code:`pip install " +#~ "-e`) along with all necessary " +#~ "dependencies::" +#~ msgstr "" + +#~ msgid "" +#~ "Developers could run the full set " +#~ "of Github Actions workflows under their" +#~ " local environment by using `Act " +#~ "_`. Please refer to" +#~ " the installation instructions under the" +#~ " linked repository and run the next" +#~ " command under Flower main cloned " +#~ "repository folder::" +#~ msgstr "" + +#~ msgid "" +#~ "Please note that these components are" +#~ " still experimental, the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." +#~ msgstr "" + +#~ msgid "" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. Therefore, we use " +#~ "an adaptive approach [andrew]_ that " +#~ "continuously adjusts the clipping threshold" +#~ " to track a prespecified quantile of" +#~ " the update norm distribution." +#~ msgstr "" + +#~ msgid "" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realises the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." 
+#~ msgstr "" + +#~ msgid "" +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." +#~ msgstr "" + +#~ msgid "" +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. 
Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." +#~ msgstr "" + +#~ msgid "" +#~ "McMahan, H. Brendan, et al. \"Learning" +#~ " differentially private recurrent language " +#~ "models.\" arXiv preprint arXiv:1710.06963 " +#~ "(2017)." +#~ msgstr "" + +#~ msgid "" +#~ "Andrew, Galen, et al. \"Differentially " +#~ "private learning with adaptive clipping.\" " +#~ "Advances in Neural Information Processing " +#~ "Systems 34 (2021): 17455-17466." +#~ msgstr "" + +#~ msgid "" +#~ "The following command can be used " +#~ "to verfiy if Flower was successfully " +#~ "installed. If everything worked, it " +#~ "should print the version of Flower " +#~ "to the command line::" +#~ msgstr "" + +#~ msgid "flwr (Python API reference)" +#~ msgstr "" + +#~ msgid "start_client" +#~ msgstr "" + +#~ msgid "start_numpy_client" +#~ msgstr "" + +#~ msgid "start_simulation" +#~ msgstr "" + +#~ msgid "server.start_server" +#~ msgstr "" + +#~ msgid "server.strategy" +#~ msgstr "" + +#~ msgid "server.strategy.Strategy" +#~ msgstr "" + +#~ msgid "server.strategy.FedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.FedAvgM" +#~ msgstr "" + +#~ msgid "server.strategy.FedMedian" +#~ msgstr "" + +#~ msgid "server.strategy.QFedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.FaultTolerantFedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.FedOpt" +#~ msgstr "" + +#~ msgid "server.strategy.FedProx" +#~ msgstr "" + +#~ msgid "server.strategy.FedAdagrad" +#~ msgstr "" + +#~ msgid "server.strategy.FedAdam" +#~ msgstr "" + +#~ msgid "server.strategy.FedYogi" +#~ msgstr "" + +#~ msgid "server.strategy.FedTrimmedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.Krum" +#~ 
msgstr "" + +#~ msgid "server.strategy.FedXgbNnAvg" +#~ msgstr "" + +#~ msgid "server.strategy.DPFedAvgAdaptive" +#~ msgstr "" + +#~ msgid "server.strategy.DPFedAvgFixed" +#~ msgstr "" + +#~ msgid "" +#~ "**Fix the incorrect return types of " +#~ "Strategy** " +#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ msgstr "" + +#~ msgid "" +#~ "The types of the return values in" +#~ " the docstrings in two methods " +#~ "(`aggregate_fit` and `aggregate_evaluate`) now " +#~ "match the hint types in the code." +#~ msgstr "" + +#~ msgid "" +#~ "Using the `client_fn`, Flower clients " +#~ "can interchangeably run as standalone " +#~ "processes (i.e. via `start_client`) or " +#~ "in simulation (i.e. via `start_simulation`)" +#~ " without requiring changes to how the" +#~ " client class is defined and " +#~ "instantiated. Calling `start_numpy_client` is " +#~ "now deprecated." +#~ msgstr "" + +#~ msgid "" +#~ "**Update Flower Examples** " +#~ "([#2384](https://github.com/adap/flower/pull/2384)), " +#~ "([#2425](https://github.com/adap/flower/pull/2425))" +#~ msgstr "" + +#~ msgid "" +#~ "**General updates to baselines** " +#~ "([#2301](https://github.com/adap/flower/pull/2301), " +#~ "[#2305](https://github.com/adap/flower/pull/2305), " +#~ "[#2307](https://github.com/adap/flower/pull/2307), " +#~ "[#2327](https://github.com/adap/flower/pull/2327), " +#~ "[#2435](https://github.com/adap/flower/pull/2435))" +#~ msgstr "" + +#~ msgid "" +#~ "**General updates to the simulation " +#~ "engine** ([#2331](https://github.com/adap/flower/pull/2331), " +#~ "[#2447](https://github.com/adap/flower/pull/2447), " +#~ "[#2448](https://github.com/adap/flower/pull/2448))" +#~ msgstr "" + +#~ msgid "" +#~ "**General improvements** " +#~ "([#2309](https://github.com/adap/flower/pull/2309), " +#~ "[#2310](https://github.com/adap/flower/pull/2310), " +#~ "[2313](https://github.com/adap/flower/pull/2313), " +#~ "[#2316](https://github.com/adap/flower/pull/2316), " +#~ 
"[2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349)," +#~ " [#2360](https://github.com/adap/flower/pull/2360), " +#~ "[#2402](https://github.com/adap/flower/pull/2402), " +#~ "[#2446](https://github.com/adap/flower/pull/2446))" +#~ msgstr "" + +#~ msgid "" +#~ "`flower-superlink --driver-api-address " +#~ "\"0.0.0.0:8081\" --fleet-api-address " +#~ "\"0.0.0.0:8086\"`" +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." +#~ msgstr "" + +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using XGBoost and " +#~ "Flower!" +#~ msgstr "" + +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." 
+#~ msgstr "" + +#~ msgid "" +#~ "In this notebook, we'll build a " +#~ "federated learning system using Flower " +#~ "and PyTorch. In part 1, we use " +#~ "PyTorch for the model training pipeline" +#~ " and data loading. In part 2, " +#~ "we continue to federate the PyTorch-" +#~ "based pipeline using Flower." +#~ msgstr "" + +#~ msgid "" +#~ "Next, we install the necessary packages" +#~ " for PyTorch (``torch`` and " +#~ "``torchvision``) and Flower (``flwr``):" +#~ msgstr "" + +#~ msgid "" +#~ "Federated learning can be applied to " +#~ "many different types of tasks across " +#~ "different domains. In this tutorial, we" +#~ " introduce federated learning by training" +#~ " a simple convolutional neural network " +#~ "(CNN) on the popular CIFAR-10 dataset." +#~ " CIFAR-10 can be used to train " +#~ "image classifiers that distinguish between " +#~ "images from ten different classes:" +#~ msgstr "" + +#~ msgid "" +#~ "Each organization will act as a " +#~ "client in the federated learning system." 
+#~ " So having ten organizations participate" +#~ " in a federation means having ten " +#~ "clients connected to the federated " +#~ "learning server:" +#~ msgstr "" + +#~ msgid "" +#~ "Let's now load the CIFAR-10 training " +#~ "and test set, partition them into " +#~ "ten smaller datasets (each split into" +#~ " training and validation set), and " +#~ "wrap the resulting partitions by " +#~ "creating a PyTorch ``DataLoader`` for " +#~ "each of them:" +#~ msgstr "" + +#~ msgid "|ed6498a023f2477a9ccd57ee4514bda4|" +#~ msgstr "" + +#~ msgid "|5a4f742489ac4f819afefdd4dc9ab272|" +#~ msgstr "" + +#~ msgid "|3331c80cd05045f6a56524d8e3e76d0c|" +#~ msgstr "" + +#~ msgid "|4987b26884ec4b2c8f06c1264bcebe60|" +#~ msgstr "" + +#~ msgid "|ec8ae2d778aa493a986eb2fa29c220e5|" +#~ msgstr "" + +#~ msgid "|b8949d0669fe4f8eadc9a4932f4e9c57|" +#~ msgstr "" + +#~ msgid "|94ff30bdcd09443e8488b5f29932a541|" +#~ msgstr "" + +#~ msgid "|48dccf1d6d0544bba8917d2783a47719|" +#~ msgstr "" + +#~ msgid "|0366618db96b4f329f0d4372d1150fde|" +#~ msgstr "" + +#~ msgid "|ac80eddc76e6478081b1ca35eed029c0|" +#~ msgstr "" + +#~ msgid "|1ac94140c317450e89678db133c7f3c2|" +#~ msgstr "" + +#~ msgid "|f8850c6e96fc4430b55e53bba237a7c0|" +#~ msgstr "" + +#~ msgid "|4a368fdd3fc34adabd20a46752a68582|" +#~ msgstr "" + +#~ msgid "|40f69c17bb444652a7c8dfe577cd120e|" +#~ msgstr "" + diff --git a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po index 87af28422d56..720de8578261 100644 --- a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po +++ b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po @@ -7,17 +7,16 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2023-11-23 18:31+0100\n" -"PO-Revision-Date: 2023-11-25 19:00+0000\n" +"POT-Creation-Date: 2024-02-13 11:23+0100\n" +"PO-Revision-Date: 2024-02-10 11:56+0000\n" "Last-Translator: Yan Gao \n" -"Language-Team: Chinese (Simplified) \n" "Language: zh_Hans\n" 
+"Language-Team: Chinese (Simplified) \n" +"Plural-Forms: nplurals=1; plural=0;\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"X-Generator: Weblate 5.2.1-rc\n" "Generated-By: Babel 2.13.1\n" #: ../../source/contributor-explanation-architecture.rst:2 @@ -30,9 +29,9 @@ msgstr "边缘客户端引擎" #: ../../source/contributor-explanation-architecture.rst:7 msgid "" -"`Flower `_ core framework architecture with Edge " +"`Flower `_ core framework architecture with Edge " "Client Engine" -msgstr "具有边缘客户端引擎的`Flower `核心架构" +msgstr "具有边缘客户端引擎的`Flower `核心架构" #: ../../source/contributor-explanation-architecture.rst:13 msgid "Virtual Client Engine" @@ -40,9 +39,9 @@ msgstr "虚拟客户端引擎" #: ../../source/contributor-explanation-architecture.rst:15 msgid "" -"`Flower `_ core framework architecture with Virtual " +"`Flower `_ core framework architecture with Virtual " "Client Engine" -msgstr "具有虚拟客户端引擎的`Flower `核心架构" +msgstr "具有虚拟客户端引擎的`Flower `核心架构" #: ../../source/contributor-explanation-architecture.rst:21 msgid "Virtual Client Engine and Edge Client Engine in the same workload" @@ -50,130 +49,251 @@ msgstr "可同步进行的虚拟客户端引擎和边缘客户端引擎" #: ../../source/contributor-explanation-architecture.rst:23 msgid "" -"`Flower `_ core framework architecture with both " +"`Flower `_ core framework architecture with both " "Virtual Client Engine and Edge Client Engine" -msgstr "具有虚拟客户端引擎和边缘客户端引擎的`Flower `核心架构" +msgstr "具有虚拟客户端引擎和边缘客户端引擎的`Flower `核心架构" -#: ../../source/contributor-how-create-new-messages.rst:2 -msgid "Creating New Messages" -msgstr "创建新信息" +#: ../../source/contributor-how-to-build-docker-images.rst:2 +msgid "How to build Docker Flower images locally" +msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:4 +#: ../../source/contributor-how-to-build-docker-images.rst:4 msgid "" -"This is a simple guide for creating a new type of message between the " -"server and clients in Flower." 
+"Flower provides pre-made docker images on `Docker Hub " +"`_ that include all necessary " +"dependencies for running the server. You can also build your own custom " +"docker images from scratch with a different version of Python or Ubuntu " +"if that is what you need. In this guide, we will explain what images " +"exist and how to build them locally." msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:6 +#: ../../source/contributor-how-to-build-docker-images.rst:9 msgid "" -"Let's suppose we have the following example functions in " -":code:`server.py` and :code:`numpy_client.py`..." +"Before we can start, we need to meet a few prerequisites in our local " +"development environment." msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:8 -msgid "Server's side:" +#: ../../source/contributor-how-to-build-docker-images.rst:11 +#, fuzzy +msgid "Clone the flower repository." +msgstr "**叉花仓库**" + +#: ../../source/contributor-how-to-build-docker-images.rst:17 +msgid "Verify the Docker daemon is running." msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:17 -msgid "Client's side:" +#: ../../source/contributor-how-to-build-docker-images.rst:19 +msgid "" +"Please follow the first section on `Run Flower using Docker " +"`_ " +"which covers this step in more detail." msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:26 +#: ../../source/contributor-how-to-build-docker-images.rst:23 msgid "" -"Let's now see what we need to implement in order to get this simple " -"function between the server and client to work!" +"Currently, Flower provides two images, a base image and a server image. " +"There will also be a client image soon. The base image, as the name " +"suggests, contains basic dependencies that both the server and the client" +" need. This includes system dependencies, Python and Python tools. 
The " +"server image is based on the base image, but it additionally installs the" +" Flower server using ``pip``." msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:30 -msgid "Message Types for Protocol Buffers" +#: ../../source/contributor-how-to-build-docker-images.rst:28 +msgid "" +"The build instructions that assemble the images are located in the " +"respective Dockerfiles. You can find them in the subdirectories of " +"``src/docker``." msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:32 +#: ../../source/contributor-how-to-build-docker-images.rst:31 msgid "" -"The first thing we need to do is to define a message type for the RPC " -"system in :code:`transport.proto`. Note that we have to do it for both " -"the request and response messages. For more details on the syntax of " -"proto3, please see the `official documentation " -"`_." +"Both, base and server image are configured via build arguments. Through " +"build arguments, we can make our build more flexible. For example, in the" +" base image, we can specify the version of Python to install using the " +"``PYTHON_VERSION`` build argument. Some of the build arguments have " +"default values, others must be specified when building the image. All " +"available build arguments for each image are listed in one of the tables " +"below." 
msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:35 -msgid "Within the :code:`ServerMessage` block:" +#: ../../source/contributor-how-to-build-docker-images.rst:38 +#, fuzzy +msgid "Building the base image" +msgstr "加载数据" + +#: ../../source/contributor-how-to-build-docker-images.rst:44 +#: ../../source/contributor-how-to-build-docker-images.rst:86 +#, fuzzy +msgid "Build argument" +msgstr "构建文档" + +#: ../../source/contributor-how-to-build-docker-images.rst:45 +#: ../../source/contributor-how-to-build-docker-images.rst:87 +#, fuzzy +msgid "Description" +msgstr "停用" + +#: ../../source/contributor-how-to-build-docker-images.rst:46 +#: ../../source/contributor-how-to-build-docker-images.rst:88 +#, fuzzy +msgid "Required" +msgstr "所需变更" + +#: ../../source/contributor-how-to-build-docker-images.rst:47 +#: ../../source/contributor-how-to-build-docker-images.rst:89 +#, fuzzy +msgid "Example" +msgstr "实例" + +#: ../../source/contributor-how-to-build-docker-images.rst:48 +#, fuzzy +msgid "``PYTHON_VERSION``" +msgstr "Python 版本" + +#: ../../source/contributor-how-to-build-docker-images.rst:49 +msgid "Version of ``python`` to be installed." msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:52 -msgid "Within the ClientMessage block:" +#: ../../source/contributor-how-to-build-docker-images.rst:50 +#: ../../source/contributor-how-to-build-docker-images.rst:54 +#: ../../source/contributor-how-to-build-docker-images.rst:58 +#: ../../source/contributor-how-to-build-docker-images.rst:100 +#, fuzzy +msgid "Yes" +msgstr "类型" + +#: ../../source/contributor-how-to-build-docker-images.rst:51 +#, fuzzy +msgid "``3.11``" +msgstr "``1.0.0rc1``" + +#: ../../source/contributor-how-to-build-docker-images.rst:52 +msgid "``PIP_VERSION``" msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:70 -msgid "" -"Make sure to also add a field of the newly created message type in " -":code:`oneof msg`." 
+#: ../../source/contributor-how-to-build-docker-images.rst:53 +msgid "Version of ``pip`` to be installed." msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:72 -msgid "Once that is done, we will compile the file with:" +#: ../../source/contributor-how-to-build-docker-images.rst:55 +#, fuzzy +msgid "``23.0.1``" +msgstr "``1.0.0rc1``" + +#: ../../source/contributor-how-to-build-docker-images.rst:56 +msgid "``SETUPTOOLS_VERSION``" msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:78 -msgid "If it compiles succesfully, you should see the following message:" +#: ../../source/contributor-how-to-build-docker-images.rst:57 +msgid "Version of ``setuptools`` to be installed." msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:87 -msgid "Serialization and Deserialization Functions" +#: ../../source/contributor-how-to-build-docker-images.rst:59 +#, fuzzy +msgid "``69.0.2``" +msgstr "``1.0.0b0``" + +#: ../../source/contributor-how-to-build-docker-images.rst:60 +msgid "``UBUNTU_VERSION``" msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:89 -msgid "" -"Our next step is to add functions to serialize and deserialize Python " -"datatypes to or from our defined RPC message types. You should add these " -"functions in :code:`serde.py`." +#: ../../source/contributor-how-to-build-docker-images.rst:61 +msgid "Version of the official Ubuntu Docker image." msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:91 -msgid "The four functions:" +#: ../../source/contributor-how-to-build-docker-images.rst:62 +msgid "Defaults to ``22.04``." 
msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:112 -msgid "Sending the Message from the Server" +#: ../../source/contributor-how-to-build-docker-images.rst:65 +msgid "" +"The following example creates a base image with Python 3.11.0, pip 23.0.1" +" and setuptools 69.0.2:" msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:114 +#: ../../source/contributor-how-to-build-docker-images.rst:76 msgid "" -"Now write the request function in your Client Proxy class (e.g., " -":code:`grpc_client_proxy.py`) using the serde functions you just created:" +"The name of image is ``flwr_base`` and the tag ``0.1.0``. Remember that " +"the build arguments as well as the name and tag can be adapted to your " +"needs. These values serve as examples only." msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:128 -msgid "Receiving the Message by the Client" +#: ../../source/contributor-how-to-build-docker-images.rst:80 +#, fuzzy +msgid "Building the server image" +msgstr "启动服务器" + +#: ../../source/contributor-how-to-build-docker-images.rst:90 +msgid "``BASE_REPOSITORY``" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:91 +msgid "The repository name of the base image." +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:92 +msgid "Defaults to ``flwr/server``." +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:94 +msgid "``BASE_IMAGE_TAG``" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:95 +msgid "The image tag of the base image." +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:96 +msgid "Defaults to ``py3.11-ubuntu22.04``." +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:98 +msgid "``FLWR_VERSION``" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:99 +msgid "Version of Flower to be installed." 
msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:130 +#: ../../source/contributor-how-to-build-docker-images.rst:101 +#, fuzzy +msgid "``1.7.0``" +msgstr "``1.0.0b0``" + +#: ../../source/contributor-how-to-build-docker-images.rst:103 msgid "" -"Last step! Modify the code in :code:`message_handler.py` to check the " -"field of your message and call the :code:`example_response` function. " -"Remember to use the serde functions!" +"The following example creates a server image with the official Flower " +"base image py3.11-ubuntu22.04 and Flower 1.7.0:" msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:132 -msgid "Within the handle function:" +#: ../../source/contributor-how-to-build-docker-images.rst:114 +msgid "" +"The name of image is ``flwr_server`` and the tag ``0.1.0``. Remember that" +" the build arguments as well as the name and tag can be adapted to your " +"needs. These values serve as examples only." msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:139 -msgid "And add a new function:" +#: ../../source/contributor-how-to-build-docker-images.rst:117 +msgid "" +"If you want to use your own base image instead of the official Flower " +"base image, all you need to do is set the ``BASE_REPOSITORY`` and " +"``BASE_IMAGE_TAG`` build arguments. The value of ``BASE_REPOSITORY`` must" +" match the name of your image and the value of ``BASE_IMAGE_TAG`` must " +"match the tag of your image." msgstr "" -#: ../../source/contributor-how-create-new-messages.rst:149 -msgid "Hopefully, when you run your program you will get the intended result!" 
+#: ../../source/contributor-how-to-build-docker-images.rst:131 +msgid "After creating the image, we can test whether the image is working:" msgstr "" #: ../../source/contributor-how-to-contribute-translations.rst:2 msgid "Contribute translations" -msgstr "" +msgstr "贡献译文" #: ../../source/contributor-how-to-contribute-translations.rst:4 msgid "" -"Since `Flower 1.5 `_ we have introduced translations to " "our doc pages, but, as you might have noticed, the translations are often" " imperfect. If you speak languages other than English, you might be able " @@ -182,6 +302,10 @@ msgid "" "also be a great opportunity for those wanting to become open source " "contributors with little prerequistes." msgstr "" +"从 `Flower 1.5 `_ " +"开始,我们在文档页面中引入了翻译,但正如你可能已经注意到的,这些翻译往往并不完美。如果您会说英语以外的语言,也许您可以帮助我们翻译这些文档,让更多的人了解" +" Federated Learning!对于那些想成为开源贡献者的人来说,这也是一个很好的机会。" #: ../../source/contributor-how-to-contribute-translations.rst:13 msgid "" @@ -189,10 +313,12 @@ msgid "" "`_, this " "where most of the work will happen." msgstr "" +"我们的翻译项目已在 \"Weblate `_\"上公开,大部分工作都将在这里进行。" #: ../../source/contributor-how-to-contribute-translations.rst:18 msgid "Contribute to existing languages" -msgstr "" +msgstr "为现有语言作出贡献" #: ../../source/contributor-how-to-contribute-translations.rst:23 msgid "" @@ -202,6 +328,8 @@ msgid "" " profile settings can be found `here " "`_." msgstr "" +"您需要做的第一件事就是在本`网页`_上创建一个免费的Weblate帐户。有关个人资料设置的更多信息,请参阅`这里" +" `_。" #: ../../source/contributor-how-to-contribute-translations.rst:29 msgid "" @@ -210,12 +338,15 @@ msgid "" "docs/framework/>`_. Here, you should see the different existing languages" " that can be found on the website." 
msgstr "" +"登录到Weblate后,您可以导航到 \"Flower Framework " +"\"项目`_。在这里,您可以看到网站上现有的各种语言。" #: ../../source/contributor-how-to-contribute-translations.rst:34 msgid "" "Once you have selected the language you want to contribute to, you should" " see a similar interface to this:" -msgstr "" +msgstr "选择您要贡献的语言后,您应该会看到与此类似的界面:" #: ../../source/contributor-how-to-contribute-translations.rst:39 msgid "" @@ -223,11 +354,11 @@ msgid "" "button on the top right (in the ``Translation status`` section). This " "will automatically bring you to the translation interface for " "untranslated strings." -msgstr "" +msgstr "最简单的方法是点击右上角(\"翻译状态 \"部分)的 \"翻译 \"按钮。这将自动带您进入未翻译字符串的翻译界面。" #: ../../source/contributor-how-to-contribute-translations.rst:43 msgid "This is what the interface looks like:" -msgstr "" +msgstr "这就是界面的样子:" #: ../../source/contributor-how-to-contribute-translations.rst:47 msgid "" @@ -238,6 +369,9 @@ msgid "" "your translation to suggestions for other users to view), or ``Skip`` (to" " go to the next untranslated string without saving anything)." msgstr "" +"您可以在顶部的文本框中输入翻译内容,满意后按 " +"\"保存并继续\"(保存翻译内容并转到下一个未翻译的字符串)、\"保存并停留\"(保存翻译内容并停留在同一页面)、\"建议\"(将您的翻译添加到建议中供其他用户查看)或" +" \"跳过\"(转到下一个未翻译的字符串而不保存任何内容)。" #: ../../source/contributor-how-to-contribute-translations.rst:54 msgid "" @@ -247,13 +381,15 @@ msgid "" "translations in ``Other languages``, and the ``History`` of translations " "for this string." msgstr "" +"为了帮助翻译,您可以在底部看到 \"邻近字符串\"、\"评论\"(来自其他贡献者)、\"自动建议\"(来自机器翻译引擎)、\"其他语言 " +"\"中的翻译以及该字符串的 \"历史翻译\"。" #: ../../source/contributor-how-to-contribute-translations.rst:59 msgid "" "On the right, under the ``String information`` section, you can also " "click the link under ``Source string location`` in order to view the " "source of the doc file containing the string." 
-msgstr "" +msgstr "在右侧的 \"字符串信息 \"部分,您还可以单击 \"源字符串位置 \"下的链接,以查看包含字符串的 doc 文件的源文件。" #: ../../source/contributor-how-to-contribute-translations.rst:63 msgid "" @@ -261,21 +397,145 @@ msgid "" "this `in-depth guide " "`_." msgstr "" +"有关使用 Weblate 进行翻译的更多信息,您可以查看本 \"深入指南 " +"`_\"。" #: ../../source/contributor-how-to-contribute-translations.rst:67 msgid "Add new languages" -msgstr "" +msgstr "添加新语言" #: ../../source/contributor-how-to-contribute-translations.rst:69 msgid "" "If you want to add a new language, you will first have to contact us, " -"either on `Slack `_, or by opening an " +"either on `Slack `_, or by opening an " "issue on our `GitHub repo `_." msgstr "" +"如果您想添加新语言,请先联系我们,可以在 `Slack `_ 上联系,也可以在我们的" +" `GitHub repo `_ 上提交问题。" + +#: ../../source/contributor-how-to-create-new-messages.rst:2 +msgid "Creating New Messages" +msgstr "创建新信息" + +#: ../../source/contributor-how-to-create-new-messages.rst:4 +msgid "" +"This is a simple guide for creating a new type of message between the " +"server and clients in Flower." +msgstr "这是一个如何用Flower在服务器和客户端之间创建新类型的信息的简要指导。" + +#: ../../source/contributor-how-to-create-new-messages.rst:6 +msgid "" +"Let's suppose we have the following example functions in " +":code:`server.py` and :code:`numpy_client.py`..." +msgstr "假设我们在脚本code:`server.py`和code:`numpy_client.py`中有以下的示例函数..." + +#: ../../source/contributor-how-to-create-new-messages.rst:8 +msgid "Server's side:" +msgstr "在服务器端:" + +#: ../../source/contributor-how-to-create-new-messages.rst:17 +msgid "Client's side:" +msgstr "在客户端:" + +#: ../../source/contributor-how-to-create-new-messages.rst:26 +msgid "" +"Let's now see what we need to implement in order to get this simple " +"function between the server and client to work!" +msgstr "现在让我们来看看,为了让服务器和客户端之间的这个简单的函数正常工作,我们需要实现哪些功能!" 
+ +#: ../../source/contributor-how-to-create-new-messages.rst:30 +msgid "Message Types for Protocol Buffers" +msgstr "协议缓冲区的信息类型" + +#: ../../source/contributor-how-to-create-new-messages.rst:32 +msgid "" +"The first thing we need to do is to define a message type for the RPC " +"system in :code:`transport.proto`. Note that we have to do it for both " +"the request and response messages. For more details on the syntax of " +"proto3, please see the `official documentation " +"`_." +msgstr "" +"我们需要做的第一件事是在脚本code:`transport.proto`中定义 RPC " +"系统的消息类型。请注意,我们必须对请求信息和响应信息都这样做。有关 proto3 语法的更多详情,请参阅官方文档 " +"`_。" + +#: ../../source/contributor-how-to-create-new-messages.rst:35 +msgid "Within the :code:`ServerMessage` block:" +msgstr "在 :code:`ServerMessage` 代码块中:" + +#: ../../source/contributor-how-to-create-new-messages.rst:52 +msgid "Within the ClientMessage block:" +msgstr "在 ClientMessage 代码块中:" + +#: ../../source/contributor-how-to-create-new-messages.rst:70 +msgid "" +"Make sure to also add a field of the newly created message type in " +":code:`oneof msg`." +msgstr "确保在 :code:`oneof msg` 中也添加一个新创建的消息类型字段。" + +#: ../../source/contributor-how-to-create-new-messages.rst:72 +msgid "Once that is done, we will compile the file with:" +msgstr "完成后,我们将使用:" + +#: ../../source/contributor-how-to-create-new-messages.rst:78 +msgid "If it compiles succesfully, you should see the following message:" +msgstr "如果编译成功,你应该会看到以下信息:" + +#: ../../source/contributor-how-to-create-new-messages.rst:87 +msgid "Serialization and Deserialization Functions" +msgstr "序列化和反序列化函数" + +#: ../../source/contributor-how-to-create-new-messages.rst:89 +msgid "" +"Our next step is to add functions to serialize and deserialize Python " +"datatypes to or from our defined RPC message types. You should add these " +"functions in :code:`serde.py`." 
+msgstr "" +"下一步是添加函数,以便将 Python 数据类型序列化和反序列化为我们定义的 RPC 消息类型或从我们定义的 RPC 消息类型反序列化和反序列化 " +"Python 数据类型。您应该在 :code:`serde.py` 中添加这些函数。" + +#: ../../source/contributor-how-to-create-new-messages.rst:91 +msgid "The four functions:" +msgstr "四种函数:" + +#: ../../source/contributor-how-to-create-new-messages.rst:112 +msgid "Sending the Message from the Server" +msgstr "从服务器发送信息" + +#: ../../source/contributor-how-to-create-new-messages.rst:114 +msgid "" +"Now write the request function in your Client Proxy class (e.g., " +":code:`grpc_client_proxy.py`) using the serde functions you just created:" +msgstr "现在,在客户端代理类(例如 :code:`grpc_client_proxy.py`)中使用刚才创建的 serde 函数编写请求函数:" + +#: ../../source/contributor-how-to-create-new-messages.rst:128 +msgid "Receiving the Message by the Client" +msgstr "由客户端接收信息" + +#: ../../source/contributor-how-to-create-new-messages.rst:130 +msgid "" +"Last step! Modify the code in :code:`message_handler.py` to check the " +"field of your message and call the :code:`example_response` function. " +"Remember to use the serde functions!" +msgstr "" +"最后一步 修改 :code:`message_handler.py` 中的代码,检查信息的字段并调用 " +":code:`example_response` 函数。记住使用 serde 函数!" + +#: ../../source/contributor-how-to-create-new-messages.rst:132 +msgid "Within the handle function:" +msgstr "在句柄函数内:" + +#: ../../source/contributor-how-to-create-new-messages.rst:139 +msgid "And add a new function:" +msgstr "并增加一个新函数:" + +#: ../../source/contributor-how-to-create-new-messages.rst:149 +msgid "Hopefully, when you run your program you will get the intended result!" +msgstr "希望您在运行程序时能得到预期的结果!" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:2 msgid "Develop in VSCode Dev Containers" -msgstr "" +msgstr "使用 VSCode Dev Containers 进行开发" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:4 msgid "" @@ -284,6 +544,8 @@ msgid "" "tests. For this purpose we are using the VSCode Remote Containers " "extension. What is it? 
Read the following quote:" msgstr "" +"在开发 Flower 框架时,我们希望确保所有贡献者使用相同的开发环境来格式化代码或运行测试。为此,我们使用了 VSCode " +"远程容器扩展。这是什么?请阅读下面这段话:" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:7 msgid "" @@ -297,6 +559,10 @@ msgid "" "separate tools, libraries, or runtimes needed for working with a " "codebase." msgstr "" +"Visual Studio Code Remote - " +"Containers扩展可让你将Docker容器用作功能齐全的开发环境。它允许你打开容器内(或挂载到容器内)的任何文件夹,并利用 Visual " +"Studio Code 的全部功能集。项目中的 :code:`devcontainer.json` 文件会告诉 VS Code " +"如何访问(或创建)一个带有定义明确的工具和运行时栈的开发容器。该容器可用于运行应用程序,也可用于分离处理代码库所需的工具、库或运行时。" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:9 msgid "" @@ -305,27 +571,32 @@ msgid "" "container, where they have full access to the tools, platform, and file " "system. This means that you can seamlessly switch your entire development" " environment just by connecting to a different container." -msgstr "" +msgstr "工作区文件从本地文件系统加载,或复制或克隆到容器中。扩展在容器内安装和运行,在容器内它们可以完全访问工具、平台和文件系统。这意味着,只需连接到不同的容器,就能无缝切换整个开发环境。" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:11 msgid "" "Source: `Official VSCode documentation " "`_" -msgstr "" +msgstr "来源:`VSCode 官方文档 `_" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:15 msgid "Getting started" -msgstr "" +msgstr "开始" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:17 +#, fuzzy msgid "" "Configuring and setting up the :code:`Dockerfile` as well the " "configuration for the devcontainer can be a bit more involved. The good " -"thing is you want have to do it. Usually it should be enough to install " -"Docker on your system and ensure its available on your command line. " -"Additionally, install the `VSCode Containers Extension `_." +"thing is you don't have to do it. Usually it should be enough to install " +"`Docker `_ on your system and " +"ensure its available on your command line. Additionally, install the " +"`VSCode Containers Extension `_." 
msgstr "" +"配置和设置 :code:`Dockerfile` 以及 devcontainer 的配置可能比较复杂。好在你想做就得做。通常只需在系统中安装 " +"Docker 并确保其在命令行中可用即可。此外,请安装 `VSCode Containers Extension " +"`_。" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:19 msgid "" @@ -336,12 +607,14 @@ msgid "" "area in the bottom left corner of your VSCode window and select the " "option *(Re)Open Folder in Container*." msgstr "" +"现在你应该可以开始了。启动 VSCode 时,它会要求你在容器环境中运行,如果你确认,它会自动构建容器并使用它。要手动指示 VSCode 使用 " +"devcontainer,可以在安装扩展后,点击 VSCode 窗口左下角的绿色区域,然后选择 \"*(重新)在容器中打开文件夹*\"选项。" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:21 msgid "" "In some cases your setup might be more involved. For those cases consult " "the following sources:" -msgstr "" +msgstr "在某些情况下,您的设置可能更复杂。有关这些情况,请参考以下资料:" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:23 msgid "" @@ -349,24 +622,26 @@ msgid "" "`_" msgstr "" +"在容器内开发 `_" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:24 msgid "" "`Remote development in Containers " "`_" -msgstr "" +msgstr "容器中的远程开发 `_" #: ../../source/contributor-how-to-install-development-versions.rst:2 msgid "Install development versions" -msgstr "" +msgstr "安装开发版本" #: ../../source/contributor-how-to-install-development-versions.rst:5 msgid "Install development versions of Flower" -msgstr "" +msgstr "安装 Flower 的开发版本" #: ../../source/contributor-how-to-install-development-versions.rst:8 msgid "Using Poetry (recommended)" -msgstr "" +msgstr "使用诗歌(推荐)" #: ../../source/contributor-how-to-install-development-versions.rst:10 msgid "" @@ -374,50 +649,61 @@ msgid "" "in ``pyproject.toml`` and then reinstall (don't forget to delete " "``poetry.lock`` (``rm poetry.lock``) before running ``poetry install``)." 
msgstr "" +"安装来自 PyPI 的 ``flwr`` 预发布版本:更新 ``pyproject.toml`` 中的 ``flwr`` " +"依赖关系,然后重新安装(运行 ``poetry install` 前,别忘了删除 ``poetry.lock` (``rm " +"poetry.lock`))。" #: ../../source/contributor-how-to-install-development-versions.rst:12 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (without " "extras)" -msgstr "" +msgstr "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (不含额外内容)" #: ../../source/contributor-how-to-install-development-versions.rst:13 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true, extras = " "[\"simulation\"] }`` (with extras)" msgstr "" +"``flwr = { version = \"1.0.0a0\", allow-prereleases = true, extras = " +"[\"simulation\"] }`` (包含额外内容)" #: ../../source/contributor-how-to-install-development-versions.rst:15 msgid "" "Install ``flwr`` from a local copy of the Flower source code via " "``pyproject.toml``:" -msgstr "" +msgstr "通过 ``pyproject.toml`` 从 Flower 源代码的本地副本安装 ``flwr``:" #: ../../source/contributor-how-to-install-development-versions.rst:17 msgid "``flwr = { path = \"../../\", develop = true }`` (without extras)" -msgstr "" +msgstr "``flwr = { path = \"../../\", develop = true }`` (不含额外内容)" #: ../../source/contributor-how-to-install-development-versions.rst:18 msgid "" "``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " "}`` (with extras)" msgstr "" +"``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " +"}`` (包含额外内容)" #: ../../source/contributor-how-to-install-development-versions.rst:20 msgid "Install ``flwr`` from a local wheel file via ``pyproject.toml``:" -msgstr "" +msgstr "通过 ``pyproject.toml`` 从本地轮子文件安装 ``flwr``:" #: ../../source/contributor-how-to-install-development-versions.rst:22 +#, fuzzy msgid "" -"``flwr = { path = \"../../dist/flwr-1.0.0-py3-none-any.whl\" }`` (without" +"``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (without" " extras)" -msgstr "" +msgstr "``flwr = { path = 
\"../../dist/flwr-1.0.0-py3-none-any.whl\" }``(无额外内容)" #: ../../source/contributor-how-to-install-development-versions.rst:23 +#, fuzzy msgid "" -"``flwr = { path = \"../../dist/flwr-1.0.0-py3-none-any.whl\", extras = " +"``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\", extras = " "[\"simulation\"] }`` (with extras)" msgstr "" +"``flwr = { path = \"../../dist/flwr-1.0.0-py3-none-any.whl\", extras = " +"[\"simulation\"] }`` (包含额外内容)" #: ../../source/contributor-how-to-install-development-versions.rst:25 msgid "" @@ -425,299 +711,309 @@ msgid "" "Dependency Specification `_" msgstr "" +"有关详细信息,请参阅 Poetry 文档: 诗歌依赖性规范 `_" #: ../../source/contributor-how-to-install-development-versions.rst:28 msgid "Using pip (recommended on Colab)" -msgstr "" +msgstr "使用 pip(建议在 Colab 上使用)" #: ../../source/contributor-how-to-install-development-versions.rst:30 msgid "Install a ``flwr`` pre-release from PyPI:" -msgstr "" +msgstr "从 PyPI 安装 ``flwr`` 预发行版:" #: ../../source/contributor-how-to-install-development-versions.rst:32 msgid "``pip install -U --pre flwr`` (without extras)" -msgstr "" +msgstr "`pip install -U -pre flwr``(不含额外功能)" #: ../../source/contributor-how-to-install-development-versions.rst:33 msgid "``pip install -U --pre flwr[simulation]`` (with extras)" -msgstr "" +msgstr "`pip install -U -pre flwr[simulation]``(包含额外功能)" #: ../../source/contributor-how-to-install-development-versions.rst:35 msgid "" "Python packages can be installed from git repositories. Use one of the " "following commands to install the Flower directly from GitHub." 
-msgstr "" +msgstr "Python 软件包可以从 git 仓库安装。使用以下命令之一直接从 GitHub 安装 Flower。" #: ../../source/contributor-how-to-install-development-versions.rst:37 msgid "Install ``flwr`` from the default GitHub branch (``main``):" -msgstr "" +msgstr "从 GitHub 的默认分支 (``main`) 安装 ``flwr``:" #: ../../source/contributor-how-to-install-development-versions.rst:39 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git`` (without " "extras)" -msgstr "" +msgstr "`pip install flwr@git+https://github.com/adap/flower.git`` (不含额外功能)" #: ../../source/contributor-how-to-install-development-versions.rst:40 msgid "" "``pip install flwr[simulation]@git+https://github.com/adap/flower.git`` " "(with extras)" msgstr "" +"`pip install " +"flwr[simulation]@git+https://github.com/adap/flower.git``(带附加功能)" #: ../../source/contributor-how-to-install-development-versions.rst:42 msgid "Install ``flwr`` from a specific GitHub branch (``branch-name``):" -msgstr "" +msgstr "从特定的 GitHub 分支 (`分支名`) 安装 ``flwr``:" #: ../../source/contributor-how-to-install-development-versions.rst:44 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " "(without extras)" msgstr "" +"`pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " +"(不含附加功能)" #: ../../source/contributor-how-to-install-development-versions.rst:45 msgid "" "``pip install flwr[simulation]@git+https://github.com/adap/flower.git" "@branch-name`` (with extras)" -msgstr "" +msgstr "`pip安装flwr[模拟]@git+https://github.com/adap/flower.git@分支名``(带附加功能)" #: ../../source/contributor-how-to-install-development-versions.rst:49 msgid "Open Jupyter Notebooks on Google Colab" -msgstr "" +msgstr "在谷歌 Colab 上打开 Jupyter 笔记本" #: ../../source/contributor-how-to-install-development-versions.rst:51 msgid "" "Open the notebook ``doc/source/tutorial-get-started-with-flower-" "pytorch.ipynb``:" -msgstr "" +msgstr "打开笔记本 ``doc/source/tutorial-get-started-with-flower-pytorch.ipynb``:" #: 
../../source/contributor-how-to-install-development-versions.rst:53 msgid "" "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" "/tutorial-get-started-with-flower-pytorch.ipynb" msgstr "" +"https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-get-started-with-flower-pytorch.ipynb" #: ../../source/contributor-how-to-install-development-versions.rst:55 msgid "" "Open a development version of the same notebook from branch `branch-name`" " by changing ``main`` to ``branch-name`` (right after ``blob``):" msgstr "" +"将 ``main`` 改为 ``branch-name``(紧跟在 ``blob``之后),从分支 `branch-name` " +"打开同一笔记本的开发版本:" #: ../../source/contributor-how-to-install-development-versions.rst:57 msgid "" "https://colab.research.google.com/github/adap/flower/blob/branch-" "name/doc/source/tutorial-get-started-with-flower-pytorch.ipynb" msgstr "" +"https://colab.research.google.com/github/adap/flower/blob/branch-" +"name/doc/source/tutorial-get-started-with-flower-pytorch.ipynb" #: ../../source/contributor-how-to-install-development-versions.rst:59 msgid "Install a `whl` on Google Colab:" -msgstr "" +msgstr "在 Google Colab 上安装 `whl`:" #: ../../source/contributor-how-to-install-development-versions.rst:61 msgid "" "In the vertical icon grid on the left hand side, select ``Files`` > " "``Upload to session storage``" -msgstr "" +msgstr "在左侧的垂直图标网格中,选择 \"文件\">\"上传到会话存储\"" #: ../../source/contributor-how-to-install-development-versions.rst:62 -msgid "Upload the whl (e.g., ``flwr-1.6.0-py3-none-any.whl``)" -msgstr "" +#, fuzzy +msgid "Upload the whl (e.g., ``flwr-1.8.0-py3-none-any.whl``)" +msgstr "更新 whl (e.g., ``flwr-1.7.0-py3-none-any.whl``)" #: ../../source/contributor-how-to-install-development-versions.rst:63 +#, fuzzy msgid "" "Change ``!pip install -q 'flwr[simulation]' torch torchvision " -"matplotlib`` to ``!pip install -q 'flwr-1.6.0-py3-none-" +"matplotlib`` to ``!pip install -q 'flwr-1.8.0-py3-none-" "any.whl[simulation]' torch 
torchvision matplotlib``" msgstr "" +"把``!pip install -q 'flwr[simulation]' torch torchvision " +"matplotlib``变为``!pip install -q 'flwr-1.7.0-py3-none-any.whl[simulation]'" +" torch torchvision matplotlib``" #: ../../source/contributor-how-to-release-flower.rst:2 msgid "Release Flower" -msgstr "" +msgstr "发布 Flower" #: ../../source/contributor-how-to-release-flower.rst:4 msgid "" "This document describes the current release process. It may or may not " "change in the future." -msgstr "" +msgstr "本文件描述了当前的发布流程。今后可能会有变化,也可能不会有变化。" #: ../../source/contributor-how-to-release-flower.rst:7 -msgid "Before the release" -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:9 -msgid "" -"Update the changelog (``changelog.md``) with all relevant changes that " -"happened after the last release. If the last release was tagged " -"``v1.2.0``, you can use the following URL to see all commits that got " -"merged into ``main`` since then:" -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:11 -msgid "" -"`GitHub: Compare v1.2.0...main " -"`_" -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:13 -msgid "" -"Thank the authors who contributed since the last release. This can be " -"done by running the ``./dev/add-shortlog.sh`` convenience script (it can " -"be ran multiple times and will update the names in the list if new " -"contributors were added in the meantime)." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:16 msgid "During the release" -msgstr "" +msgstr "在发布期间" -#: ../../source/contributor-how-to-release-flower.rst:18 +#: ../../source/contributor-how-to-release-flower.rst:9 msgid "" "The version number of a release is stated in ``pyproject.toml``. 
To " "release a new version of Flower, the following things need to happen (in " "that order):" +msgstr "版本号在 ``pyproject.toml`` 中说明。要发布 Flower 的新版本,需要完成以下工作(按顺序排列):" + +#: ../../source/contributor-how-to-release-flower.rst:11 +msgid "" +"Run ``python3 src/py/flwr_tool/update_changelog.py `` in " +"order to add every new change to the changelog (feel free to make manual " +"changes to the changelog afterwards until it looks good)." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:20 +#: ../../source/contributor-how-to-release-flower.rst:12 msgid "" -"Update the ``changelog.md`` section header ``Unreleased`` to contain the " -"version number and date for the release you are building. Create a pull " -"request with the change." +"Once the changelog has been updated with all the changes, run ``./dev" +"/prepare-release-changelog.sh v``, where ```` " +"is the version stated in ``pyproject.toml`` (notice the ``v`` added " +"before it). This will replace the ``Unreleased`` header of the changelog " +"by the version and current date, and it will add a thanking message for " +"the contributors. Open a pull request with those changes." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:21 +#: ../../source/contributor-how-to-release-flower.rst:13 +#, fuzzy msgid "" -"Tag the release commit with the version number as soon as the PR is " -"merged: ``git tag v0.12.3``, then ``git push --tags``. This will create a" -" draft release on GitHub containing the correct artifacts and the " -"relevant part of the changelog." +"Once the pull request is merged, tag the release commit with the version " +"number as soon as the PR is merged: ``git tag v`` (notice " +"the ``v`` added before the version number), then ``git push --tags``. " +"This will create a draft release on GitHub containing the correct " +"artifacts and the relevant part of the changelog." 
msgstr "" +"在 PR 合并后立即用版本号标记发布提交:``git tag v0.12.3``,然后``git push --tags``。这将在 GitHub" +" 上创建一个包含正确工件和更新日志相关部分的发布草案。" -#: ../../source/contributor-how-to-release-flower.rst:22 +#: ../../source/contributor-how-to-release-flower.rst:14 msgid "Check the draft release on GitHub, and if everything is good, publish it." -msgstr "" +msgstr "检查 GitHub 上的发布稿,如果一切正常,就发布它。" -#: ../../source/contributor-how-to-release-flower.rst:25 +#: ../../source/contributor-how-to-release-flower.rst:17 msgid "After the release" -msgstr "" +msgstr "发布后" -#: ../../source/contributor-how-to-release-flower.rst:27 +#: ../../source/contributor-how-to-release-flower.rst:19 msgid "Create a pull request which contains the following changes:" -msgstr "" +msgstr "创建包含以下更改的拉取请求:" -#: ../../source/contributor-how-to-release-flower.rst:29 +#: ../../source/contributor-how-to-release-flower.rst:21 msgid "Increase the minor version in ``pyproject.toml`` by one." -msgstr "" +msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" -#: ../../source/contributor-how-to-release-flower.rst:30 +#: ../../source/contributor-how-to-release-flower.rst:22 msgid "Update all files which contain the current version number if necessary." -msgstr "" +msgstr "如有必要,更新包含当前版本号的所有文件。" -#: ../../source/contributor-how-to-release-flower.rst:31 +#: ../../source/contributor-how-to-release-flower.rst:23 msgid "Add a new ``Unreleased`` section in ``changelog.md``." -msgstr "" +msgstr "在 ``changelog.md`` 中添加新的 ``Unreleased`` 部分。" -#: ../../source/contributor-how-to-release-flower.rst:33 +#: ../../source/contributor-how-to-release-flower.rst:25 msgid "" "Merge the pull request on the same day (i.e., before a new nighly release" " gets published to PyPI)." 
-msgstr "" +msgstr "在同一天合并拉取请求(即在新版本发布到 PyPI 之前)。" -#: ../../source/contributor-how-to-release-flower.rst:36 +#: ../../source/contributor-how-to-release-flower.rst:28 msgid "Publishing a pre-release" -msgstr "" +msgstr "发布预发布版本" -#: ../../source/contributor-how-to-release-flower.rst:39 +#: ../../source/contributor-how-to-release-flower.rst:31 msgid "Pre-release naming" -msgstr "" +msgstr "释放前命名" -#: ../../source/contributor-how-to-release-flower.rst:41 +#: ../../source/contributor-how-to-release-flower.rst:33 msgid "" "PyPI supports pre-releases (alpha, beta, release candiate). Pre-releases " "MUST use one of the following naming patterns:" -msgstr "" +msgstr "PyPI 支持预发布版本(alpha、beta、release candiate)。预发布版本必须使用以下命名模式之一:" -#: ../../source/contributor-how-to-release-flower.rst:43 +#: ../../source/contributor-how-to-release-flower.rst:35 msgid "Alpha: ``MAJOR.MINOR.PATCHaN``" -msgstr "" +msgstr "阿尔法 ``MAJOR.MINOR.PATCHaN``" -#: ../../source/contributor-how-to-release-flower.rst:44 +#: ../../source/contributor-how-to-release-flower.rst:36 msgid "Beta: ``MAJOR.MINOR.PATCHbN``" -msgstr "" +msgstr "贝塔: ``MAJOR.MINOR.PATCHbN``" -#: ../../source/contributor-how-to-release-flower.rst:45 +#: ../../source/contributor-how-to-release-flower.rst:37 msgid "Release candiate (RC): ``MAJOR.MINOR.PATCHrcN``" -msgstr "" +msgstr "版本代号 (RC): ``MAJOR.MINOR.PATCHrcN``" -#: ../../source/contributor-how-to-release-flower.rst:47 +#: ../../source/contributor-how-to-release-flower.rst:39 msgid "Examples include:" -msgstr "" +msgstr "例子包括:" -#: ../../source/contributor-how-to-release-flower.rst:49 +#: ../../source/contributor-how-to-release-flower.rst:41 msgid "``1.0.0a0``" -msgstr "" +msgstr "``1.0.0a0``" -#: ../../source/contributor-how-to-release-flower.rst:50 +#: ../../source/contributor-how-to-release-flower.rst:42 msgid "``1.0.0b0``" -msgstr "" +msgstr "``1.0.0b0``" -#: ../../source/contributor-how-to-release-flower.rst:51 +#: ../../source/contributor-how-to-release-flower.rst:43 msgid 
"``1.0.0rc0``" -msgstr "" +msgstr "``1.0.0rc0``" -#: ../../source/contributor-how-to-release-flower.rst:52 +#: ../../source/contributor-how-to-release-flower.rst:44 msgid "``1.0.0rc1``" -msgstr "" +msgstr "``1.0.0rc1``" -#: ../../source/contributor-how-to-release-flower.rst:54 +#: ../../source/contributor-how-to-release-flower.rst:46 msgid "" "This is in line with PEP-440 and the recommendations from the Python " "Packaging Authority (PyPA):" -msgstr "" +msgstr "这符合 PEP-440 和 Python 包装管理局 (PyPA) 的建议:" -#: ../../source/contributor-how-to-release-flower.rst:57 +#: ../../source/contributor-how-to-release-flower.rst:49 msgid "`PEP-440 `_" -msgstr "" +msgstr "`PEP-440 `_" -#: ../../source/contributor-how-to-release-flower.rst:58 +#: ../../source/contributor-how-to-release-flower.rst:50 msgid "" "`PyPA Choosing a versioning scheme " "`_" msgstr "" +"`PyPA 选择版本控制方案 `_" -#: ../../source/contributor-how-to-release-flower.rst:60 +#: ../../source/contributor-how-to-release-flower.rst:52 msgid "" "Note that the approach defined by PyPA is not compatible with SemVer " "2.0.0 spec, for details consult the `Semantic Versioning Specification " "`_ (specifically item " "11 on precedence)." msgstr "" +"请注意,PyPA 所定义的方法与 SemVer 2.0.0 " +"规范不兼容,详情请查阅《语义版本规范》`_(特别是关于优先级的第 11 项)。" -#: ../../source/contributor-how-to-release-flower.rst:63 +#: ../../source/contributor-how-to-release-flower.rst:55 msgid "Pre-release classification" -msgstr "" +msgstr "发布前分类" -#: ../../source/contributor-how-to-release-flower.rst:65 +#: ../../source/contributor-how-to-release-flower.rst:57 msgid "Should the next pre-release be called alpha, beta, or release candidate?" -msgstr "" +msgstr "下一个预发布版应该叫阿尔法版、贝塔版还是候选发布版?" 
-#: ../../source/contributor-how-to-release-flower.rst:67 +#: ../../source/contributor-how-to-release-flower.rst:59 msgid "" "RC: feature complete, no known issues (apart from issues that are " "classified as \"won't fix\" for the next stable release) - if no issues " "surface this will become the next stable release" -msgstr "" +msgstr "RC:功能完整,无已知问题(除了下一个稳定版中被列为 \"不会修复 \"的问题)--如果没有问题出现,这将成为下一个稳定版" -#: ../../source/contributor-how-to-release-flower.rst:68 +#: ../../source/contributor-how-to-release-flower.rst:60 msgid "Beta: feature complete, allowed to have known issues" -msgstr "" +msgstr "贝塔版:功能完整,允许存在已知问题" -#: ../../source/contributor-how-to-release-flower.rst:69 +#: ../../source/contributor-how-to-release-flower.rst:61 msgid "Alpha: not feature complete, allowed to have known issues" -msgstr "" +msgstr "阿尔法版:功能不完整,允许存在已知问题" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:2 msgid "Set up a virtual env" -msgstr "" +msgstr "建立虚拟环境" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:4 msgid "" @@ -726,10 +1022,12 @@ msgid "" "environment with pyenv virtualenv, poetry, or Anaconda. You can follow " "the instructions or choose your preferred setup." msgstr "" +"建议在虚拟环境中运行 Python 设置。本指南展示了如何使用 pyenv virtualenv、Poetry 或 Anaconda " +"创建虚拟环境的三个不同示例。您可以按照说明或选择您喜欢的设置。" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:9 msgid "Python Version" -msgstr "" +msgstr "Python 版本" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:11 #: ../../source/how-to-install-flower.rst:8 @@ -738,10 +1036,12 @@ msgid "" "but `Python 3.10 `_ or above is " "recommended." msgstr "" +"Flower 至少需要 `Python 3.8 `_,但建议使用 `Python " +"3.10 `_或更高版本。" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:14 msgid "Virutualenv with Pyenv/Virtualenv" -msgstr "" +msgstr "Virtualenv 和 Pyenv/Virtualenv" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:16 msgid "" @@ -750,24 +1050,27 @@ msgid "" "/pyenv-virtualenv>`_. 
Please see `Flower examples " "`_ for details." msgstr "" +"其中一个推荐的虚拟环境是 `pyenv `_/`virtualenv " +"`_。详情请参见 `Flower 示例 " +"`_。" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:18 msgid "" "Once Pyenv is set up, you can use it to install `Python Version 3.10 " "`_ or above:" -msgstr "" +msgstr "一旦设置好 Pyenv,就可以用它来安装 `Python 3.10 `_ 或更高版本:" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:24 msgid "Create the virtualenv with:" -msgstr "" +msgstr "创建虚拟环境:" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:31 msgid "Activate the virtualenv by running the following command:" -msgstr "" +msgstr "运行以下命令激活 virtualenv:" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:39 msgid "Virtualenv with Poetry" -msgstr "" +msgstr "使用 Poetry 的 Virtualenv" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:41 msgid "" @@ -775,16 +1078,18 @@ msgid "" "poetry.org/docs/>`_ to manage dependencies. After installing Poetry you " "simply create a virtual environment with:" msgstr "" +"Flower 示例基于 `Poetry `_ 来管理依赖关系。安装 Poetry" +" 后,只需创建一个虚拟环境即可:" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:47 msgid "" "If you open a new terminal you can activate the previously created " "virtual environment with the following command:" -msgstr "" +msgstr "如果打开一个新终端,可以使用以下命令激活之前创建的虚拟环境:" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:55 msgid "Virtualenv with Anaconda" -msgstr "" +msgstr "使用 Anaconda 的 Virtualenv" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:57 msgid "" @@ -793,28 +1098,33 @@ msgid "" "/user-guide/install/index.html>`_ package. After setting it up you can " "create a virtual environment with:" msgstr "" +"如果你更喜欢在虚拟环境中使用 Anaconda,那么请安装并设置 `conda " +"`_ 软件包。设置完成后,您就可以使用以下工具创建虚拟环境:" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:63 msgid "and activate the virtual environment with:" -msgstr "" +msgstr "并激活虚拟环境:" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:71 msgid "And then?" 
-msgstr "" +msgstr "然后呢?" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:73 msgid "" "As soon as you created your virtual environment you clone one of the " "`Flower examples `_." msgstr "" +"创建虚拟环境后,您可以克隆一个 `Flower 示例 " +"`_。" #: ../../source/contributor-how-to-write-documentation.rst:2 msgid "Write documentation" -msgstr "" +msgstr "编写文件" #: ../../source/contributor-how-to-write-documentation.rst:6 msgid "Project layout" -msgstr "" +msgstr "项目布局" #: ../../source/contributor-how-to-write-documentation.rst:8 msgid "" @@ -822,53 +1132,58 @@ msgid "" " documentation system supports both reStructuredText (``.rst`` files) and" " Markdown (``.md`` files)." msgstr "" +"Flower 文档位于 ``doc`` 目录中。基于 Sphinx 的文档系统支持 reStructuredText(``.rst`` 文件)和 " +"Markdown(``.md`` 文件)。" #: ../../source/contributor-how-to-write-documentation.rst:10 -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:119 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142 +#, fuzzy msgid "" "Note that, in order to build the documentation locally (with ``poetry run" " make html``, like described below), `Pandoc " -"_` needs to be installed on the " +"`_ needs to be installed on the " "system." 
msgstr "" +"请注意,要在本地构建文档(使用 ``poetry run make html``,如下所述),系统上必须安装 ``Pandoc " +"_`。" #: ../../source/contributor-how-to-write-documentation.rst:14 msgid "Edit an existing page" -msgstr "" +msgstr "编辑现有页面" #: ../../source/contributor-how-to-write-documentation.rst:16 msgid "Edit an existing ``.rst`` (or ``.md``) file under ``doc/source/``" -msgstr "" +msgstr "编辑 ``doc/source/`` 下现有的 ``.rst`` (或 ``.md``) 文件" #: ../../source/contributor-how-to-write-documentation.rst:17 #: ../../source/contributor-how-to-write-documentation.rst:27 msgid "Compile the docs: ``cd doc``, then ``poetry run make html``" -msgstr "" +msgstr "编译文档: cd doc``,然后 ``poetry run make html``" #: ../../source/contributor-how-to-write-documentation.rst:18 #: ../../source/contributor-how-to-write-documentation.rst:28 msgid "Open ``doc/build/html/index.html`` in the browser to check the result" -msgstr "" +msgstr "在浏览器中打开 ``doc/build/html/index.html`` 查看结果" #: ../../source/contributor-how-to-write-documentation.rst:22 msgid "Create a new page" -msgstr "" +msgstr "创建新页面" #: ../../source/contributor-how-to-write-documentation.rst:24 msgid "Add new ``.rst`` file under ``doc/source/``" -msgstr "" +msgstr "在 ``doc/source/`` 下添加新的 ``.rst`` 文件" #: ../../source/contributor-how-to-write-documentation.rst:25 msgid "Add content to the new ``.rst`` file" -msgstr "" +msgstr "为新的 ``.rst`` 文件添加内容" #: ../../source/contributor-how-to-write-documentation.rst:26 msgid "Link to the new rst from ``index.rst``" -msgstr "" +msgstr "从 ``index.rst`` 链接到新的 rst" #: ../../source/contributor-ref-good-first-contributions.rst:2 msgid "Good first contributions" -msgstr "" +msgstr "良好的首批捐款" #: ../../source/contributor-ref-good-first-contributions.rst:4 msgid "" @@ -877,40 +1192,44 @@ msgid "" "where to start to increase your chances of getting your PR accepted into " "the Flower codebase." 
msgstr "" +"我们欢迎为《鲜花》投稿!然而,要知道从哪里开始并非易事。因此,我们提出了一些建议,告诉您从哪里开始,以增加您的 PR 被 Flower " +"代码库接受的机会。" #: ../../source/contributor-ref-good-first-contributions.rst:11 msgid "Where to start" -msgstr "" +msgstr "从哪里开始" #: ../../source/contributor-ref-good-first-contributions.rst:13 msgid "" "Until the Flower core library matures it will be easier to get PR's " "accepted if they only touch non-core areas of the codebase. Good " "candidates to get started are:" -msgstr "" +msgstr "在 Flower 核心库成熟之前,如果 PR 只涉及代码库中的非核心区域,则会更容易被接受。可以从以下方面入手:" #: ../../source/contributor-ref-good-first-contributions.rst:17 msgid "Documentation: What's missing? What could be expressed more clearly?" -msgstr "" +msgstr "文件: 缺少什么?哪些内容可以表达得更清楚?" #: ../../source/contributor-ref-good-first-contributions.rst:18 msgid "Baselines: See below." -msgstr "" +msgstr "基线: 见下文。" #: ../../source/contributor-ref-good-first-contributions.rst:19 msgid "Examples: See below." -msgstr "" +msgstr "举例说明: 见下文。" #: ../../source/contributor-ref-good-first-contributions.rst:23 msgid "Request for Flower Baselines" -msgstr "" +msgstr "Flower 基线申请" #: ../../source/contributor-ref-good-first-contributions.rst:25 msgid "" "If you are not familiar with Flower Baselines, you should probably check-" -"out our `contributing guide for baselines `_." msgstr "" +"如果您对 Flower Baselines 还不熟悉,也许应该看看我们的 \"基线贡献指南 `_\"。" #: ../../source/contributor-ref-good-first-contributions.rst:27 msgid "" @@ -920,39 +1239,42 @@ msgid "" " and that has no assignes, feel free to assign it to yourself and start " "working on it!" msgstr "" +"然后,您应该查看开放的 `issues " +"`_" +" 基线请求。如果您发现了自己想做的基线,而它还没有被分配,请随时把它分配给自己,然后开始工作!" #: ../../source/contributor-ref-good-first-contributions.rst:31 msgid "" "Otherwise, if you don't find a baseline you'd like to work on, be sure to" " open a new issue with the baseline request template!" -msgstr "" +msgstr "否则,如果您没有找到想要处理的基线,请务必使用基线请求模板打开一个新问题!" 
#: ../../source/contributor-ref-good-first-contributions.rst:34 msgid "Request for examples" -msgstr "" +msgstr "要求提供范例" #: ../../source/contributor-ref-good-first-contributions.rst:36 msgid "" "We wish we had more time to write usage examples because we believe they " "help users to get started with building what they want to build. Here are" " a few ideas where we'd be happy to accept a PR:" -msgstr "" +msgstr "我们希望有更多的时间来撰写使用示例,因为我们相信这些示例可以帮助用户开始构建他们想要构建的东西。以下是我们乐意接受 PR 的几个想法:" #: ../../source/contributor-ref-good-first-contributions.rst:40 msgid "Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch" -msgstr "" +msgstr "微调 \"拉玛 2\",使用 \"抱脸变形金刚 \"和 PyTorch" #: ../../source/contributor-ref-good-first-contributions.rst:41 msgid "XGBoost" -msgstr "" +msgstr "XGBoost" #: ../../source/contributor-ref-good-first-contributions.rst:42 msgid "Android ONNX on-device training" -msgstr "" +msgstr "安卓 ONNX 设备上培训" #: ../../source/contributor-ref-secure-aggregation-protocols.rst:2 msgid "Secure Aggregation Protocols" -msgstr "" +msgstr "安全聚合协议" #: ../../source/contributor-ref-secure-aggregation-protocols.rst:4 msgid "" @@ -961,10 +1283,12 @@ msgid "" " not be accurate in practice. The SecAgg protocol can be considered as a " "special case of the SecAgg+ protocol." msgstr "" +"包括 SecAgg、SecAgg+ 和 LightSecAgg 协议。LightSecAgg " +"协议尚未实施,因此其图表和抽象在实践中可能并不准确。SecAgg 协议可视为 SecAgg+ 协议的特例。" #: ../../source/contributor-ref-secure-aggregation-protocols.rst:8 msgid "The :code:`SecAgg+` abstraction" -msgstr "" +msgstr "代码:`SecAgg+` 抽象" #: ../../source/contributor-ref-secure-aggregation-protocols.rst:10 #: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 @@ -973,50 +1297,55 @@ msgid "" "(int) for secure aggregation, and thus many python dictionaries used have" " keys of int type rather than ClientProxy type." 
msgstr "" +"在此实现中,将为每个客户端分配一个唯一索引(int),以确保聚合的安全性,因此使用的许多 python 字典的键都是 int 类型,而不是 " +"ClientProxy 类型。" #: ../../source/contributor-ref-secure-aggregation-protocols.rst:65 #: ../../source/contributor-ref-secure-aggregation-protocols.rst:198 msgid "" "The Flower server will execute and process received results in the " "following order:" -msgstr "" +msgstr "Flower 服务器将按以下顺序执行和处理收到的结果:" #: ../../source/contributor-ref-secure-aggregation-protocols.rst:159 msgid "The :code:`LightSecAgg` abstraction" -msgstr "" +msgstr "代码:`LightSecAgg` 抽象" #: ../../source/contributor-ref-secure-aggregation-protocols.rst:271 msgid "Types" -msgstr "" +msgstr "类型" #: ../../source/contributor-tutorial-contribute-on-github.rst:2 msgid "Contribute on GitHub" -msgstr "" +msgstr "在 GitHub 上投稿" #: ../../source/contributor-tutorial-contribute-on-github.rst:4 msgid "" "This guide is for people who want to get involved with Flower, but who " "are not used to contributing to GitHub projects." -msgstr "" +msgstr "本指南适用于想参与 Flower,但不习惯为 GitHub 项目贡献的人。" #: ../../source/contributor-tutorial-contribute-on-github.rst:6 +#, fuzzy msgid "" "If you're familiar with how contributing on GitHub works, you can " "directly checkout our `getting started guide for contributors " -"`_ and " -"examples of `good first contributions `_." +"`_." msgstr "" +"如果您熟悉如何在 GitHub 上贡献,可以直接查看我们的 \"贡献者入门指南\" `_ 和 \"优秀的首次贡献示例\" " +"`_。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:12 +#: ../../source/contributor-tutorial-contribute-on-github.rst:11 msgid "Setting up the repository" -msgstr "" +msgstr "建立资源库" -#: ../../source/contributor-tutorial-contribute-on-github.rst:23 +#: ../../source/contributor-tutorial-contribute-on-github.rst:22 msgid "**Create a GitHub account and setup Git**" -msgstr "" +msgstr "**创建 GitHub 账户并设置 Git**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:15 +#: ../../source/contributor-tutorial-contribute-on-github.rst:14 msgid "" "Git is a distributed version control tool. 
This allows for an entire " "codebase's history to be stored and every developer's machine. It is a " @@ -1024,109 +1353,122 @@ msgid "" "follow this `guide `_ to set it up." msgstr "" +"Git 是一种分布式版本控制工具。它可以将整个代码库的历史记录保存在每个开发人员的机器上。您需要在本地计算机上安装该软件,可以按照本指南 " +"`_ 进行设置。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:18 +#: ../../source/contributor-tutorial-contribute-on-github.rst:17 msgid "" "GitHub, itself, is a code hosting platform for version control and " "collaboration. It allows for everyone to collaborate and work from " "anywhere on remote repositories." -msgstr "" +msgstr "GitHub 本身是一个用于版本控制和协作的代码托管平台。它允许每个人在任何地方对远程仓库进行协作和工作。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:20 +#: ../../source/contributor-tutorial-contribute-on-github.rst:19 msgid "" "If you haven't already, you will need to create an account on `GitHub " "`_." -msgstr "" +msgstr "如果还没有,您需要在 `GitHub `_ 上创建一个账户。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:22 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "" "The idea behind the generic Git and GitHub workflow boils down to this: " "you download code from a remote repository on GitHub, make changes " "locally and keep track of them using Git and then you upload your new " "history back to GitHub." msgstr "" +"通用的 Git 和 GitHub 工作流程背后的理念可以归结为:从 GitHub 上的远程仓库下载代码,在本地进行修改并使用 Git " +"进行跟踪,然后将新的历史记录上传回 GitHub。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:34 +#: ../../source/contributor-tutorial-contribute-on-github.rst:33 msgid "**Forking the Flower repository**" -msgstr "" +msgstr "**叉花仓库**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:26 +#: ../../source/contributor-tutorial-contribute-on-github.rst:25 msgid "" "A fork is a personal copy of a GitHub repository. 
To create one for " "Flower, you must navigate to https://github.com/adap/flower (while " "connected to your GitHub account) and click the ``Fork`` button situated " "on the top right of the page." msgstr "" +"fork 是 GitHub 仓库的个人副本。要为 Flower 创建一个 fork,您必须导航到 " +"https://github.com/adap/flower(同时连接到您的 GitHub 账户),然后点击页面右上方的 ``Fork`` 按钮。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:31 +#: ../../source/contributor-tutorial-contribute-on-github.rst:30 msgid "" "You can change the name if you want, but this is not necessary as this " "version of Flower will be yours and will sit inside your own account " "(i.e., in your own list of repositories). Once created, you should see on" " the top left corner that you are looking at your own version of Flower." msgstr "" +"您可以更改名称,但没有必要,因为这个版本的 Flower " +"将是您自己的,并位于您自己的账户中(即,在您自己的版本库列表中)。创建完成后,您会在左上角看到自己的 Flower 版本。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:49 +#: ../../source/contributor-tutorial-contribute-on-github.rst:48 msgid "**Cloning your forked repository**" -msgstr "" +msgstr "**克隆你的分叉仓库**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:37 +#: ../../source/contributor-tutorial-contribute-on-github.rst:36 msgid "" "The next step is to download the forked repository on your machine to be " "able to make changes to it. On your forked repository page, you should " "first click on the ``Code`` button on the right, this will give you the " "ability to copy the HTTPS link of the repository." 
msgstr "" +"下一步是在你的机器上下载分叉版本库,以便对其进行修改。在分叉版本库页面上,首先点击右侧的 \"代码 \"按钮,这样就能复制版本库的 HTTPS " +"链接。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:43 +#: ../../source/contributor-tutorial-contribute-on-github.rst:42 msgid "" "Once you copied the \\, you can open a terminal on your machine, " "navigate to the place you want to download the repository to and type:" -msgstr "" +msgstr "一旦复制了 (),你就可以在你的机器上打开一个终端,导航到你想下载软件源的地方,然后键入:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:49 +#: ../../source/contributor-tutorial-contribute-on-github.rst:48 +#, fuzzy msgid "" -"This will create a `flower/` (or the name of your fork if you renamed it)" -" folder in the current working directory." -msgstr "" +"This will create a ``flower/`` (or the name of your fork if you renamed " +"it) folder in the current working directory." +msgstr "这将在当前工作目录下创建一个 `flower/`(如果重命名了,则使用 fork 的名称)文件夹。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:68 +#: ../../source/contributor-tutorial-contribute-on-github.rst:67 msgid "**Add origin**" -msgstr "" +msgstr "**添加原产地**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:52 +#: ../../source/contributor-tutorial-contribute-on-github.rst:51 msgid "You can then go into the repository folder:" -msgstr "" +msgstr "然后,您就可以进入存储库文件夹:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:58 +#: ../../source/contributor-tutorial-contribute-on-github.rst:57 msgid "" "And here we will need to add an origin to our repository. The origin is " "the \\ of the remote fork repository. To obtain it, we can do as " "previously mentioned by going to our fork repository on our GitHub " "account and copying the link." 
msgstr "" +"在这里,我们需要为我们的版本库添加一个 origin。origin 是远程 fork 仓库的 " +"\\。要获得它,我们可以像前面提到的那样,访问 GitHub 账户上的分叉仓库并复制链接。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:63 +#: ../../source/contributor-tutorial-contribute-on-github.rst:62 msgid "" "Once the \\ is copied, we can type the following command in our " "terminal:" -msgstr "" +msgstr "一旦复制了 \\ ,我们就可以在终端中键入以下命令:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:92 +#: ../../source/contributor-tutorial-contribute-on-github.rst:91 msgid "**Add upstream**" -msgstr "" +msgstr "**增加上游**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:71 +#: ../../source/contributor-tutorial-contribute-on-github.rst:70 msgid "" "Now we will add an upstream address to our repository. Still in the same " "directroy, we must run the following command:" -msgstr "" +msgstr "现在,我们要为版本库添加一个上游地址。还是在同一目录下,我们必须运行以下命令:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:78 +#: ../../source/contributor-tutorial-contribute-on-github.rst:77 msgid "The following diagram visually explains what we did in the previous steps:" -msgstr "" +msgstr "下图直观地解释了我们在前面步骤中的操作:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:82 +#: ../../source/contributor-tutorial-contribute-on-github.rst:81 msgid "" "The upstream is the GitHub remote address of the parent repository (in " "this case Flower), i.e. the one we eventually want to contribute to and " @@ -1134,412 +1476,438 @@ msgid "" "remote address of the forked repository we created, i.e. the copy (fork) " "in our own account." 
msgstr "" +"上游是父版本库(这里是 Flower)的 GitHub 远程地址,即我们最终要贡献的版本库,因此需要最新的历史记录。origin " +"只是我们创建的分叉仓库的 GitHub 远程地址,即我们自己账户中的副本(分叉)。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:86 +#: ../../source/contributor-tutorial-contribute-on-github.rst:85 msgid "" "To make sure our local version of the fork is up-to-date with the latest " "changes from the Flower repository, we can execute the following command:" -msgstr "" +msgstr "为了确保本地版本的分叉程序与 Flower 代码库的最新更改保持一致,我们可以执行以下命令:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:95 +#: ../../source/contributor-tutorial-contribute-on-github.rst:94 msgid "Setting up the coding environment" -msgstr "" +msgstr "设置编码环境" -#: ../../source/contributor-tutorial-contribute-on-github.rst:97 +#: ../../source/contributor-tutorial-contribute-on-github.rst:96 msgid "" "This can be achieved by following this `getting started guide for " "contributors`_ (note that you won't need to clone the repository). Once " "you are able to write code and test it, you can finally start making " "changes!" -msgstr "" +msgstr "您可以按照这份 \"贡献者入门指南\"__(注意,您不需要克隆版本库)来实现这一点。一旦您能够编写代码并进行测试,您就可以开始修改了!" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:102 +#: ../../source/contributor-tutorial-contribute-on-github.rst:101 msgid "Making changes" -msgstr "" +msgstr "做出改变" -#: ../../source/contributor-tutorial-contribute-on-github.rst:104 +#: ../../source/contributor-tutorial-contribute-on-github.rst:103 msgid "" "Before making any changes make sure you are up-to-date with your " "repository:" -msgstr "" +msgstr "在进行任何更改之前,请确保您的版本库是最新的:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:110 +#: ../../source/contributor-tutorial-contribute-on-github.rst:109 msgid "And with Flower's repository:" -msgstr "" +msgstr "还有Flower的存储库:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:124 +#: ../../source/contributor-tutorial-contribute-on-github.rst:123 msgid "**Create a new branch**" -msgstr "" +msgstr "**创建一个新分支**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:117 +#: ../../source/contributor-tutorial-contribute-on-github.rst:116 msgid "" "To make the history cleaner and easier to work with, it is good practice " "to create a new branch for each feature/project that needs to be " "implemented." -msgstr "" +msgstr "为了使历史记录更简洁、更易于操作,为每个需要实现的功能/项目创建一个新分支是个不错的做法。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:120 +#: ../../source/contributor-tutorial-contribute-on-github.rst:119 msgid "" "To do so, just run the following command inside the repository's " "directory:" -msgstr "" +msgstr "为此,只需在版本库目录下运行以下命令即可:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:127 +#: ../../source/contributor-tutorial-contribute-on-github.rst:126 msgid "**Make changes**" -msgstr "" +msgstr "**进行修改**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:127 +#: ../../source/contributor-tutorial-contribute-on-github.rst:126 msgid "Write great code and create wonderful changes using your favorite editor!" -msgstr "" +msgstr "使用您最喜欢的编辑器编写优秀的代码并创建精彩的更改!" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:140 +#: ../../source/contributor-tutorial-contribute-on-github.rst:139 msgid "**Test and format your code**" -msgstr "" +msgstr "**测试并格式化您的代码**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:130 +#: ../../source/contributor-tutorial-contribute-on-github.rst:129 msgid "" "Don't forget to test and format your code! Otherwise your code won't be " "able to be merged into the Flower repository. This is done so the " "codebase stays consistent and easy to understand." -msgstr "" +msgstr "不要忘记测试和格式化您的代码!否则您的代码将无法并入 Flower 代码库。这样做是为了使代码库保持一致并易于理解。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:133 +#: ../../source/contributor-tutorial-contribute-on-github.rst:132 msgid "To do so, we have written a few scripts that you can execute:" -msgstr "" +msgstr "为此,我们编写了一些脚本供您执行:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:152 +#: ../../source/contributor-tutorial-contribute-on-github.rst:151 msgid "**Stage changes**" -msgstr "" +msgstr "**暂存更改**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:143 +#: ../../source/contributor-tutorial-contribute-on-github.rst:142 msgid "" "Before creating a commit that will update your history, you must specify " "to Git which files it needs to take into account." -msgstr "" +msgstr "在创建更新历史记录的提交之前,必须向 Git 说明需要考虑哪些文件。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:145 +#: ../../source/contributor-tutorial-contribute-on-github.rst:144 msgid "This can be done with:" -msgstr "" +msgstr "这可以通过以下命令完成:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:151 +#: ../../source/contributor-tutorial-contribute-on-github.rst:150 msgid "" "To check which files have been modified compared to the last version " "(last commit) and to see which files are staged for commit, you can use " "the :code:`git status` command."
-msgstr "" +msgstr "要查看与上一版本(上次提交)相比哪些文件已被修改,以及哪些文件处于提交阶段,可以使用 :code:`git status` 命令。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:162 +#: ../../source/contributor-tutorial-contribute-on-github.rst:161 msgid "**Commit changes**" -msgstr "" +msgstr "**提交更改**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:155 +#: ../../source/contributor-tutorial-contribute-on-github.rst:154 msgid "" "Once you have added all the files you wanted to commit using :code:`git " "add`, you can finally create your commit using this command:" -msgstr "" +msgstr "使用 :code:`git add` 添加完所有要提交的文件后,就可以使用此命令创建提交了:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:161 +#: ../../source/contributor-tutorial-contribute-on-github.rst:160 msgid "" "The \\ is there to explain to others what the commit " "does. It should be written in an imperative style and be concise. An " "example would be :code:`git commit -m \"Add images to README\"`." msgstr "" +" 用于向他人解释提交的作用。它应该以命令式风格书写,并且简明扼要。例如 :code:`git commit " +"-m \"Add images to README\"`。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:173 +#: ../../source/contributor-tutorial-contribute-on-github.rst:172 msgid "**Push the changes to the fork**" -msgstr "" +msgstr "**将更改推送到分叉**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:165 +#: ../../source/contributor-tutorial-contribute-on-github.rst:164 msgid "" "Once we have committed our changes, we have effectively updated our local" " history, but GitHub has no way of knowing this unless we push our " "changes to our origin's remote address:" -msgstr "" +msgstr "一旦提交了修改,我们就有效地更新了本地历史记录,但除非我们将修改推送到原点的远程地址,否则 GitHub 无法得知:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:172 +#: ../../source/contributor-tutorial-contribute-on-github.rst:171 msgid "" "Once this is done, you will see on the GitHub that your forked repo was " "updated with the changes you have made." 
-msgstr "" +msgstr "完成此操作后,您将在 GitHub 上看到您的分叉仓库已根据您所做的更改进行了更新。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:176 +#: ../../source/contributor-tutorial-contribute-on-github.rst:175 msgid "Creating and merging a pull request (PR)" -msgstr "" +msgstr "创建和合并拉取请求 (PR)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:203 +#: ../../source/contributor-tutorial-contribute-on-github.rst:206 msgid "**Create the PR**" -msgstr "" +msgstr "**创建 PR**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:179 +#: ../../source/contributor-tutorial-contribute-on-github.rst:178 msgid "" "Once you have pushed changes, on the GitHub webpage of your repository " "you should see the following message:" -msgstr "" +msgstr "推送更改后,在仓库的 GitHub 网页上应该会看到以下信息:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:183 -msgid "Otherwise you can always find this option in the `Branches` page." -msgstr "" +#: ../../source/contributor-tutorial-contribute-on-github.rst:182 +#, fuzzy +msgid "Otherwise you can always find this option in the ``Branches`` page." 
+msgstr "否则,您可以在 \"分支 \"页面找到该选项。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:185 +#: ../../source/contributor-tutorial-contribute-on-github.rst:184 +#, fuzzy msgid "" -"Once you click the `Compare & pull request` button, you should see " +"Once you click the ``Compare & pull request`` button, you should see " "something similar to this:" -msgstr "" +msgstr "点击 \"比较和拉取请求 \"按钮后,您应该会看到类似下面的内容:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:189 +#: ../../source/contributor-tutorial-contribute-on-github.rst:188 msgid "At the top you have an explanation of which branch will be merged where:" -msgstr "" +msgstr "在顶部,你可以看到关于哪个分支将被合并的说明:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:193 +#: ../../source/contributor-tutorial-contribute-on-github.rst:192 msgid "" "In this example you can see that the request is to merge the branch " "``doc-fixes`` from my forked repository to branch ``main`` from the " "Flower repository." -msgstr "" +msgstr "在这个例子中,你可以看到请求将我分叉的版本库中的分支 ``doc-fixes`` 合并到 Flower 版本库中的分支 ``main``。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:195 +#: ../../source/contributor-tutorial-contribute-on-github.rst:194 msgid "" "The input box in the middle is there for you to describe what your PR " "does and to link it to existing issues. We have placed comments (that " "won't be rendered once the PR is opened) to guide you through the " "process." +msgstr "中间的输入框供您描述 PR 的作用,并将其与现有问题联系起来。我们在此放置了注释(一旦 PR 打开,注释将不会显示),以指导您完成整个过程。" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:197 +msgid "" +"It is important to follow the instructions described in comments. For " +"instance, in order to not break how our changelog system works, you " +"should read the information above the ``Changelog entry`` section " +"carefully. You can also checkout some examples and details in the " +":ref:`changelogentry` appendix." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:198 +#: ../../source/contributor-tutorial-contribute-on-github.rst:201 msgid "" "At the bottom you will find the button to open the PR. This will notify " "reviewers that a new PR has been opened and that they should look over it" " to merge or to request changes." -msgstr "" +msgstr "在底部,您可以找到打开 PR 的按钮。这将通知审核人员新的 PR 已经打开,他们应该查看该 PR 以进行合并或要求修改。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:201 +#: ../../source/contributor-tutorial-contribute-on-github.rst:204 msgid "" "If your PR is not yet ready for review, and you don't want to notify " "anyone, you have the option to create a draft pull request:" -msgstr "" +msgstr "如果您的 PR 尚未准备好接受审核,而且您不想通知任何人,您可以选择创建一个草案拉取请求:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +#: ../../source/contributor-tutorial-contribute-on-github.rst:209 msgid "**Making new changes**" -msgstr "" +msgstr "**作出新的改变**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +#: ../../source/contributor-tutorial-contribute-on-github.rst:209 msgid "" "Once the PR has been opened (as draft or not), you can still push new " "commits to it the same way we did before, by making changes to the branch" " associated with the PR." 
-msgstr "" +msgstr "一旦 PR 被打开(无论是否作为草案),你仍然可以像以前一样,通过修改与 PR 关联的分支来推送新的提交。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:228 +#: ../../source/contributor-tutorial-contribute-on-github.rst:231 msgid "**Review the PR**" -msgstr "" +msgstr "**审查 PR**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:212 msgid "" "Once the PR has been opened or once the draft PR has been marked as " "ready, a review from code owners will be automatically requested:" -msgstr "" +msgstr "一旦 PR 被打开或 PR 草案被标记为就绪,就会自动要求代码所有者进行审核:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:213 +#: ../../source/contributor-tutorial-contribute-on-github.rst:216 msgid "" "Code owners will then look into the code, ask questions, request changes " "or validate the PR." -msgstr "" +msgstr "然后,代码所有者会查看代码、提出问题、要求修改或验证 PR。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:215 +#: ../../source/contributor-tutorial-contribute-on-github.rst:218 msgid "Merging will be blocked if there are ongoing requested changes." -msgstr "" +msgstr "如果有正在进行的更改请求,合并将被阻止。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:219 +#: ../../source/contributor-tutorial-contribute-on-github.rst:222 msgid "" "To resolve them, just push the necessary changes to the branch associated" " with the PR:" -msgstr "" +msgstr "要解决这些问题,只需将必要的更改推送到与 PR 关联的分支即可:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:223 +#: ../../source/contributor-tutorial-contribute-on-github.rst:226 msgid "And resolve the conversation:" -msgstr "" +msgstr "并解决对话:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:227 +#: ../../source/contributor-tutorial-contribute-on-github.rst:230 msgid "" "Once all the conversations have been resolved, you can re-request a " "review." 
-msgstr "" +msgstr "一旦所有对话都得到解决,您就可以重新申请审核。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:248 +#: ../../source/contributor-tutorial-contribute-on-github.rst:251 msgid "**Once the PR is merged**" -msgstr "" +msgstr "**一旦 PR 被合并**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:231 +#: ../../source/contributor-tutorial-contribute-on-github.rst:234 msgid "" "If all the automatic tests have passed and reviewers have no more changes" " to request, they can approve the PR and merge it." -msgstr "" +msgstr "如果所有自动测试都已通过,且审核员不再需要修改,他们就可以批准 PR 并将其合并。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:235 +#: ../../source/contributor-tutorial-contribute-on-github.rst:238 msgid "" "Once it is merged, you can delete the branch on GitHub (a button should " "appear to do so) and also delete it locally by doing:" -msgstr "" +msgstr "合并后,您可以在 GitHub 上删除该分支(会出现一个删除按钮),也可以在本地删除该分支:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:242 +#: ../../source/contributor-tutorial-contribute-on-github.rst:245 msgid "Then you should update your forked repository by doing:" -msgstr "" +msgstr "然后,你应该更新你的分叉仓库:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:251 +#: ../../source/contributor-tutorial-contribute-on-github.rst:254 msgid "Example of first contribution" -msgstr "" +msgstr "首次贡献示例" -#: ../../source/contributor-tutorial-contribute-on-github.rst:254 +#: ../../source/contributor-tutorial-contribute-on-github.rst:257 msgid "Problem" -msgstr "" +msgstr "问题" -#: ../../source/contributor-tutorial-contribute-on-github.rst:256 +#: ../../source/contributor-tutorial-contribute-on-github.rst:259 msgid "" "For our documentation, we’ve started to use the `Diàtaxis framework " "`_."
-msgstr "" +msgstr "对于我们的文档,我们已经开始使用 \"Diàtaxis 框架 `_\"。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:258 +#: ../../source/contributor-tutorial-contribute-on-github.rst:261 msgid "" "Our “How to” guides should have titles that continue the sencence “How to" " …”, for example, “How to upgrade to Flower 1.0”." -msgstr "" +msgstr "我们的 \"如何 \"指南的标题应延续 \"如何...... \"的句式,例如 \"如何升级到 Flower 1.0\"。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:260 +#: ../../source/contributor-tutorial-contribute-on-github.rst:263 msgid "" "Most of our guides do not follow this new format yet, and changing their " "title is (unfortunately) more involved than one might think." -msgstr "" +msgstr "我们的大多数指南还没有采用这种新格式,而更改其标题(不幸的是)比人们想象的要复杂得多。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:262 +#: ../../source/contributor-tutorial-contribute-on-github.rst:265 msgid "" "This issue is about changing the title of a doc from present continious " "to present simple." -msgstr "" +msgstr "这个问题是关于将文档标题从现在进行时改为一般现在时。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:264 +#: ../../source/contributor-tutorial-contribute-on-github.rst:267 msgid "" "Let's take the example of “Saving Progress” which we changed to “Save " "Progress”. Does this pass our check?" -msgstr "" +msgstr "以 \"Saving Progress\" 为例,我们将其改为 \"Save Progress\"。这是否通过了我们的检查?"
-#: ../../source/contributor-tutorial-contribute-on-github.rst:266 +#: ../../source/contributor-tutorial-contribute-on-github.rst:269 msgid "Before: ”How to saving progress” ❌" -msgstr "" +msgstr "之前: \"How to saving progress\" ❌" -#: ../../source/contributor-tutorial-contribute-on-github.rst:268 +#: ../../source/contributor-tutorial-contribute-on-github.rst:271 msgid "After: ”How to save progress” ✅" -msgstr "" +msgstr "之后: \"How to save progress\" ✅" -#: ../../source/contributor-tutorial-contribute-on-github.rst:271 +#: ../../source/contributor-tutorial-contribute-on-github.rst:274 msgid "Solution" -msgstr "" +msgstr "解决方案" -#: ../../source/contributor-tutorial-contribute-on-github.rst:273 +#: ../../source/contributor-tutorial-contribute-on-github.rst:276 msgid "" "This is a tiny change, but it’ll allow us to test your end-to-end setup. " "After cloning and setting up the Flower repo, here’s what you should do:" -msgstr "" +msgstr "这只是一个很小的改动,但可以让我们测试你的端到端设置。克隆并设置好 Flower repo 后,你应该这样做:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:275 -msgid "Find the source file in `doc/source`" -msgstr "" +#: ../../source/contributor-tutorial-contribute-on-github.rst:278 +#, fuzzy +msgid "Find the source file in ``doc/source``" +msgstr "在 `doc/source` 中查找源文件" -#: ../../source/contributor-tutorial-contribute-on-github.rst:276 +#: ../../source/contributor-tutorial-contribute-on-github.rst:279 +#, fuzzy msgid "" -"Make the change in the `.rst` file (beware, the dashes under the title " +"Make the change in the ``.rst`` file (beware, the dashes under the title " "should be the same length as the title itself)" -msgstr "" +msgstr "在 `.rst` 文件中进行修改(注意,标题下的破折号应与标题本身的长度相同)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:277 +#: ../../source/contributor-tutorial-contribute-on-github.rst:280 msgid "" -"Build the docs and check the result: ``_" msgstr "" +"构建文档并检查结果: ``_" -#: ../../source/contributor-tutorial-contribute-on-github.rst:280 +#: 
../../source/contributor-tutorial-contribute-on-github.rst:283 msgid "Rename file" -msgstr "" +msgstr "重命名文件" -#: ../../source/contributor-tutorial-contribute-on-github.rst:282 +#: ../../source/contributor-tutorial-contribute-on-github.rst:285 msgid "" "You might have noticed that the file name still reflects the old wording." " If we just change the file, then we break all existing links to it - it " "is **very important** to avoid that, breaking links can harm our search " "engine ranking." msgstr "" +"您可能已经注意到,文件名仍然反映了旧的措辞。如果我们只是更改文件,那么就会破坏与该文件的所有现有链接--" +"避免这种情况是***重要的,破坏链接会损害我们的搜索引擎排名。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:285 +#: ../../source/contributor-tutorial-contribute-on-github.rst:288 msgid "Here’s how to change the file name:" -msgstr "" +msgstr "下面是更改文件名的方法:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:287 -msgid "Change the file name to `save-progress.rst`" -msgstr "" +#: ../../source/contributor-tutorial-contribute-on-github.rst:290 +#, fuzzy +msgid "Change the file name to ``save-progress.rst``" +msgstr "将文件名改为`save-progress.rst`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:288 -msgid "Add a redirect rule to `doc/source/conf.py`" -msgstr "" +#: ../../source/contributor-tutorial-contribute-on-github.rst:291 +#, fuzzy +msgid "Add a redirect rule to ``doc/source/conf.py``" +msgstr "在 `doc/source/conf.py` 中添加重定向规则" -#: ../../source/contributor-tutorial-contribute-on-github.rst:290 +#: ../../source/contributor-tutorial-contribute-on-github.rst:293 +#, fuzzy msgid "" -"This will cause a redirect from `saving-progress.html` to `save-" -"progress.html`, old links will continue to work." -msgstr "" +"This will cause a redirect from ``saving-progress.html`` to ``save-" +"progress.html``, old links will continue to work." 
+msgstr "这将导致从 `saving-progress.html` 重定向到 `save-progress.html`,旧链接将继续工作。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:293 +#: ../../source/contributor-tutorial-contribute-on-github.rst:296 msgid "Apply changes in the index file" -msgstr "" +msgstr "应用索引文件中的更改" -#: ../../source/contributor-tutorial-contribute-on-github.rst:295 +#: ../../source/contributor-tutorial-contribute-on-github.rst:298 +#, fuzzy msgid "" "For the lateral navigation bar to work properly, it is very important to " -"update the `index.rst` file as well. This is where we define the whole " +"update the ``index.rst`` file as well. This is where we define the whole " "arborescence of the navbar." -msgstr "" - -#: ../../source/contributor-tutorial-contribute-on-github.rst:298 -msgid "Find and modify the file name in `index.rst`" -msgstr "" +msgstr "要使横向导航栏正常工作,更新 `index.rst` 文件也非常重要。我们就是在这里定义整个导航栏的结构。" #: ../../source/contributor-tutorial-contribute-on-github.rst:301 +#, fuzzy +msgid "Find and modify the file name in ``index.rst``" +msgstr "查找并修改 `index.rst` 中的文件名" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:304 msgid "Open PR" -msgstr "" +msgstr "打开 PR" -#: ../../source/contributor-tutorial-contribute-on-github.rst:303 +#: ../../source/contributor-tutorial-contribute-on-github.rst:306 msgid "" "Commit the changes (commit messages are always imperative: “Do " "something”, in this case “Change …”)" -msgstr "" +msgstr "提交更改(提交信息总是命令式的:\"做某事\",这里是 \"更改......\")" -#: ../../source/contributor-tutorial-contribute-on-github.rst:304 +#: ../../source/contributor-tutorial-contribute-on-github.rst:307 msgid "Push the changes to your fork" -msgstr "" +msgstr "将更改推送到分叉" -#: ../../source/contributor-tutorial-contribute-on-github.rst:305 +#: ../../source/contributor-tutorial-contribute-on-github.rst:308 msgid "Open a PR (as shown above)" -msgstr "" +msgstr "打开 PR(如上图所示)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:306 +#: 
../../source/contributor-tutorial-contribute-on-github.rst:309 msgid "Wait for it to be approved!" -msgstr "" +msgstr "等待审批!" -#: ../../source/contributor-tutorial-contribute-on-github.rst:307 +#: ../../source/contributor-tutorial-contribute-on-github.rst:310 msgid "Congrats! 🥳 You're now officially a Flower contributor!" -msgstr "" +msgstr "祝贺你 🥳 您现在正式成为 \"Flower \"贡献者!" -#: ../../source/contributor-tutorial-contribute-on-github.rst:311 +#: ../../source/contributor-tutorial-contribute-on-github.rst:314 msgid "How to write a good PR title" -msgstr "" +msgstr "如何撰写好的 PR 标题" -#: ../../source/contributor-tutorial-contribute-on-github.rst:313 +#: ../../source/contributor-tutorial-contribute-on-github.rst:316 msgid "" "A well-crafted PR title helps team members quickly understand the purpose" " and scope of the changes being proposed. Here's a guide to help you " "write a good GitHub PR title:" -msgstr "" +msgstr "一个精心撰写的 PR 标题能帮助团队成员迅速了解所提修改的目的和范围。以下指南可帮助您撰写一个好的 GitHub PR 标题:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:315 +#: ../../source/contributor-tutorial-contribute-on-github.rst:318 msgid "" "1. Be Clear and Concise: Provide a clear summary of the changes in a " "concise manner. 1. Use Actionable Verbs: Start with verbs like \"Add,\" " @@ -1548,227 +1916,432 @@ msgid "" "it Short: Avoid lengthy titles for easy readability. 1. Use Proper " "Capitalization and Punctuation: Follow grammar rules for clarity." msgstr "" +"1. 简明扼要: 以简明扼要的方式清楚地概述变化。1. 使用可操作的动词: 使用 \"添加\"、\"更新 \"或 \"修复 " +"\"等动词来表明目的。1. 包含相关信息: 提及受影响的功能或模块以了解上下文。1. 简短:避免冗长的标题,以方便阅读。1. 
" +"使用正确的大小写和标点符号: 遵守语法规则,以确保清晰。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:321 +#: ../../source/contributor-tutorial-contribute-on-github.rst:324 msgid "" "Let's start with a few examples for titles that should be avoided because" " they do not provide meaningful information:" -msgstr "" +msgstr "让我们先举例说明几个应该避免使用的标题,因为它们不能提供有意义的信息:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:323 +#: ../../source/contributor-tutorial-contribute-on-github.rst:326 msgid "Implement Algorithm" -msgstr "" +msgstr "执行算法" -#: ../../source/contributor-tutorial-contribute-on-github.rst:324 +#: ../../source/contributor-tutorial-contribute-on-github.rst:327 msgid "Database" -msgstr "" +msgstr "数据库" -#: ../../source/contributor-tutorial-contribute-on-github.rst:325 +#: ../../source/contributor-tutorial-contribute-on-github.rst:328 msgid "Add my_new_file.py to codebase" -msgstr "" +msgstr "在代码库中添加 my_new_file.py" -#: ../../source/contributor-tutorial-contribute-on-github.rst:326 +#: ../../source/contributor-tutorial-contribute-on-github.rst:329 msgid "Improve code in module" -msgstr "" +msgstr "改进模块中的代码" -#: ../../source/contributor-tutorial-contribute-on-github.rst:327 +#: ../../source/contributor-tutorial-contribute-on-github.rst:330 msgid "Change SomeModule" -msgstr "" +msgstr "更改 SomeModule" -#: ../../source/contributor-tutorial-contribute-on-github.rst:329 +#: ../../source/contributor-tutorial-contribute-on-github.rst:332 msgid "" "Here are a few positive examples which provide helpful information " "without repeating how they do it, as that is already visible in the " "\"Files changed\" section of the PR:" -msgstr "" +msgstr "这里有几个正面的例子,提供了有用的信息,但没有重复他们是如何做的,因为在 PR 的 \"已更改文件 \"部分已经可以看到:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:331 +#: ../../source/contributor-tutorial-contribute-on-github.rst:334 msgid "Update docs banner to mention Flower Summit 2023" -msgstr "" +msgstr "更新文件横幅,提及 2023 年 Flower 峰会" -#: 
../../source/contributor-tutorial-contribute-on-github.rst:332 +#: ../../source/contributor-tutorial-contribute-on-github.rst:335 msgid "Remove unnecessary XGBoost dependency" -msgstr "" +msgstr "移除不必要的 XGBoost 依赖性" -#: ../../source/contributor-tutorial-contribute-on-github.rst:333 +#: ../../source/contributor-tutorial-contribute-on-github.rst:336 msgid "Remove redundant attributes in strategies subclassing FedAvg" -msgstr "" +msgstr "删除 FedAvg 子类化策略中的多余属性" -#: ../../source/contributor-tutorial-contribute-on-github.rst:334 -msgid "Add CI job to deploy the staging system when the `main` branch changes" -msgstr "" +#: ../../source/contributor-tutorial-contribute-on-github.rst:337 +#, fuzzy +msgid "Add CI job to deploy the staging system when the ``main`` branch changes" +msgstr "添加 CI 作业,以便在 \"主 \"分支发生变化时部署暂存系统" -#: ../../source/contributor-tutorial-contribute-on-github.rst:335 +#: ../../source/contributor-tutorial-contribute-on-github.rst:338 msgid "" "Add new amazing library which will be used to improve the simulation " "engine" -msgstr "" +msgstr "添加新的惊人库,用于改进模拟引擎" -#: ../../source/contributor-tutorial-contribute-on-github.rst:339 +#: ../../source/contributor-tutorial-contribute-on-github.rst:342 #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:548 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:946 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:747 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:727 #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:713 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:367 msgid "Next steps" -msgstr "" +msgstr "接下来的步骤" -#: ../../source/contributor-tutorial-contribute-on-github.rst:341 +#: ../../source/contributor-tutorial-contribute-on-github.rst:344 msgid "" "Once you have made your first PR, and want to contribute more, be sure to" " check out the following :" -msgstr "" +msgstr "一旦您完成了第一份 
PR,并希望做出更多贡献,请务必查看以下内容:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:343 +#: ../../source/contributor-tutorial-contribute-on-github.rst:346 msgid "" -"`Good first contributions `_, where you should particularly look " "into the :code:`baselines` contributions." msgstr "" +"好的第一批贡献 `_,在这里你应该特别看看 :code:`baselines` 的贡献。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:2 -msgid "Get started as a contributor" -msgstr "" +#: ../../source/contributor-tutorial-contribute-on-github.rst:350 +#: ../../source/fed/0000-20200102-fed-template.md:60 +msgid "Appendix" +msgstr "附录" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 -msgid "Prerequisites" +#: ../../source/contributor-tutorial-contribute-on-github.rst:355 +#, fuzzy +msgid "Changelog entry" +msgstr "更新日志" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:357 +msgid "" +"When opening a new PR, inside its description, there should be a " +"``Changelog entry`` header." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:7 -msgid "`Python 3.7 `_ or above" +#: ../../source/contributor-tutorial-contribute-on-github.rst:359 +msgid "" +"Above this header you should see the following comment that explains how " +"to write your changelog entry:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:8 -msgid "`Poetry 1.3 `_ or above" +#: ../../source/contributor-tutorial-contribute-on-github.rst:361 +msgid "" +"Inside the following 'Changelog entry' section, you should put the " +"description of your changes that will be added to the changelog alongside" +" your PR title." 
msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:9 -msgid "(Optional) `pyenv `_" +#: ../../source/contributor-tutorial-contribute-on-github.rst:364 +msgid "" +"If the section is completely empty (without any token) or non-existant, " +"the changelog will just contain the title of the PR for the changelog " +"entry, without any description." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:10 -msgid "(Optional) `pyenv-virtualenv `_" +#: ../../source/contributor-tutorial-contribute-on-github.rst:367 +msgid "" +"If the section contains some text other than tokens, it will use it to " +"add a description to the change." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:12 +#: ../../source/contributor-tutorial-contribute-on-github.rst:369 msgid "" -"Flower uses :code:`pyproject.toml` to manage dependencies and configure " -"development tools (the ones which support it). Poetry is a build tool " -"which supports `PEP 517 `_." +"If the section contains one of the following tokens it will ignore any " +"other text and put the PR under the corresponding section of the " +"changelog:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:18 -msgid "Developer Machine Setup" +#: ../../source/contributor-tutorial-contribute-on-github.rst:371 +msgid " is for classifying a PR as a general improvement." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:20 -msgid "" -"First, clone the `Flower repository `_ " -"from GitHub::" +#: ../../source/contributor-tutorial-contribute-on-github.rst:373 +msgid " is to not add the PR to the changelog" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:26 -msgid "" -"Second, create a virtual environment (and activate it). 
If you chose to " -"use :code:`pyenv` (with the :code:`pyenv-virtualenv` plugin) and already " -"have it installed , you can use the following convenience script (by " -"default it will use :code:`Python 3.8.17`, but you can change it by " -"providing a specific :code:``)::" +#: ../../source/contributor-tutorial-contribute-on-github.rst:375 +msgid " is to add a general baselines change to the PR" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:33 -msgid "" -"If you don't have :code:`pyenv` installed, you can use the following " -"script that will install pyenv, set it up and create the virtual " -"environment (with :code:`Python 3.8.17` by default)::" +#: ../../source/contributor-tutorial-contribute-on-github.rst:377 +msgid " is to add a general examples change to the PR" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:39 -msgid "" -"Third, install the Flower package in development mode (think :code:`pip " -"install -e`) along with all necessary dependencies::" +#: ../../source/contributor-tutorial-contribute-on-github.rst:379 +msgid " is to add a general sdk change to the PR" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:46 -msgid "Convenience Scripts" +#: ../../source/contributor-tutorial-contribute-on-github.rst:381 +msgid " is to add a general simulations change to the PR" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:383 +msgid "Note that only one token should be used." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:48 +#: ../../source/contributor-tutorial-contribute-on-github.rst:385 msgid "" -"The Flower repository contains a number of convenience scripts to make " -"recurring development tasks easier and less error-prone. See the " -":code:`/dev` subdirectory for a full list. The following scripts are " -"amonst the most important ones:" +"Its content must have a specific format. 
We will break down what each " +"possibility does:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:387 +msgid "" +"If the ``### Changelog entry`` section contains nothing or doesn't exist," +" the following text will be added to the changelog::" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:391 +msgid "" +"If the ``### Changelog entry`` section contains a description (and no " +"token), the following text will be added to the changelog::" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:397 +msgid "" +"If the ``### Changelog entry`` section contains ````, nothing will " +"change in the changelog." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:399 +msgid "" +"If the ``### Changelog entry`` section contains ````, the " +"following text will be added to the changelog::" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:403 +msgid "" +"If the ``### Changelog entry`` section contains ````, the " +"following text will be added to the changelog::" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:407 +msgid "" +"If the ``### Changelog entry`` section contains ````, the " +"following text will be added to the changelog::" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:411 +msgid "" +"If the ``### Changelog entry`` section contains ````, the following " +"text will be added to the changelog::" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:415 +msgid "" +"If the ``### Changelog entry`` section contains ````, the " +"following text will be added to the changelog::" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:419 +msgid "" +"Note that only one token must be provided, otherwise, only the first " +"action (in the order listed above), will be performed." 
+msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:2 +msgid "Get started as a contributor" +msgstr "成为贡献者" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 +msgid "Prerequisites" +msgstr "先决条件" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:7 +#, fuzzy +msgid "`Python 3.8 `_ or above" +msgstr "Python 3.7 `_ 或更高版本" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:8 +msgid "`Poetry 1.3 `_ or above" +msgstr "`Poetry 1.3 `_ 或更高版本" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:9 +msgid "(Optional) `pyenv `_" +msgstr "(可选) `pyenv `_" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:10 +msgid "(Optional) `pyenv-virtualenv `_" +msgstr "(可选) `pyenv-virtualenv `_" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:12 +msgid "" +"Flower uses :code:`pyproject.toml` to manage dependencies and configure " +"development tools (the ones which support it). Poetry is a build tool " +"which supports `PEP 517 `_." +msgstr "" +"Flower 使用 :code:`pyproject.toml` 来管理依赖关系和配置开发工具(支持它的)。Poetry 是一种支持 `PEP " +"517 `_ 的构建工具。" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:18 +msgid "Developer Machine Setup" +msgstr "开发者机器设置" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:21 +msgid "Preliminarities" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:22 +msgid "Some system-wide dependencies are needed." +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:25 +msgid "For macOS" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:27 +msgid "" +"Install `homebrew `_. Don't forget the post-" +"installation actions to add `brew` to your PATH." 
+msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:28 +msgid "" +"Install `xz` (to install different Python versions) and `pandoc` to build" +" the docs::" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:34 +msgid "For Ubuntu" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:35 +msgid "" +"Ensure you system (Ubuntu 22.04+) is up-to-date, and you have all " +"necessary packages::" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:44 +#, fuzzy +msgid "Create Flower Dev Environment" +msgstr "创建/删除虚拟环境" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:46 +#, fuzzy +msgid "" +"1. Clone the `Flower repository `_ from " +"GitHub::" +msgstr "首先,从 GitHub 克隆 \"Flower 存储库 `_\":" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:52 +msgid "" +"Let's create the Python environment for all-things Flower. If you wish to" +" use :code:`pyenv`, we provide two convenience scripts that you can use. " +"If you prefer using something else than :code:`pyenv`, create a new " +"environment, activate and skip to the last point where all packages are " +"installed." 
msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:54 -msgid "Create/Delete Virtual Environment" +#, fuzzy +msgid "" +"If you don't have :code:`pyenv` installed, the following script that will" +" install it, set it up, and create the virtual environment (with " +":code:`Python 3.8.17` by default)::" msgstr "" +"如果没有安装 :code:`pyenv`,可以使用以下脚本安装 pyenv、设置并创建虚拟环境(默认使用 " +":code:`Python3.8.17)::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:62 -msgid "Compile ProtoBuf Definitions" +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:58 +#, fuzzy +msgid "" +"If you already have :code:`pyenv` installed (along with the :code:`pyenv-" +"virtualenv` plugin), you can use the following convenience script (with " +":code:`Python 3.8.17` by default)::" msgstr "" +"如果没有安装 :code:`pyenv`,可以使用以下脚本安装 pyenv、设置并创建虚拟环境(默认使用 " +":code:`Python3.8.17)::" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:62 +#, fuzzy +msgid "" +"3. Install the Flower package in development mode (think :code:`pip " +"install -e`) along with all necessary dependencies::" +msgstr "第三,在开发模式下安装 Flower 软件包(想想 :code:`pip install -e`)以及所有必要的依赖项::" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 +msgid "Convenience Scripts" +msgstr "便捷脚本" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:71 +msgid "" +"The Flower repository contains a number of convenience scripts to make " +"recurring development tasks easier and less error-prone. See the " +":code:`/dev` subdirectory for a full list. 
The following scripts are "
+"amonst the most important ones:"
+msgstr "Flower 软件仓库包含大量便捷脚本,可使重复性开发任务更轻松、更不易出错。完整列表请参见 :code:`/dev` 子目录。以下是最重要的脚本:"
+
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77
+msgid "Create/Delete Virtual Environment"
+msgstr "创建/删除虚拟环境"
+
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85
+msgid "Compile ProtoBuf Definitions"
+msgstr "编译 ProtoBuf 定义"
+
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92
 msgid "Auto-Format Code"
-msgstr ""
+msgstr "自动格式化代码"
 
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:76
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:99
 msgid "Run Linters and Tests"
-msgstr ""
+msgstr "运行代码检查工具和测试"
 
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:83
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:106
 msgid "Run Github Actions (CI) locally"
-msgstr ""
+msgstr "在本地运行 GitHub Actions (CI)"
 
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108
+#, fuzzy
 msgid ""
 "Developers could run the full set of Github Actions workflows under their"
-" local environment by using `Act _`. "
+" local environment by using `Act `_. "
 "Please refer to the installation instructions under the linked repository"
 " and run the next command under Flower main cloned repository folder::"
 msgstr ""
+"开发人员可以使用 `Act _` 在本地环境下运行全套 Github Actions"
+" 工作流程。请参考链接仓库下的安装说明,并在 Flower 主克隆仓库文件夹下运行下一条命令::"
 
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:115
 msgid ""
 "The Flower default workflow would run by setting up the required Docker "
 "machines underneath."
-msgstr "" +msgstr "Flower 默认工作流程将通过在下面设置所需的 Docker 机器来运行。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:97 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:120 msgid "Build Release" -msgstr "" +msgstr "版本发布" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:99 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:122 msgid "" "Flower uses Poetry to build releases. The necessary command is wrapped in" " a simple script::" -msgstr "" +msgstr "Flower 使用 Poetry 创建发布版本。必要的命令封装在一个简单的脚本中::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:104 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:127 msgid "" "The resulting :code:`.whl` and :code:`.tar.gz` releases will be stored in" " the :code:`/dist` subdirectory." -msgstr "" +msgstr "生成的 :code:`.whl` 和 :code:`.tar.gz` 版本将存储在 :code:`/dist` 子目录中。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:109 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:132 msgid "Build Documentation" -msgstr "" +msgstr "构建文档" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:111 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:134 msgid "" "Flower's documentation uses `Sphinx `_. " "There's no convenience script to re-build the documentation yet, but it's" " pretty easy::" msgstr "" +"Flower 的文档使用 `Sphinx `_。目前还没有很方便的脚本来重新构建文档,不过这很容易::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:117 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:140 msgid "This will generate HTML documentation in ``doc/build/html``." 
-msgstr "" +msgstr "这将在 ``doc/build/html`` 中生成 HTML 文档。" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 msgid "Example: FedBN in PyTorch - From Centralized To Federated" -msgstr "" +msgstr "示例: PyTorch 中的 FedBN - 从集中式到联邦式" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 msgid "" @@ -1778,33 +2351,41 @@ msgid "" "designed for non-iid data. We are using PyTorch to train a Convolutional " "Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " "When applying FedBN, only few changes needed compared to `Example: " -"PyTorch - From Centralized To Federated `_." msgstr "" +"本教程将向您展示如何使用 Flower 为现有的机器学习框架构建一个联邦学习的版本,并使用 \"FedBN `_\"(一种针对非 iid 数据设计的联邦训练策略)。我们使用 PyTorch 在 CIFAR-10 " +"数据集上训练一个卷积神经网络(带有Batch Normalization层)。在应用 FedBN 时,只需对 `示例: PyTorch - " +"从集中式到联邦式 `_ 做少量改动。" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 #: ../../source/example-pytorch-from-centralized-to-federated.rst:10 msgid "Centralized Training" -msgstr "" +msgstr "集中式训练" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 msgid "" "All files are revised based on `Example: PyTorch - From Centralized To " -"Federated `_. The only thing to do is modifying the file called " ":code:`cifar.py`, revised part is shown below:" msgstr "" +"所有文件均根据 `示例: PyTorch -从集中式到联邦式 `_。唯一要做的就是修改名为 :code:`cifar.py` " +"的文件,修改部分如下所示:" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 msgid "" "The model architecture defined in class Net() is added with Batch " "Normalization layers accordingly." 
-msgstr "" +msgstr "类 Net() 中定义的模型架构会相应添加Batch Normalization层。" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:41 #: ../../source/example-pytorch-from-centralized-to-federated.rst:157 msgid "You can now run your machine learning workload:" -msgstr "" +msgstr "现在,您可以运行您的机器学习工作了:" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 msgid "" @@ -1813,28 +2394,35 @@ msgid "" "federated learning system within FedBN, the sytstem consists of one " "server and two clients." msgstr "" +"到目前为止,如果您以前使用过 PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,使用我们所构建的内容在 FedBN " +"中创建一个联邦学习系统,该系统由一个服务器和两个客户端组成。" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 #: ../../source/example-pytorch-from-centralized-to-federated.rst:167 msgid "Federated Training" -msgstr "" +msgstr "联邦培训" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 msgid "" "If you have read `Example: PyTorch - From Centralized To Federated " -"`_, the following parts are easy to follow, onyl " ":code:`get_parameters` and :code:`set_parameters` function in " ":code:`client.py` needed to revise. If not, please read the `Example: " -"PyTorch - From Centralized To Federated `_. first." msgstr "" +"如果你读过 `示例: PyTorch - 从集中式到联邦式 `_,下面的部分就很容易理解了,只需要修改 " +":code:`get_parameters` 和 :code:`set_parameters` 中的 :code:`client.py` " +"函数。如果没有,请阅读 `示例: PyTorch - 从集中式到联邦式 `_。" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 msgid "" "Our example consists of one *server* and two *clients*. In FedBN, " ":code:`server.py` keeps unchanged, we can start the server directly." -msgstr "" +msgstr "我们的示例包括一个*服务器*和两个*客户端*。在 FedBN 中,:code:`server.py` 保持不变,我们可以直接启动服务器。" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:62 msgid "" @@ -1843,17 +2431,20 @@ msgid "" "we will exclude batch normalization parameters from model parameter list " "when sending to or receiving from the server." 
msgstr ""
+"最后,我们将修改 *client* 的逻辑,修改 :code:`client.py` 中的 :code:`get_parameters` 和 "
+":code:`set_parameters`,在向服务器发送或从服务器接收时,我们将从模型参数列表中排除batch "
+"normalization层的参数。"
 
 #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:85
 msgid "Now, you can now open two additional terminal windows and run"
-msgstr ""
+msgstr "现在,您可以打开另外两个终端窗口并运行程序"
 
 #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:91
 msgid ""
 "in each window (make sure that the server is still running before you do "
 "so) and see your (previously centralized) PyTorch project run federated "
 "learning with FedBN strategy across two clients. Congratulations!"
-msgstr ""
+msgstr "确保服务器仍在运行后,然后您就能看到您的 PyTorch 项目(之前是集中式的)通过 FedBN 策略在两个客户端上运行联邦学习。祝贺!"
 
 #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:94
 #: ../../source/example-jax-from-centralized-to-federated.rst:277
@@ -1861,7 +2452,7 @@ msgstr ""
 #: ../../source/example-pytorch-from-centralized-to-federated.rst:310
 #: ../../source/tutorial-quickstart-jax.rst:283
 msgid "Next Steps"
-msgstr ""
+msgstr "下一步工作"
 
 #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:96
 msgid ""
@@ -1873,10 +2464,14 @@ msgid ""
 "using different subsets of CIFAR-10 on each client? How about adding more"
 " clients?"
 msgstr ""
+"本示例的完整源代码可在 `_ "
+"找到。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。让我们准备好进一步探讨这一主题。如在每个客户端使用不同的 "
+"CIFAR-10 子集,或者增加客户端的数量。"
 
 #: ../../source/example-jax-from-centralized-to-federated.rst:2
 msgid "Example: JAX - Run JAX Federated"
-msgstr ""
+msgstr "示例: JAX - 运行联邦式 JAX"
 
 #: ../../source/example-jax-from-centralized-to-federated.rst:4
 #: ../../source/tutorial-quickstart-jax.rst:10
@@ -1892,6 +2487,12 @@ msgid ""
 " tutorial`. Then, we build upon the centralized training code to run the "
 "training in a federated fashion."
msgstr "" +"本教程将向您展示如何使用 Flower 构建现有 JAX 的联邦学习版本。我们将使用 JAX 在 scikit-learn " +"数据集上训练线性回归模型。我们将采用与 `PyTorch - 从集中式到联邦式 " +"`_ 教程中类似的示例结构。首先,我们根据 `JAX 的线性回归 " +"`_" +" 教程构建集中式训练方法。然后,我们在集中式训练代码的基础上以联邦方式运行训练。" #: ../../source/example-jax-from-centralized-to-federated.rst:10 #: ../../source/tutorial-quickstart-jax.rst:16 @@ -1899,11 +2500,13 @@ msgid "" "Before we start building our JAX example, we need install the packages " ":code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:" msgstr "" +"在开始构建 JAX 示例之前,我们需要安装软件包 :code:`jax`、:code:`jaxlib`、:code:`scikit-learn` " +"和 :code:`flwr`:" #: ../../source/example-jax-from-centralized-to-federated.rst:18 #: ../../source/tutorial-quickstart-jax.rst:24 msgid "Linear Regression with JAX" -msgstr "" +msgstr "使用 JAX 进行线性回归" #: ../../source/example-jax-from-centralized-to-federated.rst:20 #: ../../source/tutorial-quickstart-jax.rst:26 @@ -1913,6 +2516,8 @@ msgid "" "explanation of what's going on then have a look at the official `JAX " "documentation `_." msgstr "" +"首先,我们将简要介绍基于 :code:`Linear Regression` 模型的集中式训练代码。如果您想获得更深入的解释,请参阅官方的 " +"`JAX 文档 `_。" #: ../../source/example-jax-from-centralized-to-federated.rst:23 #: ../../source/tutorial-quickstart-jax.rst:29 @@ -1926,20 +2531,24 @@ msgid "" "not yet import the :code:`flwr` package for federated learning. This will" " be done later." msgstr "" +"让我们创建一个名为 :code:`jax_training.py` 的新文件,其中包含传统(集中式)线性回归训练所需的所有组件。首先,需要导入 " +"JAX 包 :code:`jax` 和 :code:`jaxlib`。此外,我们还需要导入 :code:`sklearn`,因为我们使用 " +":code:`make_regression` 创建数据集,并使用 :code:`train_test_split` " +"将数据集拆分成训练集和测试集。您可以看到,我们还没有导入用于联邦学习的 :code:`flwr` 软件包,这将在稍后完成。" #: ../../source/example-jax-from-centralized-to-federated.rst:37 #: ../../source/tutorial-quickstart-jax.rst:43 msgid "" "The :code:`load_data()` function loads the mentioned training and test " "sets." 
-msgstr "" +msgstr ":code:`load_data()` 函数会加载上述训练集和测试集。" #: ../../source/example-jax-from-centralized-to-federated.rst:47 #: ../../source/tutorial-quickstart-jax.rst:53 msgid "" "The model architecture (a very simple :code:`Linear Regression` model) is" " defined in :code:`load_model()`." -msgstr "" +msgstr "模型结构(一个非常简单的 :code:`Linear Regression` 线性回归模型)在 :code:`load_model()` 中定义。" #: ../../source/example-jax-from-centralized-to-federated.rst:59 #: ../../source/tutorial-quickstart-jax.rst:65 @@ -1950,6 +2559,9 @@ msgid "" " is separate since JAX takes derivatives with a :code:`grad()` function " "(defined in the :code:`main()` function and called in :code:`train()`)." msgstr "" +"现在,我们需要定义训练函数( :code:`train()`)。它循环遍历训练集,并计算每批训练数据的损失值(函数 " +":code:`loss_fn()`)。由于 JAX 使用 :code:`grad()` 函数提取导数(在 :code:`main()` " +"函数中定义,并在 :code:`train()` 中调用),因此损失函数是独立的。" #: ../../source/example-jax-from-centralized-to-federated.rst:77 #: ../../source/tutorial-quickstart-jax.rst:83 @@ -1957,7 +2569,7 @@ msgid "" "The evaluation of the model is defined in the function " ":code:`evaluation()`. The function takes all test examples and measures " "the loss of the linear regression model." -msgstr "" +msgstr "模型的评估在函数 :code:`evaluation()` 中定义。该函数获取所有测试数据,并计算线性回归模型的损失值。" #: ../../source/example-jax-from-centralized-to-federated.rst:88 #: ../../source/tutorial-quickstart-jax.rst:94 @@ -1967,11 +2579,14 @@ msgid "" "As already mentioned, the :code:`jax.grad()` function is defined in " ":code:`main()` and passed to :code:`train()`." 
msgstr "" +"在定义了数据加载、模型架构、训练和评估之后,我们就可以把这些放在一起,使用 JAX " +"训练我们的模型了。如前所述,:code:`jax.grad()` 函数在 :code:`main()` 中定义,并传递给 " +":code:`train()`。" #: ../../source/example-jax-from-centralized-to-federated.rst:105 #: ../../source/tutorial-quickstart-jax.rst:111 msgid "You can now run your (centralized) JAX linear regression workload:" -msgstr "" +msgstr "现在您可以运行(集中式)JAX 线性回归工作了:" #: ../../source/example-jax-from-centralized-to-federated.rst:111 #: ../../source/tutorial-quickstart-jax.rst:117 @@ -1979,12 +2594,12 @@ msgid "" "So far this should all look fairly familiar if you've used JAX before. " "Let's take the next step and use what we've built to create a simple " "federated learning system consisting of one server and two clients." -msgstr "" +msgstr "到目前为止,如果你以前使用过 JAX,就会对这一切感到很熟悉。下一步,让我们利用已构建的代码创建一个简单的联邦学习系统(一个服务器和两个客户端)。" #: ../../source/example-jax-from-centralized-to-federated.rst:115 #: ../../source/tutorial-quickstart-jax.rst:121 msgid "JAX meets Flower" -msgstr "" +msgstr "JAX 结合 Flower" #: ../../source/example-jax-from-centralized-to-federated.rst:117 #: ../../source/tutorial-quickstart-jax.rst:123 @@ -1998,6 +2613,8 @@ msgid "" "parameter updates. This describes one round of the federated learning " "process, and we repeat this for multiple rounds." msgstr "" +"把现有工作联邦化的概念始终是相同的,也很容易理解。我们要启动一个*服务器*,然后对连接到*服务器*的*客户端*运行 " +":code:`jax_training.py`中的代码。*服务器*向客户端发送模型参数,*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后服务器对所有收到的参数进行平均聚合。以上的描述构成了一轮联邦学习,我们将重复进行多轮学习。" #: ../../source/example-jax-from-centralized-to-federated.rst:123 #: ../../source/example-mxnet-walk-through.rst:204 @@ -2009,13 +2626,15 @@ msgid "" ":code:`flwr`. Next, we use the :code:`start_server` function to start a " "server and tell it to perform three rounds of federated learning." 
msgstr "" +"我们的示例包括一个*服务器*和两个*客户端*。让我们先设置 :code:`server.py`。*服务器*需要导入 Flower 软件包 " +":code:`flwr`。接下来,我们使用 :code:`start_server` 函数启动服务器,并让它执行三轮联邦学习。" #: ../../source/example-jax-from-centralized-to-federated.rst:133 #: ../../source/example-mxnet-walk-through.rst:214 #: ../../source/example-pytorch-from-centralized-to-federated.rst:191 #: ../../source/tutorial-quickstart-jax.rst:139 msgid "We can already start the *server*:" -msgstr "" +msgstr "我们已经可以启动*服务器*了:" #: ../../source/example-jax-from-centralized-to-federated.rst:139 #: ../../source/tutorial-quickstart-jax.rst:145 @@ -2025,6 +2644,9 @@ msgid "" " *client* needs to import :code:`flwr`, but also :code:`jax` and " ":code:`jaxlib` to update the parameters on our JAX model:" msgstr "" +"最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 " +":code:`jax_training.py` 中定义的 JAX 训练为基础。我们的 *client* 需要导入 " +":code:`flwr`,还需要导入 :code:`jax` 和 :code:`jaxlib` 以更新 JAX 模型的参数:" #: ../../source/example-jax-from-centralized-to-federated.rst:154 #: ../../source/tutorial-quickstart-jax.rst:160 @@ -2040,12 +2662,18 @@ msgid "" "parameters, one method for training the model, and one method for testing" " the model:" msgstr "" +"实现一个 Flower *client*基本上意味着去实现一个 :code:`flwr.client.Client` 或 " +":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " +":code:`flwr.client.NumPyClient`,并将其命名为 :code:`FlowerClient`。如果使用具有良好 " +"NumPy 互操作性的框架(如 JAX),:code:`NumPyClient` 比 " +":code:`Client`更容易实现,因为它避免了一些不必要的操作。:code:`FlowerClient` " +"需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" #: ../../source/example-jax-from-centralized-to-federated.rst:161 #: ../../source/example-mxnet-walk-through.rst:242 #: ../../source/tutorial-quickstart-jax.rst:167 msgid ":code:`set_parameters (optional)`" -msgstr "" +msgstr ":code:`set_parameters (可选)`" #: ../../source/example-jax-from-centralized-to-federated.rst:160 #: ../../source/example-mxnet-walk-through.rst:241 @@ -2054,12 +2682,12 @@ msgstr "" msgid "" "set the model parameters on the local model that are received 
from the " "server" -msgstr "" +msgstr "在本地模型上设置从服务器接收的模型参数" #: ../../source/example-jax-from-centralized-to-federated.rst:161 #: ../../source/tutorial-quickstart-jax.rst:167 msgid "transform parameters to NumPy :code:`ndarray`'s" -msgstr "" +msgstr "将参数转换为 NumPy :code:`ndarray`格式" #: ../../source/example-jax-from-centralized-to-federated.rst:162 #: ../../source/example-mxnet-walk-through.rst:243 @@ -2068,17 +2696,17 @@ msgstr "" msgid "" "loop over the list of model parameters received as NumPy " ":code:`ndarray`'s (think list of neural network layers)" -msgstr "" +msgstr "循环遍历以 NumPy :code:`ndarray` 形式接收的模型参数列表(可以看作神经网络的列表)" #: ../../source/example-jax-from-centralized-to-federated.rst:163 #: ../../source/example-mxnet-walk-through.rst:244 #: ../../source/example-pytorch-from-centralized-to-federated.rst:221 #: ../../source/tutorial-quickstart-jax.rst:169 -#: ../../source/tutorial-quickstart-mxnet.rst:169 +#: ../../source/tutorial-quickstart-mxnet.rst:171 #: ../../source/tutorial-quickstart-pytorch.rst:155 #: ../../source/tutorial-quickstart-scikitlearn.rst:108 msgid ":code:`get_parameters`" -msgstr "" +msgstr ":code:`get_parameters`" #: ../../source/example-jax-from-centralized-to-federated.rst:164 #: ../../source/example-mxnet-walk-through.rst:245 @@ -2088,16 +2716,18 @@ msgid "" "get the model parameters and return them as a list of NumPy " ":code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" msgstr "" +"获取模型参数,并以 NumPy :code:`ndarray`的列表形式返回(这正是 " +":code:`flwr.client.NumPyClient`所匹配的格式)" #: ../../source/example-jax-from-centralized-to-federated.rst:167 #: ../../source/example-mxnet-walk-through.rst:248 #: ../../source/example-pytorch-from-centralized-to-federated.rst:225 #: ../../source/tutorial-quickstart-jax.rst:173 -#: ../../source/tutorial-quickstart-mxnet.rst:175 +#: ../../source/tutorial-quickstart-mxnet.rst:177 #: ../../source/tutorial-quickstart-pytorch.rst:161 #: ../../source/tutorial-quickstart-scikitlearn.rst:115 msgid 
":code:`fit`" -msgstr "" +msgstr ":code:`fit`" #: ../../source/example-jax-from-centralized-to-federated.rst:166 #: ../../source/example-jax-from-centralized-to-federated.rst:170 @@ -2110,41 +2740,41 @@ msgstr "" msgid "" "update the parameters of the local model with the parameters received " "from the server" -msgstr "" +msgstr "用从服务器接收到的参数更新本地模型的参数" #: ../../source/example-jax-from-centralized-to-federated.rst:167 #: ../../source/example-mxnet-walk-through.rst:248 #: ../../source/example-pytorch-from-centralized-to-federated.rst:225 #: ../../source/tutorial-quickstart-jax.rst:173 msgid "train the model on the local training set" -msgstr "" +msgstr "在本地训练集上训练模型" #: ../../source/example-jax-from-centralized-to-federated.rst:168 #: ../../source/tutorial-quickstart-jax.rst:174 msgid "get the updated local model parameters and return them to the server" -msgstr "" +msgstr "获取更新后的本地模型参数并返回服务器" #: ../../source/example-jax-from-centralized-to-federated.rst:172 #: ../../source/example-mxnet-walk-through.rst:253 #: ../../source/example-pytorch-from-centralized-to-federated.rst:230 #: ../../source/tutorial-quickstart-jax.rst:178 -#: ../../source/tutorial-quickstart-mxnet.rst:178 +#: ../../source/tutorial-quickstart-mxnet.rst:180 #: ../../source/tutorial-quickstart-pytorch.rst:164 #: ../../source/tutorial-quickstart-scikitlearn.rst:118 msgid ":code:`evaluate`" -msgstr "" +msgstr ":code:`evaluate`" #: ../../source/example-jax-from-centralized-to-federated.rst:171 #: ../../source/example-mxnet-walk-through.rst:252 #: ../../source/example-pytorch-from-centralized-to-federated.rst:229 #: ../../source/tutorial-quickstart-jax.rst:177 msgid "evaluate the updated model on the local test set" -msgstr "" +msgstr "在本地测试集上评估更新后的模型" #: ../../source/example-jax-from-centralized-to-federated.rst:172 #: ../../source/tutorial-quickstart-jax.rst:178 msgid "return the local loss to the server" -msgstr "" +msgstr "向服务器返回本地损失值" #: ../../source/example-jax-from-centralized-to-federated.rst:174 
#: ../../source/tutorial-quickstart-jax.rst:180 @@ -2153,6 +2783,8 @@ msgid "" ":code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with" " `NumPyClient`." msgstr "" +"具有挑战性的部分是将 JAX 模型参数从 :code:`DeviceArray` 转换为 :code:`NumPy ndarray`,使其与 " +"`NumPyClient` 兼容。" #: ../../source/example-jax-from-centralized-to-federated.rst:176 #: ../../source/tutorial-quickstart-jax.rst:182 @@ -2165,18 +2797,22 @@ msgid "" "annotations to give you a better understanding of the data types that get" " passed around." msgstr "" +"这两个 :code:`NumPyClient` 方法 :code:`fit` 和 :code:`evaluate` 使用了之前在 " +":code:`jax_training.py` 中定义的函数 :code:`train()` 和 " +":code:`evaluate()`。因此,我们在这里要做的就是通过 :code:`NumPyClient` 子类告知 Flower " +"在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让您更好地理解传递的数据类型。" #: ../../source/example-jax-from-centralized-to-federated.rst:245 #: ../../source/tutorial-quickstart-jax.rst:251 msgid "Having defined the federation process, we can run it." -msgstr "" +msgstr "定义了联邦进程后,我们就可以运行它了。" #: ../../source/example-jax-from-centralized-to-federated.rst:268 #: ../../source/example-mxnet-walk-through.rst:347 #: ../../source/example-pytorch-from-centralized-to-federated.rst:301 #: ../../source/tutorial-quickstart-jax.rst:274 msgid "And that's it. You can now open two additional terminal windows and run" -msgstr "" +msgstr "就是这样,现在你可以打开另外两个终端窗口,然后运行" #: ../../source/example-jax-from-centralized-to-federated.rst:274 #: ../../source/tutorial-quickstart-jax.rst:280 @@ -2184,7 +2820,7 @@ msgid "" "in each window (make sure that the server is still running before you do " "so) and see your JAX project run federated learning across two clients. " "Congratulations!" -msgstr "" +msgstr "确保服务器仍在运行,然后在每个客户端窗口就能看到你的 JAX 项目在两个客户端上运行联邦学习了。祝贺!" #: ../../source/example-jax-from-centralized-to-federated.rst:279 #: ../../source/tutorial-quickstart-jax.rst:285 @@ -2194,6 +2830,9 @@ msgid "" "/quickstart-jax>`_. Our example is somewhat over-simplified because both " "clients load the same dataset." 
msgstr "" +"此示例的源代码经过长期改进,可在此处找到: `Quickstart JAX " +"`_。我们的示例有些过于简单,因为两个客户端都加载了相同的数据集。" #: ../../source/example-jax-from-centralized-to-federated.rst:282 #: ../../source/tutorial-quickstart-jax.rst:288 @@ -2201,11 +2840,11 @@ msgid "" "You're now prepared to explore this topic further. How about using a more" " sophisticated model or using a different dataset? How about adding more " "clients?" -msgstr "" +msgstr "现在,您已准备好进行更深一步探索了。例如使用更复杂的模型或使用不同的数据集会如何?增加更多客户端会如何?" #: ../../source/example-mxnet-walk-through.rst:2 msgid "Example: MXNet - Run MXNet Federated" -msgstr "" +msgstr "示例: MXNet - 运行联邦式 MXNet" #: ../../source/example-mxnet-walk-through.rst:4 msgid "" @@ -2223,16 +2862,25 @@ msgid "" " tutorial. Then, we build upon the centralized training code to run the " "training in a federated fashion." msgstr "" +"本教程将向您展示如何使用 Flower 构建现有 MXNet 的联学习版本。我们将使用 MXNet 在 MNIST " +"数据集上训练一个序列模型。另外,我们将采用与我们的 `PyTorch - 从集中式到联邦式 " +"`_ 教程类似的示例结构。MXNet 和 PyTorch 非常相似,参考 `此处 " +"`_对 MXNet 和 PyTorch " +"进行了详细的比较。首先,我们根据 `手写数字识别 " +"`" +" 教程 建立了集中式训练方法。然后,我们在集中式训练代码的基础上,以联邦方式运行训练。" #: ../../source/example-mxnet-walk-through.rst:10 msgid "" "Before we start setting up our MXNet example, we install the " ":code:`mxnet` and :code:`flwr` packages:" -msgstr "" +msgstr "在开始设置 MXNet 示例之前,我们先安装 :code:`mxnet` 和 :code:`flwr` 软件包:" #: ../../source/example-mxnet-walk-through.rst:19 msgid "MNIST Training with MXNet" -msgstr "" +msgstr "使用 MXNet 进行 MNIST 训练" #: ../../source/example-mxnet-walk-through.rst:21 msgid "" @@ -2241,6 +2889,8 @@ msgid "" " what's going on then have a look at the official `MXNet tutorial " "`_." msgstr "" +"首先,我们将简要介绍基于 :code:`Sequential` 模型的集中式训练代码。如果您想获得更深入的解释,请参阅官方的 `MXNet教程 " +"`_。" #: ../../source/example-mxnet-walk-through.rst:24 msgid "" @@ -2250,10 +2900,12 @@ msgid "" "that we do not yet import the :code:`flwr` package for federated " "learning. This will be done later." 
msgstr "" +"让我们创建一个名为:code:`mxnet_mnist.py`的新文件,其中包含传统(集中式)MNIST 训练所需的所有组件。首先,需要导入 " +"MXNet 包 :code:`mxnet`。您可以看到,我们尚未导入用于联合学习的 :code:`flwr` 包,这将在稍后完成。" #: ../../source/example-mxnet-walk-through.rst:42 msgid "The :code:`load_data()` function loads the MNIST training and test sets." -msgstr "" +msgstr ":code:`load_data()` 函数加载 MNIST 训练集和测试集。" #: ../../source/example-mxnet-walk-through.rst:57 msgid "" @@ -2261,20 +2913,22 @@ msgid "" "learning workload. The model architecture (a very simple " ":code:`Sequential` model) is defined in :code:`model()`." msgstr "" +"如前所述,我们将使用 MNIST 数据集进行机器学习。模型架构(一个非常简单的 :code:`Sequential` 模型)在 " +":code:`model()` 中定义。" #: ../../source/example-mxnet-walk-through.rst:70 msgid "" "We now need to define the training (function :code:`train()`) which loops" " over the training set and measures the loss for each batch of training " "examples." -msgstr "" +msgstr "现在,我们需要定义训练函数( :code:`train()`),该函数在训练集上循环训练,并计算每批训练示例的损失值。" #: ../../source/example-mxnet-walk-through.rst:123 msgid "" "The evaluation of the model is defined in function :code:`test()`. The " "function loops over all test samples and measures the loss and accuracy " "of the model based on the test dataset." -msgstr "" +msgstr "模型的评估在函数 :code:`test()` 中定义。该函数循环遍历所有测试样本,并根据测试数据集计算模型的损失值和准确度。" #: ../../source/example-mxnet-walk-through.rst:158 msgid "" @@ -2283,10 +2937,12 @@ msgid "" "Note that the GPU/CPU device for the training and testing is defined " "within the :code:`ctx` (context)." msgstr "" +"在定义了数据加载、模型架构、训练和评估之后,我们就可以把所有放在一起,在 MNIST 上训练我们的模型了。请注意,用于训练和测试的 GPU/CPU" +" 设备是在 :code:`ctx`中定义的。" #: ../../source/example-mxnet-walk-through.rst:184 msgid "You can now run your (centralized) MXNet machine learning workload:" -msgstr "" +msgstr "现在,您可以运行(集中式)MXNet 机器学习工作:" #: ../../source/example-mxnet-walk-through.rst:190 msgid "" @@ -2295,10 +2951,12 @@ msgid "" "create a simple federated learning system consisting of one server and " "two clients." 
msgstr "" +"到目前为止,如果你以前使用过 MXNet(甚至 " +"PyTorch),这一切看起来应该相当熟悉。下一步,让我们利用已构建的内容创建一个简单联邦学习系统(由一个服务器和两个客户端组成)。" #: ../../source/example-mxnet-walk-through.rst:194 msgid "MXNet meets Flower" -msgstr "" +msgstr "MXNet 结合 Flower" #: ../../source/example-mxnet-walk-through.rst:196 msgid "" @@ -2309,6 +2967,8 @@ msgid "" "workloads. This section will show you how Flower can be used to federate " "our centralized MXNet workload." msgstr "" +"由于 MXNet 目前不支持联邦学习,因此无法轻松地直接将 MXNet 用于联邦学习之中。Flower " +"与底层机器学习框架完全无关,因此它可用于任意联邦式机器学习工作。本节将向你展示如何使用 Flower 将我们的集中式 MXNet 改为联邦式训练。" #: ../../source/example-mxnet-walk-through.rst:198 msgid "" @@ -2321,6 +2981,8 @@ msgid "" "parameter updates. This describes one round of the federated learning " "process and we repeat this for multiple rounds." msgstr "" +"将现有模型框架联邦化的概念始终是相同的,也很容易理解。我们必须启动一个*服务器*,然后对连接到*服务器*的*客户端*使用 " +":code:`mxnet_mnist.py`中的代码。*服务器*向客户端发送模型参数,然后*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后会对所有收到的参数更新进行平均聚合。以上描述的是一轮联邦学习过程,我们将重复进行多轮学习。" #: ../../source/example-mxnet-walk-through.rst:220 msgid "" @@ -2329,6 +2991,9 @@ msgid "" "Our *client* needs to import :code:`flwr`, but also :code:`mxnet` to " "update the parameters on our MXNet model:" msgstr "" +"最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 :code:`mxnet_mnist.py`" +" 中定义的 MXNet 训练为基础。我们的 *client* 不仅需要导入 :code:`flwr`,还需要导入 " +":code:`mxnet`,以更新 MXNet 模型的参数:" #: ../../source/example-mxnet-walk-through.rst:235 msgid "" @@ -2343,26 +3008,34 @@ msgid "" "parameters, one method for training the model, and one method for testing" " the model:" msgstr "" +"实现 Flower *client*基本上意味着要实现 :code:`flwr.client.Client` 或 " +":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " +":code:`flwr.client.NumPyClient`,并将其命名为 :code:`MNISTClient`。如果使用具有良好 NumPy" +" 互操作性的框架(如 PyTorch 或 MXNet),:code:`NumPyClient` 比 " +":code:`Client`更容易实现,因为它避免了一些不必要的操作。:code:`MNISTClient` " +"需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" #: ../../source/example-mxnet-walk-through.rst:242 msgid "transform MXNet 
:code:`NDArray`'s to NumPy :code:`ndarray`'s" -msgstr "" +msgstr "将 MXNet :code:`NDArray` 转换为 NumPy :code:`ndarray`" #: ../../source/example-mxnet-walk-through.rst:249 #: ../../source/example-pytorch-from-centralized-to-federated.rst:226 msgid "get the updated local model weights and return them to the server" -msgstr "" +msgstr "获取更新后的本地模型参数并发送回服务器" #: ../../source/example-mxnet-walk-through.rst:253 #: ../../source/example-pytorch-from-centralized-to-federated.rst:230 msgid "return the local loss and accuracy to the server" -msgstr "" +msgstr "向服务器返回本地损失值和精确度" #: ../../source/example-mxnet-walk-through.rst:255 msgid "" "The challenging part is to transform the MXNet parameters from " ":code:`NDArray` to :code:`NumPy Arrays` to make it readable for Flower." msgstr "" +"具有挑战性的部分是将 MXNet 参数从 :code:`NDArray` 转换为 :code:`NumPy Arrays` 以便 Flower " +"可以读取。" #: ../../source/example-mxnet-walk-through.rst:257 msgid "" @@ -2374,6 +3047,10 @@ msgid "" "annotations to give you a better understanding of the data types that get" " passed around." msgstr "" +"这两个 :code:`NumPyClient` 方法 :code:`fit` 和 :code:`evaluate` 使用了之前在 " +":code:`mxnet_mnist.py` 中定义的函数 :code:`train()` 和 " +":code:`test()`。因此,我们要做的就是通过 :code:`NumPyClient` 子类告知 Flower " +"在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让您更好地理解传递的数据类型。" #: ../../source/example-mxnet-walk-through.rst:319 msgid "" @@ -2381,13 +3058,15 @@ msgid "" " we can put everything together and train our :code:`Sequential` model on" " MNIST." msgstr "" +"在定义了数据加载、模型架构、训练和评估之后,我们就可以将所有内容整合在一起,在 MNIST 上训练我们的 :code:`Sequential` " +"模型。" #: ../../source/example-mxnet-walk-through.rst:353 msgid "" "in each window (make sure that the server is still running before you do " "so) and see your MXNet project run federated learning across two clients." " Congratulations!" -msgstr "" +msgstr "确保服务器仍在运行后,然后就能在每个窗口中看到 MXNet 项目在两个客户端上运行联邦学习了。祝贺!" #: ../../source/example-mxnet-walk-through.rst:358 msgid "" @@ -2399,10 +3078,15 @@ msgid "" " further. 
How about using a CNN or using a different dataset? How about " "adding more clients?" msgstr "" +"此示例的完整源代码在:\"MXNet: From Centralized To Federated (Code) " +"`_。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。现在您已经准备好进一步探讨了。使用 " +"CNN 或使用不同的数据集会如何?添加更多客户端会如何?" #: ../../source/example-pytorch-from-centralized-to-federated.rst:2 msgid "Example: PyTorch - From Centralized To Federated" -msgstr "" +msgstr "实例: PyTorch - 从集中式到联邦式" #: ../../source/example-pytorch-from-centralized-to-federated.rst:4 msgid "" @@ -2415,6 +3099,9 @@ msgid "" "tutorial. Then, we build upon the centralized training code to run the " "training in a federated fashion." msgstr "" +"本教程将向您展示如何使用 Flower 构建现有机器学习工作的联邦版本。我们使用 PyTorch 在 CIFAR-10 " +"数据集上训练一个卷积神经网络。首先,我们基于 \"Deep Learning with PyTorch " +"`_\"教程,采用集中式训练方法介绍了这项机器学习任务。然后,我们在集中式训练代码的基础上以联邦方式运行训练。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:12 msgid "" @@ -2423,6 +3110,8 @@ msgid "" "look at the official `PyTorch tutorial " "`_." msgstr "" +"我们首先简要介绍一下集中式 CNN 训练代码。如果您想获得更深入的解释,请参阅 PyTorch 官方教程`PyTorch tutorial " +"`_。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:15 msgid "" @@ -2433,6 +3122,9 @@ msgid "" "federated learning. You can keep all these imports as they are even when " "we add the federated learning components at a later point." msgstr "" +"让我们创建一个名为 :code:`cifar.py` 的新文件,其中包含 CIFAR-10 " +"传统(集中)培训所需的所有组件。首先,需要导入所有必需的软件包(如 :code:`torch` 和 " +":code:`torchvision`)。您可以看到,我们没有导入任何用于联邦学习的软件包。即使在以后添加联邦学习组件时,也可以保留所有这些导入。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:32 msgid "" @@ -2440,32 +3132,36 @@ msgid "" "learning workload. The model architecture (a very simple Convolutional " "Neural Network) is defined in :code:`class Net()`." 
msgstr "" +"如前所述,我们将使用 CIFAR-10 数据集进行机器学习。模型架构(一个非常简单的卷积神经网络)在 :code:`class Net()` " +"中定义。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:56 msgid "" "The :code:`load_data()` function loads the CIFAR-10 training and test " "sets. The :code:`transform` normalized the data after loading." msgstr "" +":code:`load_data()` 函数加载 CIFAR-10 " +"训练集和测试集。加载数据后,:code:`transform`函数对数据进行了归一化处理。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:74 msgid "" "We now need to define the training (function :code:`train()`) which loops" " over the training set, measures the loss, backpropagates it, and then " "takes one optimizer step for each batch of training examples." -msgstr "" +msgstr "现在,我们需要定义训练函数(:code:`train()`),该函数在训练集上循环训练,计算损失值并反向传播,然后为每批训练数据在优化器上执行一个优化步骤。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:76 msgid "" "The evaluation of the model is defined in the function :code:`test()`. " "The function loops over all test samples and measures the loss of the " "model based on the test dataset." -msgstr "" +msgstr "模型的评估在函数 :code:`test()` 中定义。该函数循环遍历所有测试样本,并计算测试数据集的模型损失值。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:136 msgid "" "Having defined the data loading, model architecture, training, and " "evaluation we can put everything together and train our CNN on CIFAR-10." -msgstr "" +msgstr "在确定了数据加载、模型架构、训练和评估之后,我们就可以将所有整合在一起,在 CIFAR-10 上训练我们的 CNN。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:163 msgid "" @@ -2474,6 +3170,8 @@ msgid "" "simple federated learning system consisting of one server and two " "clients." msgstr "" +"到目前为止,如果你以前用过 " +"PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,利用我们所构建的内容创建一个简单联邦学习系统(由一个服务器和两个客户端组成)。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:169 msgid "" @@ -2484,13 +3182,13 @@ msgid "" "previously. 
Normally, if you'd want to run machine learning workloads in " "a federated fashion, then you'd have to change most of your code and set " "everything up from scratch. This can be a considerable effort." -msgstr "" +msgstr "上一节讨论的简单机器学习项目在单一数据集(CIFAR-10)上训练模型,我们称之为集中学习。如上一节所示,集中学习的概念可能为大多数人所熟知,而且很多人以前都使用过。通常情况下,如果要以联邦方式运行机器学习工作,就必须更改大部分代码,并从头开始设置一切。这可能是一个相当大的工作量。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:173 msgid "" "However, with Flower you can evolve your pre-existing code into a " "federated learning setup without the need for a major rewrite." -msgstr "" +msgstr "不过,有了 Flower,您可以轻松地将已有的代码转变成联邦学习的模式,无需进行大量重写。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:175 msgid "" @@ -2502,6 +3200,8 @@ msgid "" "parameter updates. This describes one round of the federated learning " "process and we repeat this for multiple rounds." msgstr "" +"这个概念很容易理解。我们必须启动一个*服务器*,然后对连接到*服务器*的*客户端*使用 " +":code:`cifar.py`中的代码。*服务器*向客户端发送模型参数,*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后会对所有收到的参数更新进行平均聚合。以上描述的是一轮联邦学习过程,我们将重复进行多轮学习。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:197 msgid "" @@ -2510,6 +3210,9 @@ msgid "" "Our *client* needs to import :code:`flwr`, but also :code:`torch` to " "update the paramters on our PyTorch model:" msgstr "" +"最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 :code:`cifar.py` " +"中定义的集中式训练为基础。我们的 *client* 不仅需要导入 :code:`flwr`,还需要导入 :code:`torch`,以更新 " +"PyTorch 模型的参数:" #: ../../source/example-pytorch-from-centralized-to-federated.rst:213 msgid "" @@ -2524,10 +3227,16 @@ msgid "" "getting/setting model parameters, one method for training the model, and " "one method for testing the model:" msgstr "" +"实现 Flower *client*基本上意味着实现 :code:`flwr.client.Client` 或 " +":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " +":code:`flwr.client.NumPyClient`,并将其命名为 :code:`CifarClient`。如果使用具有良好 NumPy" +" 互操作性的框架(如 PyTorch 或 TensorFlow/Keras),:code:`NumPyClient`的实现比 " 
+":code:`Client`略微容易一些,因为它避免了一些不必要的操作。:code:`CifarClient` " +"需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" #: ../../source/example-pytorch-from-centralized-to-federated.rst:219 msgid ":code:`set_parameters`" -msgstr "" +msgstr ":code:`set_parameters`" #: ../../source/example-pytorch-from-centralized-to-federated.rst:232 msgid "" @@ -2539,22 +3248,26 @@ msgid "" "annotations to give you a better understanding of the data types that get" " passed around." msgstr "" +"这两个 :code:`NumPyClient` 中的方法 :code:`fit` 和 :code:`evaluate` 使用了之前在 " +":code:`cifar.py` 中定义的函数 :code:`train()` 和 :code:`test()`。因此,我们在这里要做的就是通过 " +":code:`NumPyClient` 子类告知 Flower " +"在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让你更好地理解传递的数据类型。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:280 msgid "" "All that's left to do it to define a function that loads both model and " "data, creates a :code:`CifarClient`, and starts this client. You load " "your data and model by using :code:`cifar.py`. Start :code:`CifarClient` " -"with the function :code:`fl.client.start_numpy_client()` by pointing it " -"at the same IP adress we used in :code:`server.py`:" -msgstr "" +"with the function :code:`fl.client.start_client()` by pointing it at the " +"same IP adress we used in :code:`server.py`:" +msgstr "剩下的就是定义模型和数据加载函数了。创建一个:code:`CifarClient`类,并运行这个客服端。您将通过:code:`cifar.py`加载数据和模型。另外,通过:code:`fl.client.start_client()`函数来运行客户端:code:`CifarClient`,需要保证IP地址和:code:`server.py`中所使用的一致:" #: ../../source/example-pytorch-from-centralized-to-federated.rst:307 msgid "" "in each window (make sure that the server is running before you do so) " "and see your (previously centralized) PyTorch project run federated " "learning across two clients. Congratulations!" -msgstr "" +msgstr "确保服务器正在运行后,您就能看到您的 PyTorch 项目(之前是集中式的)在两个客户端上运行联邦学习了。祝贺!" #: ../../source/example-pytorch-from-centralized-to-federated.rst:312 msgid "" @@ -2566,25 +3279,30 @@ msgid "" " further. How about using different subsets of CIFAR-10 on each client? 
" "How about adding more clients?" msgstr "" +"本示例的完整源代码为:`PyTorch: 从集中式到联合式 " +"`_。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。现在,您已经准备好进一步探讨这一主题了。比如在每个客户端使用不同的" +" CIFAR-10 子集会如何?增加更多客户端会如何?" #: ../../source/example-walkthrough-pytorch-mnist.rst:2 msgid "Example: Walk-Through PyTorch & MNIST" -msgstr "" +msgstr "实例: PyTorch 和 MNIST 的演练" #: ../../source/example-walkthrough-pytorch-mnist.rst:4 msgid "" "In this tutorial we will learn, how to train a Convolutional Neural " "Network on MNIST using Flower and PyTorch." -msgstr "" +msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 MNIST 上训练卷积神经网络。" #: ../../source/example-walkthrough-pytorch-mnist.rst:6 -#: ../../source/tutorial-quickstart-mxnet.rst:14 +#: ../../source/tutorial-quickstart-mxnet.rst:16 #: ../../source/tutorial-quickstart-pytorch.rst:17 #: ../../source/tutorial-quickstart-scikitlearn.rst:14 msgid "" "Our example consists of one *server* and two *clients* all having the " "same model." -msgstr "" +msgstr "我们的例子包括一个*服务器*和两个*客户端*,它们都有相同的模型。" #: ../../source/example-walkthrough-pytorch-mnist.rst:8 #: ../../source/tutorial-quickstart-pytorch.rst:19 @@ -2594,24 +3312,24 @@ msgid "" "the *server* which will aggregate them to produce a better model. " "Finally, the *server* sends this improved version of the model back to " "each *client*. A complete cycle of weight updates is called a *round*." -msgstr "" +msgstr "*客户端*负责在其本地数据集上更新模型参数。然后,这些参数会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的模型参数更新周期称为一*轮*。" #: ../../source/example-walkthrough-pytorch-mnist.rst:12 #: ../../source/tutorial-quickstart-pytorch.rst:23 msgid "" "Now that we have a rough idea of what is going on, let's get started. We " "first need to install Flower. 
You can do this by running :" -msgstr "" +msgstr "现在,我们已经有了一个大致的概念了,那就让我们开始吧。首先,我们需要安装 Flower。可以通过运行 :" #: ../../source/example-walkthrough-pytorch-mnist.rst:18 msgid "" "Since we want to use PyTorch to solve a computer vision task, let's go " "ahead an install PyTorch and the **torchvision** library:" -msgstr "" +msgstr "我们想用 PyTorch 来做计算机视觉任务,需要先安装 PyTorch 和 **torchvision** 库:" #: ../../source/example-walkthrough-pytorch-mnist.rst:26 msgid "Ready... Set... Train!" -msgstr "" +msgstr "准备...设置...训练!" #: ../../source/example-walkthrough-pytorch-mnist.rst:28 msgid "" @@ -2624,42 +3342,47 @@ msgid "" "namely *run-server.sh*, and *run-clients.sh*. Don't be afraid to look " "inside, they are simple enough =)." msgstr "" +"现在我们已经安装了所有的依赖包,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 PyTorch 的 " +"`Basic MNIST Example " +"`_。您会发现用 Flower " +"来封装您的代码并进行联邦学习训练是多么容易。我们为您提供了两个辅助脚本,即 *run-server.sh* 和 *run-" +"clients.sh*。别害怕,它们很简单 =)。" #: ../../source/example-walkthrough-pytorch-mnist.rst:31 msgid "" "Go ahead and launch on a terminal the *run-server.sh* script first as " "follows:" -msgstr "" +msgstr "首先在终端上启动 *run-server.sh* 脚本,如下所示:" #: ../../source/example-walkthrough-pytorch-mnist.rst:38 msgid "Now that the server is up and running, go ahead and launch the clients." -msgstr "" +msgstr "现在服务器已经启动并运行,请继续启动客户端。" #: ../../source/example-walkthrough-pytorch-mnist.rst:45 msgid "" "Et voilà! You should be seeing the training procedure and, after a few " "iterations, the test accuracy for each client." -msgstr "" +msgstr "然后就可以了!您应该能看到训练过程,以及经过几次反复后,每个客户端的测试准确率。" #: ../../source/example-walkthrough-pytorch-mnist.rst:66 msgid "Now, let's see what is really happening inside." 
-msgstr "" +msgstr "现在,让我们看看里面到底发生了什么。" #: ../../source/example-walkthrough-pytorch-mnist.rst:69 #: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-mxnet.rst:224 +#: ../../source/tutorial-quickstart-mxnet.rst:226 #: ../../source/tutorial-quickstart-pytorch.rst:203 #: ../../source/tutorial-quickstart-scikitlearn.rst:157 #: ../../source/tutorial-quickstart-tensorflow.rst:98 -#: ../../source/tutorial-quickstart-xgboost.rst:306 +#: ../../source/tutorial-quickstart-xgboost.rst:309 msgid "Flower Server" -msgstr "" +msgstr "Flower 服务器" #: ../../source/example-walkthrough-pytorch-mnist.rst:71 msgid "" "Inside the server helper script *run-server.sh* you will find the " "following code that basically runs the :code:`server.py`" -msgstr "" +msgstr "在服务器辅助脚本 *run-server.sh* 中,你可以找到以下代码,这些代码基本上都是运行 :code:`server.py` 的代码" #: ../../source/example-walkthrough-pytorch-mnist.rst:78 msgid "" @@ -2670,32 +3393,35 @@ msgid "" "leave all the configuration possibilities at their default values, as " "seen below." msgstr "" +"我们可以再深入一点,:code:`server.py` 只是启动了一个服务器,该服务器将协调三轮训练。Flower " +"服务器是非常容易修改的,但对于简单的工作,我们可以使用 :ref:`start_server `函数启动服务器,并将所有可能的配置保留为默认值,如下所示。" #: ../../source/example-walkthrough-pytorch-mnist.rst:89 #: ../../source/tutorial-quickstart-ios.rst:34 -#: ../../source/tutorial-quickstart-mxnet.rst:34 +#: ../../source/tutorial-quickstart-mxnet.rst:36 #: ../../source/tutorial-quickstart-pytorch.rst:37 #: ../../source/tutorial-quickstart-scikitlearn.rst:40 #: ../../source/tutorial-quickstart-tensorflow.rst:29 -#: ../../source/tutorial-quickstart-xgboost.rst:52 +#: ../../source/tutorial-quickstart-xgboost.rst:55 msgid "Flower Client" -msgstr "" +msgstr "Flower 客户端" #: ../../source/example-walkthrough-pytorch-mnist.rst:91 msgid "" "Next, let's take a look at the *run-clients.sh* file. You will see that " "it contains the main loop that starts a set of *clients*." 
-msgstr "" +msgstr "接下来,让我们看看 *run-clients.sh* 文件。您会看到它包含了用来启动多个 *客户端* 的代码。" #: ../../source/example-walkthrough-pytorch-mnist.rst:100 msgid "" "**cid**: is the client ID. It is an integer that uniquely identifies " "client identifier." -msgstr "" +msgstr "**cid**:是客户 ID。它是一个整数,可唯一标识客户标识符。" #: ../../source/example-walkthrough-pytorch-mnist.rst:101 msgid "**sever_address**: String that identifies IP and port of the server." -msgstr "" +msgstr "**sever_address**: 标识服务器 IP 和端口的字符串。" #: ../../source/example-walkthrough-pytorch-mnist.rst:102 msgid "" @@ -2704,6 +3430,8 @@ msgid "" "partition the original MNIST dataset to make sure that every client is " "working on unique subsets of both *training* and *test* sets." msgstr "" +"**nb_clients**: 这定义了正在创建的客户端数量。客户端并不需要这一信息,但它有助于我们对原始 MNIST " +"数据集进行划分,以确保每个客户端都在 *training* 和 *test* 数据集上有独立的数据。" #: ../../source/example-walkthrough-pytorch-mnist.rst:104 msgid "" @@ -2717,6 +3445,12 @@ msgid "" "DataLoaders, the number of epochs in each round, and which device we want" " to use for training (CPU or GPU)." msgstr "" +"我们可以深入看一下 :code:`flwr_example/quickstart-pytorch/client.py`。查看 " +":code:`main` 函数开头的参数解析代码后,你会发现一个对 :code:`mnist.load_data` 的调用。该函数负责分割原始 " +"MNIST 数据集(*training* 和 *test*),并为每个数据集返回一个 " +":code:`torch.utils.data.DataLoader` 。然后,我们实例化一个 " +":code:`PytorchMNISTClient` 对象,其中包含我们的客户端 ID、 " +"DataLoader、每一轮中的遍历数,以及我们希望用于训练的设备(CPU 或 GPU)。" #: ../../source/example-walkthrough-pytorch-mnist.rst:119 msgid "" @@ -2724,22 +3458,26 @@ msgid "" ":code:`fl.client.start_client` along with the server's address as the " "training process begins." 
msgstr "" +"当训练过程开始时,:code:`PytorchMNISTClient` 对象会连同服务器地址一起传递给 " +":code:`fl.client.start_client`。" #: ../../source/example-walkthrough-pytorch-mnist.rst:123 msgid "A Closer Look" -msgstr "" +msgstr "仔细看一下" #: ../../source/example-walkthrough-pytorch-mnist.rst:125 msgid "" "Now, let's look closely into the :code:`PytorchMNISTClient` inside " ":code:`flwr_example.quickstart-pytorch.mnist` and see what it is doing:" msgstr "" +"现在,让我们仔细研究一下 :code:`flwr_example.quickstart-pytorch.mnist` 中的 " +":code:`PytorchMNISTClient`,看看它在做什么:" #: ../../source/example-walkthrough-pytorch-mnist.rst:226 msgid "" "The first thing to notice is that :code:`PytorchMNISTClient` instantiates" " a CNN model inside its constructor" -msgstr "" +msgstr "首先要注意的是 :code:`PytorchMNISTClient` 在其构造函数中实例化了一个 CNN 模型" #: ../../source/example-walkthrough-pytorch-mnist.rst:244 msgid "" @@ -2747,6 +3485,8 @@ msgid "" "and it is reproduced below. It is the same network found in `Basic MNIST " "Example `_." msgstr "" +"CNN 的代码可在 :code:`quickstart-pytorch.mnist` 下找到,现复制如下。它与 `Basic MNIST " +"Example `_中的网络相同。" #: ../../source/example-walkthrough-pytorch-mnist.rst:290 msgid "" @@ -2754,6 +3494,8 @@ msgid "" "inherits from the :code:`fl.client.Client`, and hence it must implement " "the following methods:" msgstr "" +"第二件要注意的事是 :code:`PytorchMNISTClient` 类继承自 " +":code:`fl.client.Client`,因此它必须实现以下方法:" #: ../../source/example-walkthrough-pytorch-mnist.rst:315 msgid "" @@ -2762,12 +3504,14 @@ msgid "" ":code:`train` function and that :code:`evaluate` calls a :code:`test`: " "function." 
msgstr "" +"将抽象类与其派生类 :code:`PytorchMNISTClient` 进行比较时,您会发现 :code:`fit` 调用了一个 " +":code:`train` 函数,而 :code:`evaluate` 则调用了一个 :code:`test`: 函数。" #: ../../source/example-walkthrough-pytorch-mnist.rst:317 msgid "" "These functions can both be found inside the same :code:`quickstart-" "pytorch.mnist` module:" -msgstr "" +msgstr "这些函数都可以在同一个 :code:`quickstart-pytorch.mnist` 模块中找到:" #: ../../source/example-walkthrough-pytorch-mnist.rst:437 msgid "" @@ -2778,10 +3522,12 @@ msgid "" "still work flawlessly. As a matter of fact, why not try and modify the " "code to an example of your liking?" msgstr "" +"请注意,这些函数封装了常规的训练和测试循环,并为 :code:`fit` 和 :code:`evaluate` " +"提供了每轮的最终统计数据。您可以用自定义的训练和测试循环来替代它们,并改变网络结构,整个示例仍然可以完美运行。事实上,为什么不按照自己的喜好修改代码呢?" #: ../../source/example-walkthrough-pytorch-mnist.rst:444 msgid "Give It a Try" -msgstr "" +msgstr "试试看" #: ../../source/example-walkthrough-pytorch-mnist.rst:445 msgid "" @@ -2791,36 +3537,39 @@ msgid "" "a few things you could try on your own and get more experience with " "Flower:" msgstr "" +"通过上面的快速入门代码描述,你将对 Flower " +"中*客户端*和*服务器*的工作方式、如何运行一个简单的实验以及客户端封装器的内部结构有一个很好的了解。您可以自己尝试以下内容,以获得更多使用 " +"Flower 的经验:" #: ../../source/example-walkthrough-pytorch-mnist.rst:448 msgid "" "Try and change :code:`PytorchMNISTClient` so it can accept different " "architectures." -msgstr "" +msgstr "尝试修改 :code:`PytorchMNISTClient`,使其可以接受不同的架构。" #: ../../source/example-walkthrough-pytorch-mnist.rst:449 msgid "Modify the :code:`train` function so that it accepts different optimizers" -msgstr "" +msgstr "修改 :code:`train` 函数,使其接受不同的优化器" #: ../../source/example-walkthrough-pytorch-mnist.rst:450 msgid "" "Modify the :code:`test` function so that it proves not only the top-1 " "(regular accuracy) but also the top-5 accuracy?" -msgstr "" +msgstr "修改 :code:`test` 函数,使其不仅能输出前 1 名(常规精确度),还能证明前 5 名的精确度?" #: ../../source/example-walkthrough-pytorch-mnist.rst:451 msgid "" "Go larger! Try to adapt the code to larger images and datasets. 
Why not " "try training on ImageNet with a ResNet-50?" -msgstr "" +msgstr "让我们尝试让代码适应更大的图像和数据集。为什么不尝试使用 ResNet-50 在 ImageNet 上进行训练呢?" #: ../../source/example-walkthrough-pytorch-mnist.rst:453 msgid "You are ready now. Enjoy learning in a federated way!" -msgstr "" +msgstr "您现在已经准备就绪。尽情享受联邦学习的乐趣吧!" #: ../../source/explanation-differential-privacy.rst:2 msgid "Differential privacy" -msgstr "" +msgstr "差别隐私" #: ../../source/explanation-differential-privacy.rst:4 msgid "" @@ -2829,69 +3578,78 @@ msgid "" "training pipelines defined in any of the various ML frameworks that " "Flower is compatible with." msgstr "" +"Flower 提供了差分隐私 (DP) 封装类,可将 DP-FedAvg 提供的核心 DP 轻松集成到 Flower 兼容的各种 ML " +"框架中定义的训练模式中。" #: ../../source/explanation-differential-privacy.rst:7 +#, fuzzy msgid "" -"Please note that these components are still experimental, the correct " +"Please note that these components are still experimental; the correct " "configuration of DP for a specific task is still an unsolved problem." -msgstr "" +msgstr "请注意,这些组件仍处于试验阶段,如何为特定任务正确配置 DP 仍是一个尚未解决的问题。" #: ../../source/explanation-differential-privacy.rst:10 msgid "" "The name DP-FedAvg is misleading since it can be applied on top of any FL" " algorithm that conforms to the general structure prescribed by the " "FedOpt family of algorithms." -msgstr "" +msgstr "DP-FedAvg 这个名称容易引起误解,因为它可以应用于任何符合 FedOpt 系列算法规定的一般结构的 FL 算法之上。" #: ../../source/explanation-differential-privacy.rst:13 msgid "DP-FedAvg" -msgstr "" +msgstr "DP-FedAvg" #: ../../source/explanation-differential-privacy.rst:15 msgid "" "DP-FedAvg, originally proposed by McMahan et al. [mcmahan]_ and extended " "by Andrew et al. [andrew]_, is essentially FedAvg with the following " "modifications." -msgstr "" +msgstr "DP-FedAvg 最初由McMahan等人提出,并由Andrew等人加以扩展。" #: ../../source/explanation-differential-privacy.rst:17 msgid "" "**Clipping** : The influence of each client's update is bounded by " "clipping it. 
This is achieved by enforcing a cap on the L2 norm of the " "update, scaling it down if needed." -msgstr "" +msgstr "**裁剪** : 裁剪会影响到每个客户端的模型参数。具体做法是对参数的 L2 准则设置上限,必要时将其缩减。" #: ../../source/explanation-differential-privacy.rst:18 msgid "" "**Noising** : Gaussian noise, calibrated to the clipping threshold, is " "added to the average computed at the server." -msgstr "" +msgstr "**噪声** : 在服务器计算出的平均值中加入高斯噪声,该噪声根据剪切阈值进行校准。" #: ../../source/explanation-differential-privacy.rst:20 +#, fuzzy msgid "" "The distribution of the update norm has been shown to vary from task-to-" -"task and to evolve as training progresses. Therefore, we use an adaptive " -"approach [andrew]_ that continuously adjusts the clipping threshold to " -"track a prespecified quantile of the update norm distribution." -msgstr "" +"task and to evolve as training progresses. This variability is crucial in" +" understanding its impact on differential privacy guarantees, emphasizing" +" the need for an adaptive approach [andrew]_ that continuously adjusts " +"the clipping threshold to track a prespecified quantile of the update " +"norm distribution." +msgstr "事实证明,参数更新准则的分布会随着任务的不同而变化,并随着训练的进展而演变。因此,我们采用了一种自适应方法,该方法会不断调整剪切阈值,以跟踪参数更新准则分布的预设量化值。" #: ../../source/explanation-differential-privacy.rst:23 msgid "Simplifying Assumptions" -msgstr "" +msgstr "简化假设" #: ../../source/explanation-differential-privacy.rst:25 +#, fuzzy msgid "" "We make (and attempt to enforce) a number of assumptions that must be " -"satisfied to ensure that the training process actually realises the " +"satisfied to ensure that the training process actually realizes the " ":math:`(\\epsilon, \\delta)` guarantees the user has in mind when " "configuring the setup." 
msgstr "" +"我们提出(并试图执行)了一系列必须满足的假设,以确保训练过程真正实现用户在配置设置时所定的 :math:`(\\epsilon,\\delta)`" +" 。" #: ../../source/explanation-differential-privacy.rst:27 msgid "" "**Fixed-size subsampling** :Fixed-size subsamples of the clients must be " "taken at each round, as opposed to variable-sized Poisson subsamples." -msgstr "" +msgstr "** 固定大小的子样本** :与可变大小的泊松分布子样本相比,每轮必须抽取固定大小的客户端子样本。" #: ../../source/explanation-differential-privacy.rst:28 msgid "" @@ -2899,32 +3657,33 @@ msgid "" "weighted equally in the aggregate to eliminate the requirement for the " "server to know in advance the sum of the weights of all clients available" " for selection." -msgstr "" +msgstr "**非加权平均**: 所有客户端的贡献必须加权相等,这样服务器就不需要事先知道所有客户的权重总和。" #: ../../source/explanation-differential-privacy.rst:29 msgid "" "**No client failures** : The set of available clients must stay constant " "across all rounds of training. In other words, clients cannot drop out or" " fail." -msgstr "" +msgstr "**没有失败的客户端** : 在各轮训练中,可用客户端的数量必须保持不变。换句话说,客户端不能退出或失败。" #: ../../source/explanation-differential-privacy.rst:31 +#, fuzzy msgid "" "The first two are useful for eliminating a multitude of complications " -"associated with calibrating the noise to the clipping threshold while the" -" third one is required to comply with the assumptions of the privacy " +"associated with calibrating the noise to the clipping threshold, while " +"the third one is required to comply with the assumptions of the privacy " "analysis." -msgstr "" +msgstr "前两种方法有助于消除将噪声校准为削波阈值所带来的诸多复杂问题,而第三种方法则需要符合隐私分析的假设。" #: ../../source/explanation-differential-privacy.rst:34 msgid "" "These restrictions are in line with constraints imposed by Andrew et al. " "[andrew]_." 
-msgstr "" +msgstr "这些限制与 Andrew 等人所施加的限制一致。" #: ../../source/explanation-differential-privacy.rst:37 msgid "Customizable Responsibility for Noise injection" -msgstr "" +msgstr "可定制的噪声注入" #: ../../source/explanation-differential-privacy.rst:38 msgid "" @@ -2935,7 +3694,7 @@ msgid "" "amount of noise to the clipped update, with the result that simply " "aggregating the noisy updates is equivalent to the explicit addition of " "noise to the non-noisy aggregate at the server." -msgstr "" +msgstr "与其他在服务器上添加噪声的实现方法不同,您可以配置噪声注入的位置,以便更好地匹配您的威胁模型。我们为用户提供了设置训练的灵活性,使每个客户端都能独立地为剪切参数更新添加少量噪声,这样,只需聚合噪声更新,就相当于在服务器上为非噪声聚合添加噪声了。" #: ../../source/explanation-differential-privacy.rst:41 msgid "" @@ -2945,10 +3704,13 @@ msgid "" "simple maths to show that this is equivalent to each client adding noise " "with scale :math:`\\sigma_\\Delta/\\sqrt{m}`." msgstr "" +"准确地说,我们假设每轮采样的客户端数量为:math:`m`,:math:`\\sigma_\\Delta` " +"为需要添加到模型更新总和中的总高斯噪声的规模,我们就可以用简单的数学方法证明了,这相当于每个客户端都添加了规模为 " +":math:`\\sigma_\\Delta/\\sqrt{m}` 的噪声。" #: ../../source/explanation-differential-privacy.rst:44 msgid "Wrapper-based approach" -msgstr "" +msgstr "基于封装的方法" #: ../../source/explanation-differential-privacy.rst:46 msgid "" @@ -2964,15 +3726,21 @@ msgid "" "classes every time a new class implementing :code:`Strategy` or " ":code:`NumPyClient` is defined." 
msgstr "" +"在现有工作负载中引入 DP 可以被认为是在其周围增加了一层额外的安全性。受此启发,我们提供了额外的服务器端和客户端逻辑,分别作为 " +":code:`Strategy` 和 :code:`NumPyClient` " +"抽象类实例的封装器,使训练过程具有不同的隐私性。这种基于封装器的方法的优点是可以很容易地与将来有人贡献给 Flower " +"的其他封装器(例如用于安全聚合的封装器)进行组合。使用继承可能会比较繁琐,因为每次定义实现 :code:`Strategy` 或 " +":code:`NumPyClient` 的新类时,都需要创建新的子类。" #: ../../source/explanation-differential-privacy.rst:49 msgid "Server-side logic" -msgstr "" +msgstr "服务器端逻辑" #: ../../source/explanation-differential-privacy.rst:51 +#, fuzzy msgid "" "The first version of our solution was to define a decorator whose " -"constructor accepted, among other things, a boolean valued variable " +"constructor accepted, among other things, a boolean-valued variable " "indicating whether adaptive clipping was to be enabled or not. We quickly" " realized that this would clutter its :code:`__init__()` function with " "variables corresponding to hyperparameters of adaptive clipping that " @@ -2983,10 +3751,17 @@ msgid "" "parameter :code:`server_side_noising`, which, as the name suggests, " "determines where noising is to be performed." msgstr "" +"我们的第一版解决方案是定义一个装饰器,其构造函数接受一个布尔值变量,表示是否启用自适应剪裁。我们很快意识到,这样会使其 " +":code:`__init__()` " +"函数中与自适应裁剪超参数相对应的变量变得杂乱无章,而这些变量在自适应裁剪被禁用时将保持未使用状态。要实现更简洁的功能,可以将该功能拆分为两个装饰器,即" +" :code:`DPFedAvgFixed` 和 " +":code:`DPFedAvgAdaptive`,后者是前者的子类。这两个类的构造函数都接受一个布尔参数 " +":code:`server_side_noising`,顾名思义,它决定了在哪里加噪声。" #: ../../source/explanation-differential-privacy.rst:54 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 msgid "DPFedAvgFixed" -msgstr "" +msgstr "DPFedAvgFixed" #: ../../source/explanation-differential-privacy.rst:56 msgid "" @@ -2995,6 +3770,8 @@ msgid "" "captured with the help of wrapper logic for just the following two " "methods of the :code:`Strategy` abstract class." 
msgstr "" +"只需对 :code:`Strategy` 抽象类的以下两个方法进行封装,就能完全捕获 DP-FedAvg " +"原始版本(即执行固定剪裁的版本)所需的服务器端功能。" #: ../../source/explanation-differential-privacy.rst:58 msgid "" @@ -3007,8 +3784,13 @@ msgid "" "entails *post*-processing of the results returned by the wrappee's " "implementation of :code:`configure_fit()`." msgstr "" +":code:`configure_fit()` :由封装的 :code:`Strategy` " +"发送到每个客户端的配置字典需要使用等于裁剪阈值的附加值(在 :code:`dpfedavg_clip_norm` 下键入)进行扩充。并且,如果 " +"server_side_noising=true,则另一个值等于需要在客户端添加的高斯噪声的大小(在 dpfedavg_noise_stddev " +"下键入)。这需要对封装后的configure_fit() 所返回的结果进行后处理。" #: ../../source/explanation-differential-privacy.rst:59 +#, fuzzy msgid "" ":code:`aggregate_fit()`: We check whether any of the sampled clients " "dropped out or failed to upload an update before the round timed out. In " @@ -3019,17 +3801,22 @@ msgid "" ":code:`parameters` field of :code:`FitRes` for each received update and " "setting it to 1. Furthermore, if :code:`server_side_noising=true`, each " "update is perturbed with an amount of noise equal to what it would have " -"been subjected to had client-side noising being enabled. This entails " +"been subjected to had client-side noising being enabled. This entails " "*pre*-processing of the arguments to this method before passing them on " "to the wrappee's implementation of :code:`aggregate_fit()`." msgstr "" +":code:`aggregate_fit()`: " +"我们会检查是否有任何客户端在本轮超时前退出或未能上传参数更新。在这种情况下,我们需要中止当前一轮,丢弃已收到的所有参数更新,然后继续下一轮。另一方面,如果所有客户端都成功响应,我们就必须通过拦截" +" :code:`FitRes` 的 :code:`parameters` 字段并将其设置为 1,强制以不加权的方式平均更新。此外,如果 " +":code:`server_side_noising=true`,每次更新都会受到一定量的噪声扰动,其扰动量相当于启用客户端噪声时的扰动量。 " +"这就需要在将本方法的参数传递给封装的 :code:`aggregate_fit()` 之前,对参数进行*预*处理。" #: ../../source/explanation-differential-privacy.rst:62 msgid "" "We can't directly change the aggregation function of the wrapped strategy" " to force it to add noise to the aggregate, hence we simulate client-side" " noising to implement server-side noising." 
-msgstr "" +msgstr "我们无法直接改变封装策略的聚合函数,迫使它在聚合中添加噪声,因此我们模拟客户端噪声来实现服务器端噪声。" #: ../../source/explanation-differential-privacy.rst:64 msgid "" @@ -3043,10 +3830,13 @@ msgid "" "required to calculate the amount of noise that must be added to each " "individual update, either by the server or the clients." msgstr "" +"这些变化被整合到一个名为 :code:`DPFedAvgFixed` " +"的类中,其构造函数接受被装饰的策略、剪切阈值和每轮采样的客户数作为必选参数。用户需要指定剪切阈值,因为参数更新规范的数量级在很大程度上取决于正在训练的模型,提供默认值会产生误导。每轮采样的客户端数量是计算服务器或客户在每次参数更新时添加的噪音量所必需的。" #: ../../source/explanation-differential-privacy.rst:67 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 msgid "DPFedAvgAdaptive" -msgstr "" +msgstr "DPFedAvgAdaptive" #: ../../source/explanation-differential-privacy.rst:69 msgid "" @@ -3055,6 +3845,8 @@ msgid "" ":code:`DPFedAvgFixed`. It overrides the above-mentioned methods to do the" " following." msgstr "" +"自适应剪裁所需的附加功能在 :code:`DPFedAvgAdaptive` 中提供,其是 :code:`DPFedAvgFixed` " +"的子类。它重写了上述方法,以实现以下功能。" #: ../../source/explanation-differential-privacy.rst:71 msgid "" @@ -3064,6 +3856,10 @@ msgid "" "interprets as an instruction to include an indicator bit (1 if update " "norm <= clipping threshold, 0 otherwise) in the results returned by it." msgstr "" +":code:`configure_fit()`:它截取由 :code:`super.configure_fit()` 返回的 config " +"字典,并在其中添加键-值对 " +":code:`dpfedavg_adaptive_clip_enabled:True\",客户端将其解释为在返回结果中包含一个指示位(如果参数更新范式" +" <= 剪裁阈值,则为 1,否则为 0)的指令。" #: ../../source/explanation-differential-privacy.rst:73 msgid "" @@ -3071,11 +3867,11 @@ msgid "" ":code:`super.aggregate_fit()` with one to :code:`__update_clip_norm__()`," " a procedure which adjusts the clipping threshold on the basis of the " "indicator bits received from the sampled clients." 
-msgstr "" +msgstr ":code:`aggregate_fit()`:在调用:code:`super.aggregate_fit()`后,再调用:code:`__update_clip_norm__()`,该过程根据从采样客户端接收到的指示位调整裁剪阈值。" #: ../../source/explanation-differential-privacy.rst:77 msgid "Client-side logic" -msgstr "" +msgstr "客户端逻辑" #: ../../source/explanation-differential-privacy.rst:79 msgid "" @@ -3088,12 +3884,14 @@ msgid "" " work if either (or both) of the following keys are also present in the " "dict." msgstr "" +"客户端所需的功能完全可以通过 :code:`NumPyClient` 抽象类的 :code:`fit()` " +"方法的封装逻辑来实现。准确地说,我们需要对封装客户端计算的参数更新进行处理,以便在必要时将其剪切到服务器作为配置字典的一部分提供的阈值。除此之外,如果配置字典中还存在以下任一(或两个)键,客户端可能还需要执行一些额外的工作。" #: ../../source/explanation-differential-privacy.rst:81 msgid "" ":code:`dpfedavg_noise_stddev` : Generate and add the specified amount of " "noise to the clipped update." -msgstr "" +msgstr "code:`dpfedavg_noise_stddev`:生成并在剪切参数更新中添加指定数量的噪声。" #: ../../source/explanation-differential-privacy.rst:82 msgid "" @@ -3101,10 +3899,12 @@ msgid "" ":code:`FitRes` object being returned to the server with an indicator bit," " calculated as described earlier." msgstr "" +":code:`dpfedavg_adaptive_clip_enabled`:在返回给服务器的 :code:`FitRes` " +"对象中的度量值字典中增加一个指标位,计算方法如前所述。" #: ../../source/explanation-differential-privacy.rst:86 msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" -msgstr "" +msgstr "进行 :math:`(epsilon, \\delta)` 分析" #: ../../source/explanation-differential-privacy.rst:88 msgid "" @@ -3113,39 +3913,49 @@ msgid "" ":math:`\\epsilon` value this would result in for a particular " ":math:`\\delta`, the following script may be used." msgstr "" +"假设您已经训练了 :math:`n` 轮,采样比例为 :math:`q`,噪声乘数为 :math:`z`。为了计算特定 " +":math:`\\delta` 的 :math:`epsilon` 值,可以使用下面的脚本。" #: ../../source/explanation-differential-privacy.rst:98 +#, fuzzy msgid "" -"McMahan, H. Brendan, et al. \"Learning differentially private recurrent " -"language models.\" arXiv preprint arXiv:1710.06963 (2017)." +"McMahan et al. 
\"Learning Differentially Private Recurrent Language " +"Models.\" International Conference on Learning Representations (ICLR), " +"2017." msgstr "" +"McMahan, H. Brendan等. \"Learning differentially private recurrent " +"language models.\" arXiv preprint arXiv:1710.06963 (2017)." #: ../../source/explanation-differential-privacy.rst:100 +#, fuzzy msgid "" -"Andrew, Galen, et al. \"Differentially private learning with adaptive " +"Andrew, Galen, et al. \"Differentially Private Learning with Adaptive " +"Clipping.\" Advances in Neural Information Processing Systems (NeurIPS), " +"2021." +msgstr "" +"Andrew, Galen等. \"Differentially private learning with adaptive " "clipping.\" Advances in Neural Information Processing Systems 34 (2021): " "17455-17466." -msgstr "" #: ../../source/explanation-federated-evaluation.rst:2 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 msgid "Federated evaluation" -msgstr "" +msgstr "联邦学习评估" #: ../../source/explanation-federated-evaluation.rst:4 msgid "" "There are two main approaches to evaluating models in federated learning " "systems: centralized (or server-side) evaluation and federated (or " "client-side) evaluation." -msgstr "" +msgstr "评估联合学习系统中的模型主要有两种方法:集中(或服务器端)评估和联邦(或客户端)评估。" #: ../../source/explanation-federated-evaluation.rst:8 msgid "Centralized Evaluation" -msgstr "" +msgstr "集中评估" #: ../../source/explanation-federated-evaluation.rst:11 msgid "Built-In Strategies" -msgstr "" +msgstr "内置策略" #: ../../source/explanation-federated-evaluation.rst:13 msgid "" @@ -3153,11 +3963,11 @@ msgid "" "evaluation function during initialization. 
An evaluation function is any " "function that can take the current global model parameters as input and " "return evaluation results:" -msgstr "" +msgstr "所有内置策略都通过在初始化过程中提供一个评估函数来支持集中评估。评估函数是任何可以将当前全局模型参数作为输入并返回评估结果的函数:" #: ../../source/explanation-federated-evaluation.rst:58 msgid "Custom Strategies" -msgstr "" +msgstr "定制策略" #: ../../source/explanation-federated-evaluation.rst:60 msgid "" @@ -3167,30 +3977,32 @@ msgid "" ":code:`evaluate` after parameter aggregation and before federated " "evaluation (see next paragraph)." msgstr "" +":code:`Strategy` 抽象提供了一个名为 :code:`evaluate` " +"的方法,可直接用于评估当前的全局模型参数。服务器会在参数聚合后和联邦评估前调用 :code:`evaluate`(见下段)。" #: ../../source/explanation-federated-evaluation.rst:65 msgid "Federated Evaluation" -msgstr "" +msgstr "联邦评估" #: ../../source/explanation-federated-evaluation.rst:68 msgid "Implementing Federated Evaluation" -msgstr "" +msgstr "实现联邦评估" #: ../../source/explanation-federated-evaluation.rst:70 msgid "" "Client-side evaluation happens in the :code:`Client.evaluate` method and " "can be configured from the server side." -msgstr "" +msgstr "客户端评估在 :code:`Client.evaluate` 方法中进行,并可从服务器端进行配置。" #: ../../source/explanation-federated-evaluation.rst:101 msgid "Configuring Federated Evaluation" -msgstr "" +msgstr "配置联邦评估" #: ../../source/explanation-federated-evaluation.rst:103 msgid "" "Federated evaluation can be configured from the server side. Built-in " "strategies support the following arguments:" -msgstr "" +msgstr "联邦评估可从服务器端进行配置。内置策略支持以下参数:" #: ../../source/explanation-federated-evaluation.rst:105 msgid "" @@ -3201,6 +4013,10 @@ msgid "" "for evaluation. If :code:`fraction_evaluate` is set to :code:`0.0`, " "federated evaluation will be disabled." 
msgstr ""
+":code:`fraction_evaluate`: :code:`float`,定义了被选中进行评估的客户端的比例。如果 "
+":code:`fraction_evaluate` 设置为 :code:`0.1`,并且 :code:`100` 个客户端连接到服务器,那么 "
+":code:`10` 个客户端将被随机选中进行评估。如果 :code:`fraction_evaluate` 设置为 "
+":code:`0.0`,联邦评估将被禁用。"

#: ../../source/explanation-federated-evaluation.rst:106
msgid ""
@@ -3210,6 +4026,9 @@ msgid ""
":code:`100` clients are connected to the server, then :code:`20` clients "
"will be selected for evaluation."
msgstr ""
+":code:`min_evaluate_clients`:一个 :code:`int`,需要评估的客户的最小数量。如果 "
+":code:`fraction_evaluate` 设置为 :code:`0.1`,:code:`min_evaluate_clients` "
+"设置为 20,并且有 :code:`100` 个客户端已连接到服务器,那么 :code:`20` 个客户端将被选中进行评估。"

#: ../../source/explanation-federated-evaluation.rst:107
msgid ""
@@ -3220,6 +4039,9 @@ msgid ""
"will wait until more clients are connected before it continues to sample "
"clients for evaluation."
msgstr ""
+":code:`min_available_clients`: "
+":code:`int`,定义了在一轮联邦评估开始之前,需要连接到服务器的最小客户端数量。如果连接到服务器的客户端数量少于 "
+":code:`min_available_clients`,服务器将等待更多客户端连接后,才继续采样客户端进行评估。"

#: ../../source/explanation-federated-evaluation.rst:108
msgid ""
@@ -3228,22 +4050,22 @@ msgid ""
"be called during each round and provides a convenient way to customize "
"client-side evaluation from the server side, for example, to configure "
"the number of validation steps performed."
-msgstr ""
+msgstr ":code:`on_evaluate_config_fn`:返回配置字典的函数,该字典将发送给选定的客户端。该函数将在每一轮中被调用,并提供了一种方便的方法来从服务器端自定义客户端评估,例如,配置执行的验证步骤数。"

#: ../../source/explanation-federated-evaluation.rst:135
msgid "Evaluating Local Model Updates During Training"
-msgstr ""
+msgstr "评估训练期间的本地模型更新"

#: ../../source/explanation-federated-evaluation.rst:137
msgid ""
"Model parameters can also be evaluated during training. 
" ":code:`Client.fit` can return arbitrary evaluation results as a " "dictionary:" -msgstr "" +msgstr "模型参数也可在训练过程中进行评估。 :code:`Client.fit`可以字典形式返回任意评估结果:" #: ../../source/explanation-federated-evaluation.rst:177 msgid "Full Code Example" -msgstr "" +msgstr "完整代码示例" #: ../../source/explanation-federated-evaluation.rst:179 msgid "" @@ -3252,79 +4074,82 @@ msgid "" "be applied to workloads implemented in any other framework): " "https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" msgstr "" +"有关同时使用集中评估和联邦评估的完整代码示例,请参阅 *Advanced TensorFlow " +"Example*(同样的方法也可应用于任何其他框架中): " +"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" #: ../../source/fed/0000-20200102-fed-template.md:10 msgid "FED Template" -msgstr "" +msgstr "FED 模板" #: ../../source/fed/0000-20200102-fed-template.md:12 #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 msgid "Table of Contents" -msgstr "" +msgstr "目录" #: ../../source/fed/0000-20200102-fed-template.md:14 #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 msgid "[Table of Contents](#table-of-contents)" -msgstr "" +msgstr "[目录](#table-of-contents)" #: ../../source/fed/0000-20200102-fed-template.md:15 #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 msgid "[Summary](#summary)" -msgstr "" +msgstr "[总结](#summary)" #: ../../source/fed/0000-20200102-fed-template.md:16 #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 msgid "[Motivation](#motivation)" -msgstr "" +msgstr "[动机](#motivation)" #: ../../source/fed/0000-20200102-fed-template.md:17 #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 msgid "[Goals](#goals)" -msgstr "" +msgstr "[目标](#goals)" #: ../../source/fed/0000-20200102-fed-template.md:18 #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 msgid "[Non-Goals](#non-goals)" -msgstr "" +msgstr "[非目标](#non-goals)" #: ../../source/fed/0000-20200102-fed-template.md:19 #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 msgid 
"[Proposal](#proposal)" -msgstr "" +msgstr "[计划](#proposal)" #: ../../source/fed/0000-20200102-fed-template.md:20 #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 msgid "[Drawbacks](#drawbacks)" -msgstr "" +msgstr "[缺点](#drawbacks)" #: ../../source/fed/0000-20200102-fed-template.md:21 #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 msgid "[Alternatives Considered](#alternatives-considered)" -msgstr "" +msgstr "[备选方案](#alternatives-considered)" #: ../../source/fed/0000-20200102-fed-template.md:22 msgid "[Appendix](#appendix)" -msgstr "" +msgstr "[附录](#appendix)" #: ../../source/fed/0000-20200102-fed-template.md:24 #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 msgid "Summary" -msgstr "" +msgstr "总结" #: ../../source/fed/0000-20200102-fed-template.md:26 msgid "\\[TODO - sentence 1: summary of the problem\\]" -msgstr "" +msgstr "\\[TODO - 句子 1: 问题概括\\]" #: ../../source/fed/0000-20200102-fed-template.md:28 msgid "\\[TODO - sentence 2: summary of the solution\\]" -msgstr "" +msgstr "\\[TODO - 句子 2: 解决方案概括\\]" #: ../../source/fed/0000-20200102-fed-template.md:30 #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 msgid "Motivation" -msgstr "" +msgstr "动机" #: ../../source/fed/0000-20200102-fed-template.md:32 #: ../../source/fed/0000-20200102-fed-template.md:36 @@ -3334,126 +4159,122 @@ msgstr "" #: ../../source/fed/0000-20200102-fed-template.md:54 #: ../../source/fed/0000-20200102-fed-template.md:58 msgid "\\[TODO\\]" -msgstr "" +msgstr "\\[TODO\\]" #: ../../source/fed/0000-20200102-fed-template.md:34 #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 msgid "Goals" -msgstr "" +msgstr "目标" #: ../../source/fed/0000-20200102-fed-template.md:38 #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 #: 
../../source/fed/0001-20220311-flower-enhancement-doc.md:79 msgid "Non-Goals" -msgstr "" +msgstr "非目标" #: ../../source/fed/0000-20200102-fed-template.md:42 #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 msgid "Proposal" -msgstr "" +msgstr "提案" #: ../../source/fed/0000-20200102-fed-template.md:46 #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 msgid "Drawbacks" -msgstr "" +msgstr "缺点" #: ../../source/fed/0000-20200102-fed-template.md:50 #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 msgid "Alternatives Considered" -msgstr "" +msgstr "备选方案" #: ../../source/fed/0000-20200102-fed-template.md:52 msgid "\\[Alternative 1\\]" -msgstr "" +msgstr "\\[备选 1\\]" #: ../../source/fed/0000-20200102-fed-template.md:56 msgid "\\[Alternative 2\\]" -msgstr "" - -#: ../../source/fed/0000-20200102-fed-template.md:60 -msgid "Appendix" -msgstr "" +msgstr "\\[备选 2\\]" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 msgid "Flower Enhancement Doc" -msgstr "" +msgstr "Flower 改善文档" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 msgid "[Enhancement Doc Template](#enhancement-doc-template)" -msgstr "" +msgstr "[增强文档模版](#enhancement-doc-template)" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 msgid "[Metadata](#metadata)" -msgstr "" +msgstr "[描述数据](#metadata)" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 msgid "[Workflow](#workflow)" -msgstr "" +msgstr "[工作流程](#workflow)" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 msgid "[GitHub Issues](#github-issues)" -msgstr "" +msgstr "[GitHub 问题](#github-issues)" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 msgid "[Google Docs](#google-docs)" -msgstr "" +msgstr "[谷歌文档](#google-docs)" #: 
../../source/fed/0001-20220311-flower-enhancement-doc.md:30 msgid "A Flower Enhancement is a standardized development process to" -msgstr "" +msgstr "改善 Flower 功能是一个标准化的开发流程,目的是" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 msgid "provide a common structure for proposing larger changes" -msgstr "" +msgstr "为提出更大规模的改动提供一个共同的结构" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 msgid "ensure that the motivation for a change is clear" -msgstr "" +msgstr "确保改动的动机明确" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 msgid "persist project information in a version control system" -msgstr "" +msgstr "将项目信息保存在版本控制系统中" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 msgid "document the motivation for impactful user-facing changes" -msgstr "" +msgstr "记录面向用户的具有影响力的改动的动机" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 msgid "reserve GitHub issues for tracking work in flight" -msgstr "" +msgstr "保留 GitHub 问题,用于跟踪进行中的工作" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 msgid "" "ensure community participants can successfully drive changes to " "completion across one or more releases while stakeholders are adequately " "represented throughout the process" -msgstr "" +msgstr "确保社区参与者能够成功推动改动,完成一个或多个版本,同时利益相关者在整个过程中得到充分展现" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 msgid "Hence, an Enhancement Doc combines aspects of" -msgstr "" +msgstr "因此,\"增强文件\"将以下方面结合起来" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 msgid "a feature, and effort-tracking document" -msgstr "" +msgstr "一个功能和效力跟踪文档" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 msgid "a product requirements document" -msgstr "" +msgstr "一个产品需要文档" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 msgid "a design document" -msgstr "" +msgstr "一个设计文档" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 msgid "" "into one file, which is created incrementally in 
collaboration with the " "community." -msgstr "" +msgstr "该文件是与社区合作逐步创建的。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 msgid "" @@ -3461,6 +4282,8 @@ msgid "" "beyond a single GitHub issue or pull request is required to understand " "and communicate upcoming changes to the project." msgstr "" +"对于向 Flower 提出的远期变更或功能,需要一个超越单个 GitHub 问题或拉取请求(pull " +"request)的抽象概念,以了解和沟通项目即将发生的变更。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 msgid "" @@ -3469,6 +4292,8 @@ msgid "" "video calls, and hallway conversations into a well-tracked artifact, this" " process aims to enhance communication and discoverability." msgstr "" +"这一流程的目的是减少我们社区中 \"部落知识 \"的数量。通过将决策从 Slack " +"线程、视频通话和走廊对话转移到一个跟踪良好的工作环境中,该流程旨在加强沟通和可发现性。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 msgid "" @@ -3476,7 +4301,7 @@ msgid "" " process. If an enhancement would be described in either written or " "verbal communication to anyone besides the author or developer, then " "consider creating an Enhancement Doc." -msgstr "" +msgstr "任何较大的、面向用户的增强都应遵循增强流程。如果要以书面或口头形式向作者或开发人员以外的任何人描述增强功能,则应考虑创建改善文档。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 msgid "" @@ -3484,7 +4309,7 @@ msgid "" " that will impact a large section of the development community should " "also be communicated widely. The Enhancement process is suited for this " "even if it will have zero impact on the typical user or operator." -msgstr "" +msgstr "同样,任何会对开发社区的大部分人产生影响的技术工作(重构、重大架构变更)也应广泛传播。即使对典型用户或操作员的影响为零,改进流程也适用于这种情况。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 msgid "" @@ -3493,118 +4318,120 @@ msgid "" "adding new Federated Learning algorithms, as these only add features " "without changing how Flower works or is used." 
msgstr "" +"对于小的改动和添加,通过 \"改善\"程序既耗时又没有必要。例如,这包括添加新的联邦学习算法,因为这只会增加功能,而不会改变 \"Flower " +"\"的工作或使用方式。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 msgid "" "Enhancements are different from feature requests, as they are already " "providing a laid-out path for implementation and are championed by " "members of the community." -msgstr "" +msgstr "增强功能与功能请求不同,因为它们已经提供了实施路径,并得到了社区成员的支持。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 msgid "" "An Enhancement is captured in a Markdown file that follows a defined " "template and a workflow to review and store enhancement docs for " "reference — the Enhancement Doc." -msgstr "" +msgstr "增强功能被记录在一个 Markdown 文件中,该文件遵循已定义的模板和工作流程,用于审查和存储增强功能文档(即增强功能文档)以供参考。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 msgid "Enhancement Doc Template" -msgstr "" +msgstr "增强文档模板" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 msgid "" "Each enhancement doc is provided as a Markdown file having the following " "structure" -msgstr "" +msgstr "每个增强文档都以 Markdown 文件的形式提供,其结构如下" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" -msgstr "" +msgstr "描述数据([如下所述](#metadata) 以 YAML 前言的形式出现)" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 msgid "Title (same as in metadata)" -msgstr "" +msgstr "标题(与描述数据中的标题相同)" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 msgid "Table of Contents (if needed)" -msgstr "" +msgstr "目录(如有需要)" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 msgid "Notes/Constraints/Caveats (optional)" -msgstr "" +msgstr "注意事项/限制/警告(可选)" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 msgid "Design Details (optional)" -msgstr "" +msgstr "设计细节(可选)" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 msgid "Graduation Criteria" -msgstr "" +msgstr "毕业标准" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 
msgid "Upgrade/Downgrade Strategy (if applicable)" -msgstr "" +msgstr "升级/降级策略(如适用)" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 msgid "As a reference, this document follows the above structure." -msgstr "" +msgstr "作为参考,本文件采用上述结构。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 msgid "Metadata" -msgstr "" +msgstr "描述数据" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 msgid "" "**fed-number** (Required) The `fed-number` of the last Flower Enhancement" " Doc + 1. With this number, it becomes easy to reference other proposals." -msgstr "" +msgstr "**fed-number**(必填)上一个Flower增强文件的 \"fed-number \"+1。有了这个编号,就很容易参考其他提案。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 msgid "**title** (Required) The title of the proposal in plain language." -msgstr "" +msgstr "**标题** (必填)用简明语言写出提案的标题。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 msgid "" "**status** (Required) The current status of the proposal. See " "[workflow](#workflow) for the possible states." -msgstr "" +msgstr "**status** (必填)提案的当前状态。有关可能的状态,请参阅 [工作流程](#workflow)。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 msgid "" "**authors** (Required) A list of authors of the proposal. This is simply " "the GitHub ID." -msgstr "" +msgstr "**作者**(必填) 提案的作者列表。这只是 GitHub ID。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 msgid "" "**creation-date** (Required) The date that the proposal was first " "submitted in a PR." -msgstr "" +msgstr "**创建日期**(必填) 建议书在 PR 中首次提交的日期。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 msgid "" "**last-updated** (Optional) The date that the proposal was last changed " "significantly." -msgstr "" +msgstr "**最后更新** (可选)提案最后一次重大修改的日期。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 msgid "" "**see-also** (Optional) A list of other proposals that are relevant to " "this one." 
-msgstr "" +msgstr "**另见** (可选)与本提案相关的其他提案清单。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 msgid "**replaces** (Optional) A list of proposals that this one replaces." -msgstr "" +msgstr "**取代**(可选) 这份提案所取代的提案列表。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." -msgstr "" +msgstr "**被取代者** (可选) 此提案取代的提案列表。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 msgid "Workflow" -msgstr "" +msgstr "工作流程" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 msgid "" @@ -3612,7 +4439,7 @@ msgid "" "pitched in the community. As such, it needs a champion, usually the " "author, who shepherds the enhancement. This person also has to find " "committers to Flower willing to review the proposal." -msgstr "" +msgstr "形成增强功能的想法应该已经在社区中讨论过或提出过。因此,它需要一个支持者(通常是作者)来引导增强。这个人还必须找到愿意审核提案的提交者。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 msgid "" @@ -3622,6 +4449,9 @@ msgid "" "state as part of a pull request. Discussions are done as part of the pull" " request review." msgstr "" +"新的增强功能以 `NNNN-YYYYMMDD-enhancement-title.md` 的文件名签入,其中 `NNNN` " +"是花朵增强文档的编号,并将其转入 `enhancements`。作为拉取请求(pull request)的一部分,所有增强功能都从 " +"`provisional` 状态开始。讨论是作为拉取请求审查的一部分进行的。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 msgid "" @@ -3631,65 +4461,67 @@ msgid "" "enhancement as part of their description. After the implementation is " "done, the proposal status is changed to `implemented`." msgstr "" +"一旦增强功能通过审核和批准,其状态就会变为 " +"`可实施`。实际的实施工作将在单独的拉取请求中完成。这些拉取请求应在其描述中提及相应的增强功能。实施完成后,提案状态将更改为 `已实施`。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 msgid "" "Under certain conditions, other states are possible. 
An Enhancement has " "the following states:" -msgstr "" +msgstr "在某些条件下,还可能出现其他状态。增强提案具有以下状态:" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 msgid "" "`provisional`: The enhancement has been proposed and is actively being " "defined. This is the starting state while the proposal is being fleshed " "out and actively defined and discussed." -msgstr "" +msgstr "`暂定`: 已提出改进建议并正在积极定义。这是在提案得到充实、积极定义和讨论时的起始状态。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 msgid "`implementable`: The enhancement has been reviewed and approved." -msgstr "" +msgstr "`可实施`: 增强功能已审核通过。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 msgid "" "`implemented`: The enhancement has been implemented and is no longer " "actively changed." -msgstr "" +msgstr "`已实施`: 增强功能已实施,不再主动更改。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 msgid "`deferred`: The enhancement is proposed but not actively being worked on." -msgstr "" +msgstr "`推迟`: 已提出改进建议,但尚未积极开展工作。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 msgid "" "`rejected`: The authors and reviewers have decided that this enhancement " "is not moving forward." -msgstr "" +msgstr "`拒绝`: 作者和审稿人已决定不再推进该增强功能。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 msgid "`withdrawn`: The authors have withdrawn the enhancement." -msgstr "" +msgstr "`撤回`: 作者已撤回增强功能。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 msgid "`replaced`: The enhancement has been replaced by a new enhancement." -msgstr "" +msgstr "`已替换`: 增强功能已被新的增强功能取代。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 msgid "" "Adding an additional process to the ones already provided by GitHub " "(Issues and Pull Requests) adds more complexity and can be a barrier for " "potential first-time contributors." 
-msgstr "" +msgstr "在 GitHub 已提供的流程(问题和拉取请求)之外再增加一个流程,会增加复杂性,并可能成为潜在首次贡献者的障碍。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 msgid "" "Expanding the proposal template beyond the single-sentence description " "currently required in the features issue template may be a heavy burden " "for non-native English speakers." -msgstr "" +msgstr "对于英语非母语者来说,将提案模板扩展到目前要求的单句描述之外可能是一个沉重的负担。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 msgid "GitHub Issues" -msgstr "" +msgstr "GitHub 问题" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 msgid "" @@ -3701,10 +4533,13 @@ msgid "" "parts of the doc. Managing these multiple discussions can be confusing " "when using GitHub Issues." msgstr "" +"使用 GitHub Issues 进行此类改进是可行的。例如,我们可以使用标签来区分和过滤这些问题。主要的问题在于讨论和审查增强功能: " +"GitHub 问题只有一个评论线程。而增强功能通常会同时有多个讨论线程,针对文档的不同部分。在使用 GitHub " +"问题时,管理这些多重讨论会很混乱。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 msgid "Google Docs" -msgstr "" +msgstr "谷歌文档" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 msgid "" @@ -3715,41 +4550,43 @@ msgid "" "proposals as part of Flower's repository, the potential for missing links" " is much higher." msgstr "" +"谷歌文档允许多线程讨论。但是,由于谷歌文档是在项目之外托管的,因此需要注意它们是否能被社区发现。我们必须管理所有提案的链接列表,并提供给社区使用。与作为" +" Flower 资源库一部分的提案相比,丢失链接的可能性要大得多。" #: ../../source/fed/index.md:1 msgid "FED - Flower Enhancement Doc" -msgstr "" +msgstr "FED - Flower 增强文件" #: ../../source/how-to-aggregate-evaluation-results.rst:2 msgid "Aggregate evaluation results" -msgstr "" +msgstr "整合评估结果" #: ../../source/how-to-aggregate-evaluation-results.rst:4 msgid "" "The Flower server does not prescribe a way to aggregate evaluation " "results, but it enables the user to fully customize result aggregation." 
-msgstr "" +msgstr "Flower 服务器没有规定整合评估结果的方法,但用户可以完全自定义如何整合。" #: ../../source/how-to-aggregate-evaluation-results.rst:8 msgid "Aggregate Custom Evaluation Results" -msgstr "" +msgstr "自定义整合评估结果" #: ../../source/how-to-aggregate-evaluation-results.rst:10 msgid "" "The same :code:`Strategy`-customization approach can be used to aggregate" " custom evaluation results coming from individual clients. Clients can " "return custom metrics to the server by returning a dictionary:" -msgstr "" +msgstr "同样的 :code:`Strategy` 定制方法也可用于汇总来自单个客户端的自定义评估结果。客户端可以通过返回字典的方式向服务器返回自定义指标:" #: ../../source/how-to-aggregate-evaluation-results.rst:36 msgid "" "The server can then use a customized strategy to aggregate the metrics " "provided in these dictionaries:" -msgstr "" +msgstr "然后,服务器可以使用定制的策略来汇总这些字典中提供的指标:" #: ../../source/how-to-configure-clients.rst:2 msgid "Configure clients" -msgstr "" +msgstr "配置客户端" #: ../../source/how-to-configure-clients.rst:4 msgid "" @@ -3757,11 +4594,11 @@ msgid "" "clients. Configuration values can be used for various purposes. They are," " for example, a popular way to control client-side hyperparameters from " "the server." -msgstr "" +msgstr "除了模型参数,Flower 还可以向客户端发送配置值。配置值有多种用途。它们是一种从服务器控制客户端超参数的常用方法。" #: ../../source/how-to-configure-clients.rst:7 msgid "Configuration values" -msgstr "" +msgstr "配置值" #: ../../source/how-to-configure-clients.rst:9 msgid "" @@ -3770,6 +4607,8 @@ msgid "" "float), ``int``, or ``str`` (or equivalent types in different languages)." " Here is an example of a configuration dictionary in Python:" msgstr "" +"配置值以字典的形式表示,字典的键为 ``str``,值的类型为 ``bool``、``bytes``、``double``(64 " +"位精度浮点型)、``int``或 ``str`(或不同语言中的等效类型)。下面是一个 Python 配置字典的示例:" #: ../../source/how-to-configure-clients.rst:20 msgid "" @@ -3777,6 +4616,8 @@ msgid "" "short) to their ProtoBuf representation, transports them to the client " "using gRPC, and then deserializes them back to Python dictionaries." 
msgstr "" +"Flower 将这些配置字典(简称 *config dict*)序列化为 ProtoBuf 表示形式,使用 gRPC " +"将其传输到客户端,然后再反序列化为 Python 字典。" #: ../../source/how-to-configure-clients.rst:24 msgid "" @@ -3786,6 +4627,8 @@ msgid "" " by converting them to one of the supported value types (and converting " "them back on the client-side)." msgstr "" +"目前,还不支持在配置字典中直接发送作为值的集合类型(例如,`Set``, `List`, " +"`Map``)。有几种变通方法可将集合转换为支持的值类型之一(并在客户端将其转换回),从而将集合作为值发送。" #: ../../source/how-to-configure-clients.rst:26 msgid "" @@ -3793,11 +4636,11 @@ msgid "" "string, then send the JSON string using the configuration dictionary, and" " then convert the JSON string back to a list of floating-point numbers on" " the client." -msgstr "" +msgstr "例如,可以将浮点数列表转换为 JSON 字符串,然后使用配置字典发送 JSON 字符串,再在客户端将 JSON 字符串转换回浮点数列表。" #: ../../source/how-to-configure-clients.rst:30 msgid "Configuration through built-in strategies" -msgstr "" +msgstr "通过内置策略进行配置" #: ../../source/how-to-configure-clients.rst:32 msgid "" @@ -3808,6 +4651,8 @@ msgid "" "the current round. It then forwards the configuration dictionary to all " "the clients selected during that round." msgstr "" +"向客户端发送配置值的最简单方法是使用内置策略,如 " +":code:`FedAvg`。内置策略支持所谓的配置函数。配置函数是内置策略调用的函数,用于获取当前轮的配置字典。然后,它会将配置字典转发给该轮中选择的所有客户端。" #: ../../source/how-to-configure-clients.rst:34 msgid "" @@ -3815,18 +4660,18 @@ msgid "" "size that the client should use, (b) the current global round of " "federated learning, and (c) the number of epochs to train on the client-" "side. 
Our configuration function could look like this:" -msgstr "" +msgstr "让我们从一个简单的例子开始。想象一下,我们想要发送给客户端(a)应该使用的批次大小,(b)当前联邦学习的全局轮次,以及(c)客户端训练的遍历数。我们的配置函数可以是这样的:" #: ../../source/how-to-configure-clients.rst:47 msgid "" "To make the built-in strategies use this function, we can pass it to " "``FedAvg`` during initialization using the parameter " ":code:`on_fit_config_fn`:" -msgstr "" +msgstr "为了让内置策略使用这个函数,我们可以在初始化时使用参数 :code:`on_fit_config_fn` 将它传递给 ``FedAvg`` :" #: ../../source/how-to-configure-clients.rst:56 msgid "One the client side, we receive the configuration dictionary in ``fit``:" -msgstr "" +msgstr "在客户端,我们在 ``fit`` 中接收配置字典:" #: ../../source/how-to-configure-clients.rst:67 msgid "" @@ -3835,6 +4680,8 @@ msgid "" " send different configuration values to `evaluate` (for example, to use a" " different batch size)." msgstr "" +"还有一个 `on_evaluate_config_fn` 用于配置评估,其工作方式相同。它们是不同的函数,因为可能需要向 `evaluate` " +"发送不同的配置值(例如,使用不同的批量大小)。" #: ../../source/how-to-configure-clients.rst:69 msgid "" @@ -3845,43 +4692,51 @@ msgid "" "hyperparameter schedule, for example, to increase the number of local " "epochs during later rounds, we could do the following:" msgstr "" +"内置策略每轮都会调用此函数(即每次运行 `Strategy.configure_fit` 或 " +"`Strategy.configure_evaluate` 时)。每轮调用 `on_evaluate_config_fn` " +"允许我们在连续几轮中改变配置指令。例如,如果我们想实现一个超参数时间表,以增加后几轮的本地遍历次数,我们可以这样做:" #: ../../source/how-to-configure-clients.rst:82 msgid "The :code:`FedAvg` strategy will call this function *every round*." -msgstr "" +msgstr "代码:`FedAvg`策略*每轮*都会调用该函数。" #: ../../source/how-to-configure-clients.rst:85 msgid "Configuring individual clients" -msgstr "" +msgstr "配置个别客户端" #: ../../source/how-to-configure-clients.rst:87 msgid "" "In some cases, it is necessary to send different configuration values to " "different clients." 
-msgstr "" +msgstr "在某些情况下,有必要向不同的客户端发送不同的配置值。" #: ../../source/how-to-configure-clients.rst:89 msgid "" "This can be achieved by customizing an existing strategy or by " "`implementing a custom strategy from scratch " -"`_. " +"`_. " "Here's a nonsensical example that customizes :code:`FedAvg` by adding a " "custom ``\"hello\": \"world\"`` configuration key/value pair to the " "config dict of a *single client* (only the first client in the list, the " "other clients in this round to not receive this \"special\" config " "value):" msgstr "" +"这可以通过定制现有策略或 `从头开始实施一个定制策略 `_来实现。下面是一个无厘头的例子,`FedAvg`通过在*单个客户端*的配置指令(config " +"dict)中添加自定义的``\"hello\": \"world\"``配置键/值对添加到此的配置 dict " +"中(仅列表中的第一个客户端,本轮中的其他客户端不会收到此 \"特殊 \"配置值):" #: ../../source/how-to-configure-logging.rst:2 msgid "Configure logging" -msgstr "" +msgstr "配置日志记录" #: ../../source/how-to-configure-logging.rst:4 msgid "" "The Flower logger keeps track of all core events that take place in " "federated learning workloads. It presents information by default " "following a standard message format:" -msgstr "" +msgstr "Flower 日志记录器会跟踪联邦学习工作负载中发生的所有核心事件。它默认按照标准信息格式提供信息:" #: ../../source/how-to-configure-logging.rst:13 msgid "" @@ -3890,10 +4745,12 @@ msgid "" "took place from, as well as the log message itself. In this way, the " "logger would typically display information on your terminal as follows:" msgstr "" +"相关信息包括:日志信息级别(例如 " +":code:`INFO`、:code:`DEBUG`)、时间戳、日志记录的行以及日志信息本身。这样,日志记录器通常会在终端上显示如下信息:" #: ../../source/how-to-configure-logging.rst:34 msgid "Saving log to file" -msgstr "" +msgstr "将日志保存到文件" #: ../../source/how-to-configure-logging.rst:36 msgid "" @@ -3907,6 +4764,12 @@ msgid "" "`_" " function. 
For example:" msgstr "" +"默认情况下,Flower 日志会输出到启动联邦学习工作负载的终端。这既适用于基于 gRPC 的联邦学习(即执行 " +":code:`fl.server.start_server` 时),也适用于使用 :code:`VirtualClientEngine` " +"时(即执行 :code:`fl.simulation.start_simulation` " +"时)。在某些情况下,您可能希望将此日志保存到磁盘。为此,您可以调用 `fl.common.logger.configure() " +"`_" +" 函数。例如:" #: ../../source/how-to-configure-logging.rst:53 msgid "" @@ -3915,27 +4778,30 @@ msgid "" "you are running the code from. If we inspect we see the log above is also" " recorded but prefixing with :code:`identifier` each line:" msgstr "" +"通过上述操作,Flower 会将您在终端上看到的日志记录到 " +":code:`log.txt`。该文件将创建在运行代码的同一目录下。如果我们检查一下,就会发现上面的日志也被记录了下来,但每一行都以 " +":code:`identifier` 作为前缀:" #: ../../source/how-to-configure-logging.rst:74 msgid "Log your own messages" -msgstr "" +msgstr "记录自己的信息" #: ../../source/how-to-configure-logging.rst:76 msgid "" "You might expand the information shown by default with the Flower logger " "by adding more messages relevant to your application. You can achieve " "this easily as follows." -msgstr "" +msgstr "您可以通过添加更多与应用程序相关的信息来扩展 Flower 日志记录器默认显示的信息。您可以通过以下方法轻松实现这一目标。" #: ../../source/how-to-configure-logging.rst:102 msgid "" "In this way your logger will show, in addition to the default messages, " "the ones introduced by the clients as specified above." -msgstr "" +msgstr "这样,除默认信息外,您的日志记录器还将显示由客户引入的信息,如上文所述。" #: ../../source/how-to-configure-logging.rst:128 msgid "Log to a remote service" -msgstr "" +msgstr "登录远程服务" #: ../../source/how-to-configure-logging.rst:130 msgid "" @@ -3949,16 +4815,20 @@ msgid "" ":code:`HTTPHandler` should you whish to backup or analyze the logs " "somewhere else." 
msgstr "" +"此外,:code:`fl.common.logger.configure`函数还允许指定主机,通过本地 Python " +":code:`logging.handler.HTTPHandler`,向该主机推送日志(通过 :code:`POST`)。在基于 " +":code:`gRPC` 的联邦学习工作负载中,这是一个特别有用的功能,否则从所有实体(即服务器和客户端)收集日志可能会很麻烦。请注意,在 " +"Flower 模拟器中,服务器会自动显示所有日志。如果希望在其他地方备份或分析日志,仍可指定 :code:`HTTPHandler`。" #: ../../source/how-to-enable-ssl-connections.rst:2 msgid "Enable SSL connections" -msgstr "" +msgstr "启用 SSL 连接" #: ../../source/how-to-enable-ssl-connections.rst:4 msgid "" "This guide describes how to a SSL-enabled secure Flower server can be " "started and how a Flower client can establish a secure connections to it." -msgstr "" +msgstr "本指南介绍如何启动启用 SSL 的安全 Flower 服务器,以及 Flower 客户端如何与其建立安全连接。" #: ../../source/how-to-enable-ssl-connections.rst:7 msgid "" @@ -3966,6 +4836,8 @@ msgid "" "`here `_." msgstr "" +"有关安全连接的完整代码示例,请参见 `_ 。" #: ../../source/how-to-enable-ssl-connections.rst:10 msgid "" @@ -3973,11 +4845,11 @@ msgid "" "start it. Although it is already SSL-enabled, it might be less " "descriptive on how. Stick to this guide for a deeper introduction to the " "topic." -msgstr "" +msgstr "代码示例附带的 README.md 文件将解释如何启动它。虽然它已经启用了 SSL,但对如何启用可能描述较少。请参考本指南,了解更深入的相关介绍。" #: ../../source/how-to-enable-ssl-connections.rst:16 msgid "Certificates" -msgstr "" +msgstr "证书" #: ../../source/how-to-enable-ssl-connections.rst:18 msgid "" @@ -3987,16 +4859,18 @@ msgid "" "to ask you to run the script in :code:`examples/advanced-" "tensorflow/certificates/generate.sh`" msgstr "" +"使用支持 SSL 的连接需要向服务器和客户端传递证书。在本指南中,我们将生成自签名证书。由于这可能会变得相当复杂,我们将要求你运行 " +":code:`examples/advanced-tensorflow/certificates/generate.sh` 中的脚本" #: ../../source/how-to-enable-ssl-connections.rst:23 msgid "with the following command sequence:" -msgstr "" +msgstr "使用以下命令序列:" #: ../../source/how-to-enable-ssl-connections.rst:30 msgid "" "This will generate the certificates in :code:`examples/advanced-" "tensorflow/.cache/certificates`." 
-msgstr "" +msgstr "这将在 :code:`examples/advanced-tensorflow/.cache/certificates` 中生成证书。" #: ../../source/how-to-enable-ssl-connections.rst:32 msgid "" @@ -4005,23 +4879,24 @@ msgid "" "complete for production environments. Please refer to other sources " "regarding the issue of correctly generating certificates for production " "environments." -msgstr "" +msgstr "本示例中生成 SSL 证书的方法可作为启发和起点,但不应被视为生产环境的完整方法。有关在生产环境中正确生成证书的问题,请参考其他资料。" #: ../../source/how-to-enable-ssl-connections.rst:36 msgid "" "In case you are a researcher you might be just fine using the self-signed" " certificates generated using the scripts which are part of this guide." -msgstr "" +msgstr "如果你是一名研究人员,使用本指南中的脚本生成的自签名证书就可以了。" #: ../../source/how-to-enable-ssl-connections.rst:41 +#: ../../source/ref-api/flwr.server.Server.rst:2 msgid "Server" -msgstr "" +msgstr "服务器" #: ../../source/how-to-enable-ssl-connections.rst:43 msgid "" "We are now going to show how to write a sever which uses the previously " "generated scripts." -msgstr "" +msgstr "现在,我们将展示如何编写一个使用先前生成的脚本的服务器。" #: ../../source/how-to-enable-ssl-connections.rst:61 msgid "" @@ -4030,18 +4905,20 @@ msgid "" "those files into byte strings, which is the data type " ":code:`start_server` expects." msgstr "" +"在提供证书时,服务器希望得到由三个证书组成的元组。 :code:`Path` 可用于轻松地将这些文件的内容读取为字节字符串,这就是 " +":code:`start_server` 期望的数据类型。" #: ../../source/how-to-enable-ssl-connections.rst:65 #: ../../source/how-to-upgrade-to-flower-1.0.rst:37 -#: ../../source/ref-api-flwr.rst:15 +#: ../../source/ref-api/flwr.client.Client.rst:2 msgid "Client" -msgstr "" +msgstr "客户端" #: ../../source/how-to-enable-ssl-connections.rst:67 msgid "" "We are now going to show how to write a client which uses the previously " "generated scripts:" -msgstr "" +msgstr "现在我们将演示如何编写一个客户端,使用之前生成的脚本:" #: ../../source/how-to-enable-ssl-connections.rst:84 msgid "" @@ -4049,40 +4926,43 @@ msgid "" "encoded root certificates as a byte string. 
We are again using "
":code:`Path` to simplify reading those as byte strings."
 msgstr ""
+"当设置 :code:`root_certificates` 时,客户端希望 PEM 编码的根证书是字节字符串。我们再次使用 "
+":code:`Path` 来简化以字节字符串形式读取证书的过程。"
 
 #: ../../source/how-to-enable-ssl-connections.rst:89
+#: ../../source/how-to-use-built-in-mods.rst:85
 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:287
 msgid "Conclusion"
-msgstr ""
+msgstr "总结"
 
 #: ../../source/how-to-enable-ssl-connections.rst:91
 msgid ""
 "You should now have learned how to generate self-signed certificates "
 "using the given script, start a SSL-enabled server, and have a client "
 "establish a secure connection to it."
-msgstr ""
+msgstr "现在,你应该已经学会了如何使用给定的脚本生成自签名证书、启动启用 SSL 的服务器并让客户端与其建立安全连接。"
 
 #: ../../source/how-to-enable-ssl-connections.rst:96
 msgid "Additional resources"
-msgstr ""
+msgstr "补充资源"
 
 #: ../../source/how-to-enable-ssl-connections.rst:98
 msgid ""
 "These additional sources might be relevant if you would like to dive "
 "deeper into the topic of certificates:"
-msgstr ""
+msgstr "如果您想更深入地了解证书主题,这些额外的资料来源可能有帮助:"
 
 #: ../../source/how-to-enable-ssl-connections.rst:100
 msgid "`Let's Encrypt `_"
-msgstr ""
+msgstr "`Let's Encrypt `_"
 
 #: ../../source/how-to-enable-ssl-connections.rst:101
 msgid "`certbot `_"
-msgstr ""
+msgstr "`certbot `_"
 
 #: ../../source/how-to-implement-strategies.rst:2
 msgid "Implement strategies"
-msgstr ""
+msgstr "实施策略"
 
 #: ../../source/how-to-implement-strategies.rst:4
 msgid ""
@@ -4093,10 +4973,12 @@ msgid ""
 "evaluate models. Flower provides a few built-in strategies which are "
 "based on the same API described below."
 msgstr ""
+"策略抽象类可以实现完全定制的策略。策略基本上就是在服务器上运行的联邦学习算法。策略决定如何对客户端进行采样、如何配置客户端进行训练、如何聚合参数更新以及如何评估模型。Flower"
+" 提供了一些内置策略,这些策略基于下文所述的相同 API。"
 
 #: ../../source/how-to-implement-strategies.rst:11
 msgid "The :code:`Strategy` abstraction"
-msgstr ""
+msgstr ":code:`Strategy` 抽象类"
 
 #: ../../source/how-to-implement-strategies.rst:13
 msgid ""
@@ -4106,31 +4988,33 @@ msgid ""
 "implementations have the exact same capabilities at their disposal as "
 "built-in ones."
 msgstr ""
+"所有策略实现均源自抽象基类 "
+":code:`flwr.server.strategy.Strategy`,包括内置实现和第三方实现。这意味着自定义策略实现与内置实现具有完全相同的功能。"
 
 #: ../../source/how-to-implement-strategies.rst:18
 msgid ""
 "The strategy abstraction defines a few abstract methods that need to be "
 "implemented:"
-msgstr ""
+msgstr "策略抽象定义了一些需要实现的抽象方法:"
 
 #: ../../source/how-to-implement-strategies.rst:75
 msgid ""
 "Creating a new strategy means implementing a new :code:`class` (derived "
 "from the abstract base class :code:`Strategy`) that implements for the "
 "previously shown abstract methods:"
-msgstr ""
+msgstr "创建一个新策略意味着要实现一个新的 :code:`class`(从抽象基类 :code:`Strategy` 派生),该类要实现前面显示的抽象方法:"
 
 #: ../../source/how-to-implement-strategies.rst:100
 msgid "The Flower server calls these methods in the following order:"
-msgstr ""
+msgstr "Flower 服务器按以下顺序调用这些方法:"
 
 #: ../../source/how-to-implement-strategies.rst:177
 msgid "The following sections describe each of those methods in more detail."
-msgstr ""
+msgstr "下文将详细介绍每种方法。"
 
 #: ../../source/how-to-implement-strategies.rst:180
 msgid "The :code:`initialize_parameters` method"
-msgstr ""
+msgstr ":code:`initialize_parameters` 方法"
 
 #: ../../source/how-to-implement-strategies.rst:182
 msgid ""
@@ -4138,13 +5022,15 @@ msgid ""
 "of an execution. It is responsible for providing the initial global model"
 " parameters in a serialized form (i.e., as a :code:`Parameters` object)."
msgstr "" +":code:`initialize_parameters` 只调用一次,即在执行开始时。它负责以序列化形式(即 " +":code:`Parameters` 对象)提供初始全局模型参数。" #: ../../source/how-to-implement-strategies.rst:184 msgid "" "Built-in strategies return user-provided initial parameters. The " "following example shows how initial parameters can be passed to " ":code:`FedAvg`:" -msgstr "" +msgstr "内置策略会返回用户提供的初始参数。下面的示例展示了如何将初始参数传递给 :code:`FedAvg`:" #: ../../source/how-to-implement-strategies.rst:209 msgid "" @@ -4157,6 +5043,10 @@ msgid "" "useful for prototyping. In practice, it is recommended to always use " "server-side parameter initialization." msgstr "" +"Flower 服务器将调用 :code:`initialize_parameters`,返回传给 " +":code:`initial_parameters` 的参数或 :code:`None`。如果 " +":code:`initialize_parameters` 没有返回任何参数(即 " +":code:`None`),服务器将随机选择一个客户端并要求其提供参数。这只是一个便捷的功能,在实际应用中并不推荐使用,但在原型开发中可能很有用。在实践中,建议始终使用服务器端参数初始化。" #: ../../source/how-to-implement-strategies.rst:213 msgid "" @@ -4165,11 +5055,11 @@ msgid "" " It is also the fundamental capability needed to implement hybrid " "approaches, for example, to fine-tune a pre-trained model using federated" " learning." -msgstr "" +msgstr "服务器端参数初始化是一种强大的机制。例如,它可以用来从先前保存的检查点恢复训练。它也是实现混合方法所需的基本能力,例如,使用联邦学习对预先训练好的模型进行微调。" #: ../../source/how-to-implement-strategies.rst:216 msgid "The :code:`configure_fit` method" -msgstr "" +msgstr ":code:`configure_fit`方法" #: ../../source/how-to-implement-strategies.rst:218 msgid "" @@ -4178,13 +5068,16 @@ msgid "" "round means selecting clients and deciding what instructions to send to " "these clients. The signature of :code:`configure_fit` makes this clear:" msgstr "" +":code:`configure_fit` " +"负责配置即将开始的一轮训练。*配置*在这里是什么意思?配置一轮训练意味着选择客户并决定向这些客户发送什么指令。:code:`configure_fit`" +" 说明了这一点:" #: ../../source/how-to-implement-strategies.rst:231 msgid "" "The return value is a list of tuples, each representing the instructions " "that will be sent to a particular client. 
Strategy implementations " "usually perform the following steps in :code:`configure_fit`:" -msgstr "" +msgstr "返回值是一个元组列表,每个元组代表将发送到特定客户端的指令。策略实现通常在 :code:`configure_fit` 中执行以下步骤:" #: ../../source/how-to-implement-strategies.rst:233 #: ../../source/how-to-implement-strategies.rst:280 @@ -4192,12 +5085,16 @@ msgid "" "Use the :code:`client_manager` to randomly sample all (or a subset of) " "available clients (each represented as a :code:`ClientProxy` object)" msgstr "" +"使用 :code:`client_manager` 随机抽样所有(或部分)可用客户端(每个客户端都表示为 :code:`ClientProxy` " +"对象)" #: ../../source/how-to-implement-strategies.rst:234 msgid "" "Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the " "current global model :code:`parameters` and :code:`config` dict" msgstr "" +"将每个 :code:`ClientProxy` 与持有当前全局模型 :code:`parameters` 和 :code:`config` " +"dict 的 :code:`FitIns` 配对" #: ../../source/how-to-implement-strategies.rst:236 msgid "" @@ -4206,6 +5103,8 @@ msgid "" "in a round if the corresponding :code:`ClientProxy` is included in the " "the list returned from :code:`configure_fit`." msgstr "" +"更复杂的实现可以使用 :code:`configure_fit` 来实现自定义的客户端选择逻辑。只有当相应的 " +":code:`ClientProxy` 包含在 :code:`configure_fit` 返回的列表中时,客户端才会参与进来。" #: ../../source/how-to-implement-strategies.rst:240 msgid "" @@ -4216,17 +5115,19 @@ msgid "" "different hyperparameters on different clients (via the :code:`config` " "dict)." msgstr "" +"该返回值的结构为用户提供了很大的灵活性。由于指令是按客户端定义的,因此可以向每个客户端发送不同的指令。这使得自定义策略成为可能,例如在不同的客户端上训练不同的模型,或在不同的客户端上使用不同的超参数(通过" +" :code:`config` dict)。" #: ../../source/how-to-implement-strategies.rst:243 msgid "The :code:`aggregate_fit` method" -msgstr "" +msgstr ":code:`aggregate_fit` 方法" #: ../../source/how-to-implement-strategies.rst:245 msgid "" ":code:`aggregate_fit` is responsible for aggregating the results returned" " by the clients that were selected and asked to train in " ":code:`configure_fit`." 
-msgstr "" +msgstr ":code:`aggregate_fit` 负责汇总在 :code:`configure_fit` 中选择并要求训练的客户端所返回的结果。" #: ../../source/how-to-implement-strategies.rst:258 msgid "" @@ -4235,6 +5136,8 @@ msgid "" ":code:`configure_fit`). :code:`aggregate_fit` therefore receives a list " "of :code:`results`, but also a list of :code:`failures`." msgstr "" +"当然,失败是有可能发生的,因此无法保证服务器会从它发送指令(通过 :code:`configure_fit`)的所有客户端获得结果。因此 " +":code:`aggregate_fit` 会收到 :code:`results` 的列表,但也会收到 :code:`failures` 的列表。" #: ../../source/how-to-implement-strategies.rst:260 msgid "" @@ -4243,10 +5146,13 @@ msgid "" " optional because :code:`aggregate_fit` might decide that the results " "provided are not sufficient for aggregation (e.g., too many failures)." msgstr "" +":code:`aggregate_fit` 返回一个可选的 :code:`Parameters` " +"对象和一个聚合度量的字典。:code:`Parameters` 返回值是可选的,因为 :code:`aggregate_fit` " +"可能会认为所提供的结果不足以进行聚合(例如,失败次数过多)。" #: ../../source/how-to-implement-strategies.rst:263 msgid "The :code:`configure_evaluate` method" -msgstr "" +msgstr ":code:`configure_evaluate`方法" #: ../../source/how-to-implement-strategies.rst:265 msgid "" @@ -4256,19 +5162,24 @@ msgid "" "instructions to send to these clients. The signature of " ":code:`configure_evaluate` makes this clear:" msgstr "" +":code:`configure_evaluate` " +"负责配置下一轮评估。*配置*在这里是什么意思?配置一轮评估意味着选择客户端并决定向这些客户端发送什么指令。:code:`configure_evaluate`" +" 说明了这一点:" #: ../../source/how-to-implement-strategies.rst:278 msgid "" "The return value is a list of tuples, each representing the instructions " "that will be sent to a particular client. 
Strategy implementations " "usually perform the following steps in :code:`configure_evaluate`:" -msgstr "" +msgstr "返回值是一个元组列表,每个元组代表将发送到特定客户端的指令。策略实现通常在 :code:`configure_evaluate` 中执行以下步骤:" #: ../../source/how-to-implement-strategies.rst:281 msgid "" "Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding " "the current global model :code:`parameters` and :code:`config` dict" msgstr "" +"将每个 :code:`ClientProxy` 与持有当前全局模型 :code:`parameters` 和 :code:`config` " +"dict 的 :code:`EvaluateIns` 配对" #: ../../source/how-to-implement-strategies.rst:283 msgid "" @@ -4277,6 +5188,8 @@ msgid "" "in a round if the corresponding :code:`ClientProxy` is included in the " "the list returned from :code:`configure_evaluate`." msgstr "" +"更复杂的实现可以使用 :code:`configure_evaluate` 来实现自定义的客户端选择逻辑。只有当相应的 " +":code:`ClientProxy` 包含在 :code:`configure_evaluate` 返回的列表中时,客户端才会参与进来。" #: ../../source/how-to-implement-strategies.rst:287 msgid "" @@ -4287,10 +5200,12 @@ msgid "" "different hyperparameters on different clients (via the :code:`config` " "dict)." msgstr "" +"该返回值的结构为用户提供了很大的灵活性。由于指令是按客户端定义的,因此可以向每个客户端发送不同的指令。这使得自定义策略可以在不同客户端上评估不同的模型,或在不同客户端上使用不同的超参数(通过" +" :code:`config` dict)。" #: ../../source/how-to-implement-strategies.rst:291 msgid "The :code:`aggregate_evaluate` method" -msgstr "" +msgstr ":code:`aggregate_evaluate` 方法" #: ../../source/how-to-implement-strategies.rst:293 msgid "" @@ -4298,6 +5213,8 @@ msgid "" "returned by the clients that were selected and asked to evaluate in " ":code:`configure_evaluate`." msgstr "" +":code:`aggregate_evaluate` 负责汇总在 :code:`configure_evaluate` " +"中选择并要求评估的客户端返回的结果。" #: ../../source/how-to-implement-strategies.rst:306 msgid "" @@ -4306,6 +5223,9 @@ msgid "" ":code:`configure_evaluate`). :code:`aggregate_evaluate` therefore " "receives a list of :code:`results`, but also a list of :code:`failures`." 
msgstr "" +"当然,失败是有可能发生的,因此无法保证服务器会从它发送指令(通过 " +":code:`configure_evaluate`)的所有客户端获得结果。因此, :code:`aggregate_evaluate` 会接收 " +":code:`results` 的列表,但也会接收 :code:`failures` 的列表。" #: ../../source/how-to-implement-strategies.rst:308 msgid "" @@ -4314,10 +5234,13 @@ msgid "" "optional because :code:`aggregate_evaluate` might decide that the results" " provided are not sufficient for aggregation (e.g., too many failures)." msgstr "" +":code:`aggregate_evaluate` 返回一个可选的 " +":code:`float`(损失值)和一个聚合指标字典。:code:`float` 返回值是可选的,因为 " +":code:`aggregate_evaluate` 可能会认为所提供的结果不足以进行聚合(例如,失败次数过多)。" #: ../../source/how-to-implement-strategies.rst:311 msgid "The :code:`evaluate` method" -msgstr "" +msgstr ":code:`evaluate`方法" #: ../../source/how-to-implement-strategies.rst:313 msgid "" @@ -4326,6 +5249,9 @@ msgid "" ":code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies " "to perform both servers-side and client-side (federated) evaluation." msgstr "" +":code:`evaluate` 负责在服务器端评估模型参数。除了 " +":code:`configure_evaluate`/:code:`aggregate_evaluate` 之外,:code:`evaluate`" +" 可以使策略同时执行服务器端和客户端(联邦)评估。" #: ../../source/how-to-implement-strategies.rst:323 msgid "" @@ -4334,82 +5260,127 @@ msgid "" ":code:`evaluate` method might not complete successfully (e.g., it might " "fail to load the server-side evaluation data)." 
msgstr "" +"返回值也是可选的,因为策略可能不需要执行服务器端评估,或者因为用户定义的 :code:`evaluate` " +"方法可能无法成功完成(例如,它可能无法加载服务器端评估数据)。" #: ../../source/how-to-install-flower.rst:2 msgid "Install Flower" -msgstr "" +msgstr "安装Flower" #: ../../source/how-to-install-flower.rst:6 msgid "Python version" -msgstr "" +msgstr "Python 版本" #: ../../source/how-to-install-flower.rst:12 msgid "Install stable release" +msgstr "安装稳定版" + +#: ../../source/how-to-install-flower.rst:15 +msgid "Using pip" msgstr "" -#: ../../source/how-to-install-flower.rst:14 +#: ../../source/how-to-install-flower.rst:17 msgid "" "Stable releases are available on `PyPI " "`_::" -msgstr "" +msgstr "稳定版本可在 `PyPI `_::" -#: ../../source/how-to-install-flower.rst:18 +#: ../../source/how-to-install-flower.rst:21 msgid "" "For simulations that use the Virtual Client Engine, ``flwr`` should be " "installed with the ``simulation`` extra::" +msgstr "对于使用虚拟客户端引擎的模拟,`flwr`` 应与`simulation`` 一起安装:" + +#: ../../source/how-to-install-flower.rst:27 +msgid "Using conda (or mamba)" msgstr "" -#: ../../source/how-to-install-flower.rst:24 -msgid "Verify installation" +#: ../../source/how-to-install-flower.rst:29 +msgid "Flower can also be installed from the ``conda-forge`` channel." 
+msgstr "" + +#: ../../source/how-to-install-flower.rst:31 +msgid "" +"If you have not added ``conda-forge`` to your channels, you will first " +"need to run the following::" +msgstr "" + +#: ../../source/how-to-install-flower.rst:36 +msgid "" +"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " +"installed with ``conda``::" msgstr "" -#: ../../source/how-to-install-flower.rst:26 +#: ../../source/how-to-install-flower.rst:40 +msgid "or with ``mamba``::" +msgstr "" + +#: ../../source/how-to-install-flower.rst:46 +msgid "Verify installation" +msgstr "验证安装" + +#: ../../source/how-to-install-flower.rst:48 +#, fuzzy msgid "" -"The following command can be used to verfiy if Flower was successfully " +"The following command can be used to verify if Flower was successfully " "installed. If everything worked, it should print the version of Flower to" " the command line::" -msgstr "" +msgstr "可以使用以下命令来验证 Flower 是否安装成功。如果一切正常,它将在命令行中打印 Flower 的版本::" -#: ../../source/how-to-install-flower.rst:33 +#: ../../source/how-to-install-flower.rst:55 msgid "Advanced installation options" +msgstr "高级安装选项" + +#: ../../source/how-to-install-flower.rst:58 +#, fuzzy +msgid "Install via Docker" +msgstr "安装Flower" + +#: ../../source/how-to-install-flower.rst:60 +#, fuzzy +msgid "" +"`How to run Flower using Docker `_" msgstr "" +"`TensorFlow快速入门 (教程) `_" -#: ../../source/how-to-install-flower.rst:36 +#: ../../source/how-to-install-flower.rst:63 msgid "Install pre-release" -msgstr "" +msgstr "安装预发布版本" -#: ../../source/how-to-install-flower.rst:38 +#: ../../source/how-to-install-flower.rst:65 msgid "" "New (possibly unstable) versions of Flower are sometimes available as " "pre-release versions (alpha, beta, release candidate) before the stable " "release happens::" -msgstr "" +msgstr "在稳定版发布之前,Flower 的新版本(可能是不稳定版)有时会作为预发布版本(alpha、beta、候选发布版本)提供::" -#: ../../source/how-to-install-flower.rst:42 +#: ../../source/how-to-install-flower.rst:69 msgid "" "For simulations that use the 
Virtual Client Engine, ``flwr`` pre-releases"
 " should be installed with the ``simulation`` extra::"
-msgstr ""
+msgstr "对于使用虚拟客户端引擎的模拟,``flwr`` 预发行版应与 ``simulation`` 一起安装::"
 
-#: ../../source/how-to-install-flower.rst:47
+#: ../../source/how-to-install-flower.rst:74
 msgid "Install nightly release"
-msgstr ""
+msgstr "安装每夜构建版本"
 
-#: ../../source/how-to-install-flower.rst:49
+#: ../../source/how-to-install-flower.rst:76
 msgid ""
 "The latest (potentially unstable) changes in Flower are available as "
 "nightly releases::"
-msgstr ""
+msgstr "Flower 中最新(可能不稳定)的更改会以每夜构建版本的形式提供::"
 
-#: ../../source/how-to-install-flower.rst:53
+#: ../../source/how-to-install-flower.rst:80
 msgid ""
 "For simulations that use the Virtual Client Engine, ``flwr-nightly`` "
 "should be installed with the ``simulation`` extra::"
-msgstr ""
+msgstr "对于使用虚拟客户端引擎的模拟,``flwr-nightly`` 应与 ``simulation`` 一起安装::"
 
 #: ../../source/how-to-monitor-simulation.rst:2
 msgid "Monitor simulation"
-msgstr ""
+msgstr "监控模拟"
 
 #: ../../source/how-to-monitor-simulation.rst:4
 msgid ""
@@ -4419,16 +5390,18 @@ msgid ""
 "constrain the total usage. Insights from resource consumption can help "
 "you make smarter decisions and speed up the execution time."
 msgstr ""
+"Flower 允许您在运行模拟时监控系统资源。此外,Flower "
+"仿真引擎功能强大,能让您决定如何按客户端方式分配资源并限制总使用量。从资源消耗中获得的观察可以帮助您做出更明智的决策,并加快执行时间。"
 
 #: ../../source/how-to-monitor-simulation.rst:6
 msgid ""
 "The specific instructions assume you are using macOS and have the "
 "`Homebrew `_ package manager installed."
-msgstr ""
+msgstr "具体说明假定你使用的是 macOS,并且安装了 `Homebrew `_ 软件包管理器。"
 
 #: ../../source/how-to-monitor-simulation.rst:10
 msgid "Downloads"
-msgstr ""
+msgstr "下载"
 
 #: ../../source/how-to-monitor-simulation.rst:16
 msgid ""
 "`Prometheus `_ is used to collect data, while "
 "`Grafana `_ will enable you to visualize the "
 "collected data. They are both well integrated with `Ray "
 "`_ which Flower uses under the hood."
msgstr "" +"`Prometheus `_ 用于收集数据,而 `Grafana " +"`_ 则能让你将收集到的数据可视化。它们都与 Flower 在引擎下使用的 `Ray " +"`_ 紧密集成。" #: ../../source/how-to-monitor-simulation.rst:18 msgid "" "Overwrite the configuration files (depending on your device, it might be " "installed on a different path)." -msgstr "" +msgstr "重写配置文件(根据设备的不同,可能安装在不同的路径上)。" #: ../../source/how-to-monitor-simulation.rst:20 msgid "If you are on an M1 Mac, it should be:" -msgstr "" +msgstr "如果你使用的是 M1 Mac,应该是这样:" #: ../../source/how-to-monitor-simulation.rst:27 msgid "On the previous generation Intel Mac devices, it should be:" -msgstr "" +msgstr "在上一代英特尔 Mac 设备上,应该是这样:" #: ../../source/how-to-monitor-simulation.rst:34 msgid "" "Open the respective configuration files and change them. Depending on " "your device, use one of the two following commands:" -msgstr "" +msgstr "打开相应的配置文件并修改它们。根据设备情况,使用以下两个命令之一:" #: ../../source/how-to-monitor-simulation.rst:44 msgid "" "and then delete all the text in the file and paste a new Prometheus " "config you see below. You may adjust the time intervals to your " "requirements:" -msgstr "" +msgstr "然后删除文件中的所有文本,粘贴一个新的 Prometheus 配置文件,如下所示。您可以根据需要调整时间间隔:" #: ../../source/how-to-monitor-simulation.rst:59 msgid "" "Now after you have edited the Prometheus configuration, do the same with " "the Grafana configuration files. Open those using one of the following " "commands as before:" -msgstr "" +msgstr "编辑完 Prometheus 配置后,请对 Grafana 配置文件执行同样的操作。与之前一样,使用以下命令之一打开这些文件:" #: ../../source/how-to-monitor-simulation.rst:69 msgid "" "Your terminal editor should open and allow you to apply the following " "configuration as before." -msgstr "" +msgstr "您的终端编辑器应该会打开,并允许您像之前一样应用以下配置。" #: ../../source/how-to-monitor-simulation.rst:84 msgid "" "Congratulations, you just downloaded all the necessary software needed " "for metrics tracking. Now, let’s start it." 
-msgstr "" +msgstr "恭喜您,您刚刚下载了指标跟踪所需的所有软件。现在,让我们开始吧。" #: ../../source/how-to-monitor-simulation.rst:88 msgid "Tracking metrics" -msgstr "" +msgstr "跟踪指标" #: ../../source/how-to-monitor-simulation.rst:90 msgid "" "Before running your Flower simulation, you have to start the monitoring " "tools you have just installed and configured." -msgstr "" +msgstr "在运行 Flower 模拟之前,您必须启动刚刚安装和配置的监控工具。" #: ../../source/how-to-monitor-simulation.rst:97 msgid "" "Please include the following argument in your Python code when starting a" " simulation." -msgstr "" +msgstr "开始模拟时,请在 Python 代码中加入以下参数。" #: ../../source/how-to-monitor-simulation.rst:108 msgid "Now, you are ready to start your workload." -msgstr "" +msgstr "现在,您可以开始工作了。" #: ../../source/how-to-monitor-simulation.rst:110 msgid "" "Shortly after the simulation starts, you should see the following logs in" " your terminal:" -msgstr "" +msgstr "模拟启动后不久,您就会在终端中看到以下日志:" #: ../../source/how-to-monitor-simulation.rst:117 msgid "You can look at everything at ``_ ." -msgstr "" +msgstr "您可以在 ``_ 查看所有内容。" #: ../../source/how-to-monitor-simulation.rst:119 msgid "" "It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" " lowest option)." -msgstr "" +msgstr "这是一个 Ray Dashboard。您可以导航到 \"度量标准\"(左侧面板,最低选项)。" #: ../../source/how-to-monitor-simulation.rst:121 msgid "" @@ -4528,23 +5504,26 @@ msgid "" "can only use Grafana to explore the metrics. You can start Grafana by " "going to ``http://localhost:3000/``." msgstr "" +"或者,您也可以点击右上角的 \"在 Grafana 中查看\",在 Grafana 中查看它们。请注意,Ray " +"仪表盘只能在模拟期间访问。模拟结束后,您只能使用 Grafana 浏览指标。您可以访问 ``http://localhost:3000/``启动 " +"Grafana。" #: ../../source/how-to-monitor-simulation.rst:123 msgid "" "After you finish the visualization, stop Prometheus and Grafana. This is " "important as they will otherwise block, for example port :code:`3000` on " "your machine as long as they are running." 
-msgstr "" +msgstr "完成可视化后,请停止 Prometheus 和 Grafana。这一点很重要,否则只要它们在运行,就会阻塞机器上的端口 :code:`3000`。" #: ../../source/how-to-monitor-simulation.rst:132 msgid "Resource allocation" -msgstr "" +msgstr "资源分配" #: ../../source/how-to-monitor-simulation.rst:134 msgid "" "You must understand how the Ray library works to efficiently allocate " "system resources to simulation clients on your own." -msgstr "" +msgstr "您必须了解 Ray 库是如何工作的,才能有效地为自己的仿真客户端分配系统资源。" #: ../../source/how-to-monitor-simulation.rst:136 msgid "" @@ -4555,27 +5534,29 @@ msgid "" "You will learn more about that in the later part of this blog. You can " "check the system resources by running the following:" msgstr "" +"最初,模拟(由 Ray " +"在引擎下处理)默认使用系统上的所有可用资源启动,并在客户端之间共享。但这并不意味着它会将资源平均分配给所有客户端,也不意味着模型训练会在所有客户端同时进行。您将在本博客的后半部分了解到更多相关信息。您可以运行以下命令检查系统资源:" #: ../../source/how-to-monitor-simulation.rst:143 msgid "In Google Colab, the result you see might be similar to this:" -msgstr "" +msgstr "在 Google Colab 中,您看到的结果可能与此类似:" #: ../../source/how-to-monitor-simulation.rst:155 msgid "" "However, you can overwrite the defaults. When starting a simulation, do " "the following (you don't need to overwrite all of them):" -msgstr "" +msgstr "不过,您可以覆盖默认值。开始模拟时,请执行以下操作(不必全部覆盖):" #: ../../source/how-to-monitor-simulation.rst:175 msgid "Let’s also specify the resource for a single client." -msgstr "" +msgstr "我们还可以为单个客户指定资源。" #: ../../source/how-to-monitor-simulation.rst:205 msgid "" "Now comes the crucial part. Ray will start a new client only when it has " "all the required resources (such that they run in parallel) when the " "resources allow." -msgstr "" +msgstr "现在到了关键部分。只有在资源允许的情况下,Ray 才会在拥有所有所需资源(如并行运行)时启动新客户端。" #: ../../source/how-to-monitor-simulation.rst:207 msgid "" @@ -4586,146 +5567,354 @@ msgid "" ":code:`client_num_gpus = 2`, the simulation wouldn't start (even if you " "had 2 GPUs but decided to set 1 in :code:`ray_init_args`)." 
msgstr "" +"在上面的示例中,将只运行一个客户端,因此您的客户端不会并发运行。设置 :code:`client_num_gpus = 0.5` " +"将允许运行两个客户端,从而使它们能够并发运行。请注意,所需的资源不要超过可用资源。如果您指定 :code:`client_num_gpus = " +"2`,模拟将无法启动(即使您有 2 个 GPU,但决定在 :code:`ray_init_args` 中设置为 1)。" #: ../../source/how-to-monitor-simulation.rst:212 ../../source/ref-faq.rst:2 msgid "FAQ" -msgstr "" +msgstr "常见问题" #: ../../source/how-to-monitor-simulation.rst:214 msgid "Q: I don't see any metrics logged." -msgstr "" +msgstr "问:我没有看到任何指标记录。" #: ../../source/how-to-monitor-simulation.rst:216 msgid "" "A: The timeframe might not be properly set. The setting is in the top " "right corner (\"Last 30 minutes\" by default). Please change the " "timeframe to reflect the period when the simulation was running." -msgstr "" +msgstr "答:时间范围可能没有正确设置。设置在右上角(默认为 \"最后 30 分钟\")。请更改时间框架,以反映模拟运行的时间段。" #: ../../source/how-to-monitor-simulation.rst:218 msgid "" "Q: I see “Grafana server not detected. Please make sure the Grafana " "server is running and refresh this page” after going to the Metrics tab " "in Ray Dashboard." -msgstr "" +msgstr "问:我看到 \"未检测到 Grafana 服务器。请确保 Grafana 服务器正在运行并刷新此页面\"。" #: ../../source/how-to-monitor-simulation.rst:220 msgid "" "A: You probably don't have Grafana running. Please check the running " "services" -msgstr "" +msgstr "答:您可能没有运行 Grafana。请检查正在运行的服务" #: ../../source/how-to-monitor-simulation.rst:226 msgid "" "Q: I see \"This site can't be reached\" when going to " "``_." -msgstr "" +msgstr "问:在访问 ``_时,我看到 \"无法访问该网站\"。" #: ../../source/how-to-monitor-simulation.rst:228 msgid "" "A: Either the simulation has already finished, or you still need to start" " Prometheus." 
-msgstr "" +msgstr "答:要么模拟已经完成,要么您还需要启动Prometheus。" #: ../../source/how-to-monitor-simulation.rst:232 msgid "Resources" -msgstr "" +msgstr "资源" #: ../../source/how-to-monitor-simulation.rst:234 msgid "" "Ray Dashboard: ``_" -msgstr "" +msgstr "Ray 仪表盘: ``_" #: ../../source/how-to-monitor-simulation.rst:236 msgid "" "Ray Metrics: ``_" msgstr "" +"Ray 指标: ``_" -#: ../../source/how-to-run-simulations.rst:2 -msgid "Run simulations" +#: ../../source/how-to-run-flower-using-docker.rst:2 +msgid "Run Flower using Docker" msgstr "" -#: ../../source/how-to-run-simulations.rst:8 +#: ../../source/how-to-run-flower-using-docker.rst:4 msgid "" -"Simulating Federated Learning workloads is useful for a multitude of use-" -"cases: you might want to run your workload on a large cohort of clients " -"but without having to source, configure and mange a large number of " -"physical devices; you might want to run your FL workloads as fast as " -"possible on the compute systems you have access to without having to go " -"through a complex setup process; you might want to validate your " -"algorithm on different scenarios at varying levels of data and system " -"heterogeneity, client availability, privacy budgets, etc. These are among" -" some of the use-cases where simulating FL workloads makes sense. Flower " -"can accommodate these scenarios by means of its `VirtualClientEngine " -"`_ or " -"VCE." +"The simplest way to get started with Flower is by using the pre-made " +"Docker images, which you can find on `Docker Hub " +"`_." msgstr "" -#: ../../source/how-to-run-simulations.rst:10 +#: ../../source/how-to-run-flower-using-docker.rst:7 +msgid "Before you start, make sure that the Docker daemon is running:" +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:14 msgid "" -"The :code:`VirtualClientEngine` schedules, launches and manages `virtual`" -" clients. These clients are identical to `non-virtual` clients (i.e. 
the " -"ones you launch via the command `flwr.client.start_numpy_client `_) in the sense that they can be configure " -"by creating a class inheriting, for example, from " -"`flwr.client.NumPyClient `_ " -"and therefore behave in an identical way. In addition to that, clients " -"managed by the :code:`VirtualClientEngine` are:" +"If you do not see the version of Docker but instead get an error saying " +"that the command was not found, you will need to install Docker first. " +"You can find installation instruction `here `_." msgstr "" -#: ../../source/how-to-run-simulations.rst:12 +#: ../../source/how-to-run-flower-using-docker.rst:20 msgid "" -"resource-aware: this means that each client gets assigned a portion of " -"the compute and memory on your system. You as a user can control this at " -"the beginning of the simulation and allows you to control the degree of " -"parallelism of your Flower FL simulation. The fewer the resources per " -"client, the more clients can run concurrently on the same hardware." +"On Linux, Docker commands require ``sudo`` privilege. If you want to " +"avoid using ``sudo``, you can follow the `Post-installation steps " +"`_ on the " +"official Docker website." msgstr "" -#: ../../source/how-to-run-simulations.rst:13 +#: ../../source/how-to-run-flower-using-docker.rst:25 +#, fuzzy +msgid "Flower server" +msgstr "Flower 服务器" + +#: ../../source/how-to-run-flower-using-docker.rst:28 +#, fuzzy +msgid "Quickstart" +msgstr "快速入门 JAX" + +#: ../../source/how-to-run-flower-using-docker.rst:30 +msgid "If you're looking to try out Flower, you can use the following command:" +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:37 msgid "" -"self-managed: this means that you as a user do not need to launch clients" -" manually, instead this gets delegated to :code:`VirtualClientEngine`'s " -"internals." +"The command will pull the Docker image with the tag " +"``1.7.0-py3.11-ubuntu22.04`` from Docker Hub. 
The tag contains the " +"information which Flower, Python and Ubuntu is used. In this case, it " +"uses Flower 1.7.0, Python 3.11 and Ubuntu 22.04. The ``--rm`` flag tells " +"Docker to remove the container after it exits." msgstr "" -#: ../../source/how-to-run-simulations.rst:14 +#: ../../source/how-to-run-flower-using-docker.rst:44 msgid "" -"ephemeral: this means that a client is only materialized when it is " -"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," -" releasing the resources it was assigned and allowing in this way other " -"clients to participate." +"By default, the Flower server keeps state in-memory. When using the " +"Docker flag ``--rm``, the state is not persisted between container " +"starts. We will show below how to save the state in a file on your host " +"system." msgstr "" -#: ../../source/how-to-run-simulations.rst:16 +#: ../../source/how-to-run-flower-using-docker.rst:48 msgid "" -"The :code:`VirtualClientEngine` implements `virtual` clients using `Ray " -"`_, an open-source framework for scalable Python " -"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use " -"of `Actors `_ to " -"spawn `virtual` clients and run their workload." +"The ``-p :`` flag tells Docker to map the ports " +"``9091``/``9092`` of the host to ``9091``/``9092`` of the container, " +"allowing you to access the Driver API on ``http://localhost:9091`` and " +"the Fleet API on ``http://localhost:9092``. Lastly, any flag that comes " +"after the tag is passed to the Flower server. Here, we are passing the " +"flag ``--insecure``." msgstr "" -#: ../../source/how-to-run-simulations.rst:20 -msgid "Launch your Flower simulation" +#: ../../source/how-to-run-flower-using-docker.rst:55 +msgid "" +"The ``--insecure`` flag enables insecure communication (using HTTP, not " +"HTTPS) and should only be used for testing purposes. We strongly " +"recommend enabling `SSL `_ when " +"deploying to a production environment." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:22 +#: ../../source/how-to-run-flower-using-docker.rst:60 msgid "" -"Running Flower simulations still require you to define your client class," -" a strategy, and utility functions to download and load (and potentially " +"You can use ``--help`` to view all available flags that the server " +"supports:" +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:67 +msgid "Mounting a volume to store the state on the host system" +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:69 +msgid "" +"If you want to persist the state of the server on your host system, all " +"you need to do is specify a path where you want to save the file on your " +"host system and a name for the database file. In the example below, we " +"tell Docker via the flag ``-v`` to mount the user's home directory " +"(``~/`` on your host) into the ``/app/`` directory of the container. " +"Furthermore, we use the flag ``--database`` to specify the name of the " +"database file." +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:82 +msgid "" +"As soon as the server starts, the file ``state.db`` is created in the " +"user's home directory on your host system. If the file already exists, " +"the server tries to restore the state from the file. To start the server " +"with an empty database, simply remove the ``state.db`` file." +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:87 +#, fuzzy +msgid "Enabling SSL for secure connections" +msgstr "启用 SSL 连接" + +#: ../../source/how-to-run-flower-using-docker.rst:89 +msgid "" +"To enable SSL, you will need a CA certificate, a server certificate and a" +" server private key." +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:92 +msgid "" +"For testing purposes, you can generate your own self-signed certificates." +" The `Enable SSL connections `_ page contains a section that " +"will guide you through the process." 
+msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:96 +msgid "" +"Assuming all files we need are in the local ``certificates`` directory, " +"we can use the flag ``-v`` to mount the local directory into the " +"``/app/`` directory of the container. This allows the server to access " +"the files within the container. Finally, we pass the names of the " +"certificates to the server with the ``--certificates`` flag." +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:108 +msgid "Using a different Flower or Python version" +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:110 +msgid "" +"If you want to use a different version of Flower or Python, you can do so" +" by changing the tag. All versions we provide are available on `Docker " +"Hub `_." +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:114 +msgid "Pinning a Docker image to a specific version" +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:116 +msgid "" +"It may happen that we update the images behind the tags. Such updates " +"usually include security updates of system dependencies that should not " +"change the functionality of Flower. However, if you want to ensure that " +"you always use the same image, you can specify the hash of the image " +"instead of the tag." +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:121 +msgid "" +"The following command returns the current image hash referenced by the " +"``server:1.7.0-py3.11-ubuntu22.04`` tag:" +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:128 +msgid "Next, we can pin the hash when running a new server container:" +msgstr "" + +#: ../../source/how-to-run-flower-using-docker.rst:137 +#, fuzzy +msgid "Setting environment variables" +msgstr "设置编码环境" + +#: ../../source/how-to-run-flower-using-docker.rst:139 +msgid "" +"To set a variable inside a Docker container, you can use the ``-e " +"=`` flag." 
+msgstr "" + +#: ../../source/how-to-run-simulations.rst:2 +msgid "Run simulations" +msgstr "运行模拟" + +#: ../../source/how-to-run-simulations.rst:8 +msgid "" +"Simulating Federated Learning workloads is useful for a multitude of use-" +"cases: you might want to run your workload on a large cohort of clients " +"but without having to source, configure and mange a large number of " +"physical devices; you might want to run your FL workloads as fast as " +"possible on the compute systems you have access to without having to go " +"through a complex setup process; you might want to validate your " +"algorithm on different scenarios at varying levels of data and system " +"heterogeneity, client availability, privacy budgets, etc. These are among" +" some of the use-cases where simulating FL workloads makes sense. Flower " +"can accommodate these scenarios by means of its `VirtualClientEngine " +"`_ or " +"VCE." +msgstr "" +"模拟联邦学习工作负载可用于多种案例:您可能希望在大量客户端上运行您的工作负载,但无需采购、配置和管理大量物理设备;您可能希望在您可以访问的计算系统上尽可能快地运行您的" +" FL 工作负载,而无需经过复杂的设置过程;您可能希望在不同数据和系统异构性、客户端可用性、隐私预算等不同水平的场景中验证您的算法。这些都是模拟 " +"FL 工作负载的一些案例。Flower 可以通过其 \"虚拟客户端引擎\"(VirtualClientEngine)_或 VCE 来匹配这些情况。" + +#: ../../source/how-to-run-simulations.rst:10 +msgid "" +"The :code:`VirtualClientEngine` schedules, launches and manages `virtual`" +" clients. These clients are identical to `non-virtual` clients (i.e. the " +"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " +"creating a class inheriting, for example, from `flwr.client.NumPyClient " +"`_ and therefore behave in an " +"identical way. 
In addition to that, clients managed by the " +":code:`VirtualClientEngine` are:" +msgstr "" +":code:`VirtualClientEngine`用来规划,启动和管理`虚拟`客户端。这些客户端跟`非虚拟`客户端是一样的(即为您通过`flwr.client.start_client" +" `_启动的客户端),因为它们可以通过创建一个继承自 " +"`flwr.client.NumPyClient `_ " +"的类进行配置,因此其行为方式相同。另外,由 `VirtualClientEngine` 管理的客户端有:" + +#: ../../source/how-to-run-simulations.rst:12 +msgid "" +"resource-aware: this means that each client gets assigned a portion of " +"the compute and memory on your system. You as a user can control this at " +"the beginning of the simulation and allows you to control the degree of " +"parallelism of your Flower FL simulation. The fewer the resources per " +"client, the more clients can run concurrently on the same hardware." +msgstr "" +"资源感知:这意味着每个客户端都会分配到系统中的一部分计算和内存。作为用户,您可以在模拟开始时对其进行控制,从而控制 Flower FL " +"模拟的并行程度。每个客户端的资源越少,在同一硬件上并发运行的客户端就越多。" + +#: ../../source/how-to-run-simulations.rst:13 +msgid "" +"self-managed: this means that you as a user do not need to launch clients" +" manually, instead this gets delegated to :code:`VirtualClientEngine`'s " +"internals." +msgstr "自管理:这意味着用户无需手动启动客户端,而是由 :code:`VirtualClientEngine` 负责。" + +#: ../../source/how-to-run-simulations.rst:14 +msgid "" +"ephemeral: this means that a client is only materialized when it is " +"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," +" releasing the resources it was assigned and allowing in this way other " +"clients to participate." +msgstr "" +"即时性:这意味着客户端只有在 FL 进程中需要它时才会被实体化(例如执行 `fit() `_ " +")。之后该对象将被销毁,释放分配给它的资源,并允许其他客户端以这种方式参与。" + +#: ../../source/how-to-run-simulations.rst:16 +msgid "" +"The :code:`VirtualClientEngine` implements `virtual` clients using `Ray " +"`_, an open-source framework for scalable Python " +"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use " +"of `Actors `_ to " +"spawn `virtual` clients and run their workload." 
+msgstr "" +":code:`VirtualClientEngine`使用`Ray " +"`_来实现`虚拟`客户端,这是一个用于可扩展 Python 工作负载的开源框架。特别地,Flower 的" +" :code:`VirtualClientEngine` 使用 `Actors `_ 来生成 `virtual` 客户端并运行它们的工作负载。" + +#: ../../source/how-to-run-simulations.rst:20 +msgid "Launch your Flower simulation" +msgstr "启动 Flower 模拟" + +#: ../../source/how-to-run-simulations.rst:22 +msgid "" +"Running Flower simulations still require you to define your client class," +" a strategy, and utility functions to download and load (and potentially " "partition) your dataset. With that out of the way, launching your " "simulation is done with `start_simulation `_ and a minimal example looks" " as follows:" msgstr "" +"运行 Flower 模拟器仍然需要定义客户端类、策略以及下载和加载(可能还需要分割)数据集的实用程序。在完成这些工作后,就可以使用 " +"\"start_simulation `_\" 来启动模拟了,一个最简单的示例如下:" #: ../../source/how-to-run-simulations.rst:44 msgid "VirtualClientEngine resources" -msgstr "" +msgstr "虚拟客户端引擎资源" #: ../../source/how-to-run-simulations.rst:45 msgid "" @@ -4740,10 +5929,16 @@ msgid "" " documentation. Do not set :code:`ray_init_args` if you want the VCE to " "use all your system's CPUs and GPUs." msgstr "" +"默认情况下,VCE 可以访问所有系统资源(即所有 CPU、所有 GPU 等),因为这也是启动 Ray " +"时的默认行为。不过,在某些设置中,您可能希望限制有多少系统资源用于模拟。您可以通过 :code:`ray_init_args` 输入到 " +":code:`start_simulation` 的参数来做到这一点,VCE 会在内部将该参数传递给 Ray 的 :code:`ray.init`" +" 命令。有关您可以配置的设置的完整列表,请查看 `ray.init `_ 文档。如果希望 VCE 使用系统中所有的 CPU 和 " +"GPU,请不要设置 :code:`ray_init_args`。" #: ../../source/how-to-run-simulations.rst:62 msgid "Assigning client resources" -msgstr "" +msgstr "分配客户端资源" #: ../../source/how-to-run-simulations.rst:63 msgid "" @@ -4751,6 +5946,8 @@ msgid "" " nothing else) to each virtual client. This means that if your system has" " 10 cores, that many virtual clients can be concurrently running." 
msgstr "" +"默认情况下,:code:`VirtualClientEngine` 会为每个虚拟客户端分配一个 CPU " +"内核(不分配其他任何内核)。这意味着,如果系统有 10 个内核,那么可以同时运行这么多虚拟客户端。" #: ../../source/how-to-run-simulations.rst:65 msgid "" @@ -4762,20 +5959,24 @@ msgid "" " Two keys are internally used by Ray to schedule and spawn workloads (in " "our case Flower clients):" msgstr "" +"通常情况下,您可能希望根据 FL 工作负载的复杂性(即计算和内存占用)来调整分配给客户端的资源。您可以在启动模拟时将参数 " +"`client_resources` 设置为 `start_simulation `_ 。Ray " +"内部使用两个键来调度和生成工作负载(在我们的例子中是 Flower 客户端):" #: ../../source/how-to-run-simulations.rst:67 msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." -msgstr "" +msgstr ":code:`num_cpus` 表示客户端将获得的 CPU 内核数量。" #: ../../source/how-to-run-simulations.rst:68 msgid "" ":code:`num_gpus` indicates the **ratio** of GPU memory a client gets " "assigned." -msgstr "" +msgstr ":code:`num_gpus` 表示分配给客户端的 GPU 内存的**比例**。" #: ../../source/how-to-run-simulations.rst:70 msgid "Let's see a few examples:" -msgstr "" +msgstr "让我们来看几个例子:" #: ../../source/how-to-run-simulations.rst:89 msgid "" @@ -4789,6 +5990,11 @@ msgid "" "simulating a client sampled by the strategy) and then will execute them " "in a resource-aware manner in batches of 8." msgstr "" +"虽然 :code:`client_resources` 可用来控制 FL " +"模拟的并发程度,但这并不能阻止您在同一轮模拟中运行几十、几百甚至上千个客户端,并拥有数量级更多的 " +"\"休眠\"(即不参与一轮模拟)客户端。比方说,您希望每轮有 100 个客户端,但您的系统只能同时容纳 8 " +"个客户端。:code:`VirtualClientEngine` 将安排运行 100 " +"个工作(每个工作模拟策略采样的一个客户端),然后以资源感知的方式分批执行。" #: ../../source/how-to-run-simulations.rst:91 msgid "" @@ -4797,10 +6003,12 @@ msgid "" "look at the `Ray documentation `_." msgstr "" +"要了解资源如何用于调度 FL 客户端以及如何定义自定义资源的所有复杂细节,请查看 `Ray 文档 " +"`_。" #: ../../source/how-to-run-simulations.rst:94 msgid "Simulation examples" -msgstr "" +msgstr "模拟示例" #: ../../source/how-to-run-simulations.rst:96 msgid "" @@ -4808,6 +6016,8 @@ msgid "" "Tensorflow/Keras and PyTorch are provided in the `Flower repository " "`_. 
You can run them on Google Colab too:" msgstr "" +"在 Tensorflow/Keras 和 PyTorch 中进行 Flower 模拟的几个可随时运行的完整示例已在 `Flower 库 " +"`_ 中提供。您也可以在 Google Colab 上运行它们:" #: ../../source/how-to-run-simulations.rst:98 msgid "" @@ -4815,6 +6025,8 @@ msgid "" "`_: 100 clients collaboratively train a MLP model on MNIST." msgstr "" +"Tensorflow/Keras模拟 `_:100个客户端在MNIST上协作训练一个MLP模型。" #: ../../source/how-to-run-simulations.rst:99 msgid "" @@ -4822,31 +6034,33 @@ msgid "" "/simulation-pytorch>`_: 100 clients collaboratively train a CNN model on " "MNIST." msgstr "" +"PyTorch 模拟 `_:100 个客户端在 MNIST 上协作训练一个 CNN 模型。" #: ../../source/how-to-run-simulations.rst:104 msgid "Multi-node Flower simulations" -msgstr "" +msgstr "多节点 Flower 模拟" #: ../../source/how-to-run-simulations.rst:106 msgid "" "Flower's :code:`VirtualClientEngine` allows you to run FL simulations " "across multiple compute nodes. Before starting your multi-node simulation" " ensure that you:" -msgstr "" +msgstr "Flower 的 :code:`VirtualClientEngine` 允许您在多个计算节点上运行 FL 模拟。在开始多节点模拟之前,请确保:" #: ../../source/how-to-run-simulations.rst:108 msgid "Have the same Python environment in all nodes." -msgstr "" +msgstr "所有节点都有相同的 Python 环境。" #: ../../source/how-to-run-simulations.rst:109 msgid "Have a copy of your code (e.g. your entire repo) in all nodes." -msgstr "" +msgstr "在所有节点上都有一份代码副本(例如整个软件包)。" #: ../../source/how-to-run-simulations.rst:110 msgid "" "Have a copy of your dataset in all nodes (more about this in " ":ref:`simulation considerations `)" -msgstr "" +msgstr "在所有节点中都有一份数据集副本(更多相关信息请参阅 :ref:`模拟注意事项`)" #: ../../source/how-to-run-simulations.rst:111 msgid "" @@ -4854,6 +6068,9 @@ msgid "" "`_ so the " ":code:`VirtualClientEngine` attaches to a running Ray instance." msgstr "" +"将 :code:`ray_init_args={\"address\"=\"auto\"}`传递给 `start_simulation `_ ,这样 " +":code:`VirtualClientEngine`就会连接到正在运行的 Ray 实例。" #: ../../source/how-to-run-simulations.rst:112 msgid "" @@ -4861,6 +6078,8 @@ msgid "" "--head`. 
This command will print a few lines, one of which indicates how " "to attach other nodes to the head node." msgstr "" +"在头部节点上启动 Ray:在终端上输入 :code:`raystart--" +"head`。该命令将打印几行输出,其中一行说明如何将其他节点连接到头部节点。" #: ../../source/how-to-run-simulations.rst:113 msgid "" @@ -4868,29 +6087,31 @@ msgid "" "starting the head and execute it on terminal of a new node: for example " ":code:`ray start --address='192.168.1.132:6379'`" msgstr "" +"将其他节点附加到头部节点:复制启动头部后显示的命令,并在新节点的终端上执行:例如 :code:`ray start " +"--address='192.168.1.132:6379'`" #: ../../source/how-to-run-simulations.rst:115 msgid "" "With all the above done, you can run your code from the head node as you " "would if the simulation was running on a single node." -msgstr "" +msgstr "完成上述所有操作后,您就可以在头部节点上运行代码了,就像在单个节点上运行模拟一样。" #: ../../source/how-to-run-simulations.rst:117 msgid "" "Once your simulation is finished, if you'd like to dismantle your cluster" " you simply need to run the command :code:`ray stop` in each node's " "terminal (including the head node)." -msgstr "" +msgstr "模拟结束后,如果要拆除集群,只需在每个节点(包括头部节点)的终端运行 :code:`ray stop` 命令即可。" #: ../../source/how-to-run-simulations.rst:120 msgid "Multi-node simulation good-to-know" -msgstr "" +msgstr "了解多节点模拟" #: ../../source/how-to-run-simulations.rst:122 msgid "" "Here we list a few interesting functionality when running multi-node FL " "simulations:" -msgstr "" +msgstr "在此,我们列举了运行多节点 FL 模拟时的一些有趣功能:" #: ../../source/how-to-run-simulations.rst:124 msgid "" @@ -4898,6 +6119,8 @@ msgid "" " well as the total resources available to the " ":code:`VirtualClientEngine`." 
msgstr "" +"使用 :code:`ray status` 查看连接到头部节点的所有节点,以及 :code:`VirtualClientEngine` " +"可用的总资源。" #: ../../source/how-to-run-simulations.rst:126 msgid "" @@ -4910,16 +6133,20 @@ msgid "" "gpus=` in any :code:`ray start` command (including " "when starting the head)" msgstr "" +"将新节点附加到头部节点时,头部节点将可见其所有资源(即所有 CPU 和 GPU)。这意味着 :code:`VirtualClientEngine`" +" 可以调度尽可能多的 \"虚拟 \"客户端来运行该节点。在某些设置中,您可能希望将某些资源排除在模拟之外。为此,您可以在任何 :code:`ray" +" start` 命令(包括启动头部时)中添加 `--num-cpus=`和/或 `--num-" +"gpus=`" #: ../../source/how-to-run-simulations.rst:132 msgid "Considerations for simulations" -msgstr "" +msgstr "模拟的注意事项" #: ../../source/how-to-run-simulations.rst:135 msgid "" "We are actively working on these fronts so to make it trivial to run any " "FL workload with Flower simulation." -msgstr "" +msgstr "我们正在积极开展这些方面的工作,以便使 FL 工作负载与 Flower 模拟的运行变得轻而易举。" #: ../../source/how-to-run-simulations.rst:138 msgid "" @@ -4931,10 +6158,13 @@ msgid "" " mind when designing your FL pipeline with Flower. We also highlight a " "couple of current limitations in our implementation." msgstr "" +"当前的 VCE 允许您在模拟模式下运行联邦学习工作负载,无论您是在个人笔记本电脑上建立简单的场景原型,还是要在多个高性能 GPU 节点上训练复杂的" +" FL情景。虽然我们为 VCE 增加了更多的功能,但以下几点强调了在使用 Flower 设计 FL " +"时需要注意的一些事项。我们还强调了我们的实现中目前存在的一些局限性。" #: ../../source/how-to-run-simulations.rst:141 msgid "GPU resources" -msgstr "" +msgstr "GPU 资源" #: ../../source/how-to-run-simulations.rst:143 msgid "" @@ -4942,6 +6172,8 @@ msgid "" ":code:`num_gpus` in :code:`client_resources`. This being said, Ray (used " "internally by the VCE) is by default:" msgstr "" +"VCE 会为指定 :code:`client_resources` 中 :code:`num_gpus` 关键字的客户端分配 GPU " +"内存份额。也就是说,Ray(VCE 内部使用)是默认的:" #: ../../source/how-to-run-simulations.rst:146 msgid "" @@ -4950,12 +6182,14 @@ msgid "" "different (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients" " concurrently." 
msgstr "" +"不知道 GPU 上可用的总 VRAM。这意味着,如果您设置 :code:`num_gpus=0.5`,而系统中有两个不同(如 32GB 和 " +"8GB)VRAM 的 GPU,它们都将同时运行 2 个客户端。" #: ../../source/how-to-run-simulations.rst:147 msgid "" "not aware of other unrelated (i.e. not created by the VCE) workloads are " "running on the GPU. Two takeaways from this are:" -msgstr "" +msgstr "不知道 GPU 上正在运行其他无关(即不是由 VCE 创建)的工作负载。从中可以得到以下两点启示:" #: ../../source/how-to-run-simulations.rst:149 msgid "" @@ -4963,6 +6197,8 @@ msgid "" "aggregation (by instance when making use of the `evaluate method `_)" msgstr "" +"您的 Flower 服务器可能需要 GPU 来评估聚合后的 \"全局模型\"(例如在使用 \"评估方法\"`_时)" #: ../../source/how-to-run-simulations.rst:150 msgid "" @@ -4971,6 +6207,8 @@ msgid "" ":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching your " "experiment." msgstr "" +"如果您想在同一台机器上运行多个独立的 Flower 模拟,则需要在启动实验时使用 " +":code:`CUDA_VISIBLE_DEVICES=\"\"` 屏蔽 GPU。" #: ../../source/how-to-run-simulations.rst:153 msgid "" @@ -4979,10 +6217,12 @@ msgid "" "situation of client using more VRAM than the ratio specified when " "starting the simulation." msgstr "" +"此外,传递给 :code:`client_resources` 的 GPU 资源限制并不是 \"强制 \"的(即可以超出),这可能导致客户端使用的" +" VRAM 超过启动模拟时指定的比例。" #: ../../source/how-to-run-simulations.rst:156 msgid "TensorFlow with GPUs" -msgstr "" +msgstr "使用 GPU 的 TensorFlow" #: ../../source/how-to-run-simulations.rst:158 msgid "" @@ -4995,6 +6235,10 @@ msgid "" "default behavior by `enabling memory growth " "`_." msgstr "" +"在 TensorFlow `_ 中使用 GPU 时,几乎所有进程可见的" +" GPU 内存都将被映射。TensorFlow 这样做是出于优化目的。然而,在 FL 模拟等设置中,我们希望将 GPU 分割成多个 \"虚拟 " +"\"客户端,这并不是一个理想的机制。幸运的是,我们可以通过 `启用内存增长 " +"`_来禁用这一默认行为。" #: ../../source/how-to-run-simulations.rst:160 msgid "" @@ -5005,6 +6249,9 @@ msgid "" "In this case, to enable GPU growth for TF workloads. 
It would look as " "follows:" msgstr "" +"这需要在主进程(也就是服务器运行的地方)和 VCE 创建的每个角色中完成。通过 " +":code:`actor_kwargs`,我们可以传递保留关键字`\"on_actor_init_fn\"`,以指定在角色初始化时执行的函数。在本例中,为了使" +" TF 工作负载的 GPU 增长,它看起来如下:" #: ../../source/how-to-run-simulations.rst:179 msgid "" @@ -5012,10 +6259,13 @@ msgid "" "`_ example." msgstr "" +"这正是 \"Tensorflow/Keras 模拟 " +"`_\"示例中使用的机制。" #: ../../source/how-to-run-simulations.rst:183 msgid "Multi-node setups" -msgstr "" +msgstr "多节点设置" #: ../../source/how-to-run-simulations.rst:185 msgid "" @@ -5029,6 +6279,11 @@ msgid "" "nodes or a dataset serving mechanism (e.g. using nfs, a database) to " "circumvent data duplication." msgstr "" +"VCE 目前不提供控制特定 \"虚拟 " +"\"客户端在哪个节点上执行的方法。换句话说,如果不止一个节点拥有客户端运行所需的资源,那么这些节点中的任何一个都可能被调度到客户端工作负载上。在 " +"FL " +"进程的稍后阶段(即在另一轮中),同一客户端可以由不同的节点执行。根据客户访问数据集的方式,这可能需要在所有节点上复制所有数据集分区,或采用数据集服务机制(如使用" +" nfs 或数据库)来避免数据重复。" #: ../../source/how-to-run-simulations.rst:187 msgid "" @@ -5040,21 +6295,24 @@ msgid "" " above also since, in some way, the client's dataset could be seen as a " "type of `state`." msgstr "" +"根据定义,虚拟客户端是 \"无状态 \"的,因为它们具有即时性。客户机状态可以作为 Flower " +"客户机类的一部分来实现,但用户需要确保将其保存到持久存储(如数据库、磁盘)中,而且无论客户机在哪个节点上运行,都能在以后检索到。这也与上述观点有关,因为在某种程度上,客户端的数据集可以被视为一种" +" \"状态\"。" #: ../../source/how-to-save-and-load-model-checkpoints.rst:2 msgid "Save and load model checkpoints" -msgstr "" +msgstr "保存和加载模型检查点" #: ../../source/how-to-save-and-load-model-checkpoints.rst:4 msgid "" "Flower does not automatically save model updates on the server-side. This" " how-to guide describes the steps to save (and load) model checkpoints in" " Flower." 
-msgstr "" +msgstr "Flower 不会在服务器端自动保存模型更新。本指南将介绍在 Flower 中保存(和加载)模型检查点的步骤。" #: ../../source/how-to-save-and-load-model-checkpoints.rst:8 msgid "Model checkpointing" -msgstr "" +msgstr "模型检查点" #: ../../source/how-to-save-and-load-model-checkpoints.rst:10 msgid "" @@ -5069,10 +6327,15 @@ msgid "" " before it returns those aggregated weights to the caller (i.e., the " "server):" msgstr "" +"模型更新可通过自定义 :code:`Strategy` " +"方法在服务器端持久化。实现自定义策略始终是一种选择,但在许多情况下,简单地自定义现有策略可能更方便。下面的代码示例定义了一个新的 " +":code:`SaveModelStrategy`,它自定义了现有的内置 :code:`FedAvg` " +"策略。特别是,它通过调用基类(:code:`FedAvg`)中的 :code:`aggregate_fit` 来定制 " +":code:`aggregate_fit`。然后继续保存返回的(聚合)参数,然后再将这些聚合参数返回给调用者(即服务器):" #: ../../source/how-to-save-and-load-model-checkpoints.rst:47 msgid "Save and load PyTorch checkpoints" -msgstr "" +msgstr "保存和加载 PyTorch 检查点" #: ../../source/how-to-save-and-load-model-checkpoints.rst:49 msgid "" @@ -5083,17 +6346,26 @@ msgid "" "transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" " class structure." msgstr "" +"与前面的例子类似,但多了几个步骤,我们将展示如何存储一个 PyTorch 检查点,我们将使用 ``torch.save`` " +"函数。首先,``aggregate_fit`` 返回一个 ``Parameters`` 对象,它必须被转换成一个 NumPy " +"``ndarray`` 的列表,然后这些对象按照 ``OrderedDict`` 类结构被转换成 PyTorch `state_dict` 对象。" #: ../../source/how-to-save-and-load-model-checkpoints.rst:85 msgid "" "To load your progress, you simply append the following lines to your " "code. Note that this will iterate over all saved checkpoints and load the" " latest one:" +msgstr "要加载进度,只需在代码中添加以下几行。请注意,这将遍历所有已保存的检查点,并加载最新的检查点:" + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:97 +msgid "" +"Return/use this object of type ``Parameters`` wherever necessary, such as" +" in the ``initial_parameters`` when defining a ``Strategy``." 
msgstr "" #: ../../source/how-to-upgrade-to-flower-1.0.rst:2 msgid "Upgrade to Flower 1.0" -msgstr "" +msgstr "升级至 Flower 1.0" #: ../../source/how-to-upgrade-to-flower-1.0.rst:4 msgid "" @@ -5102,32 +6374,34 @@ msgid "" "series releases), there are a few breaking changes that make it necessary" " to change the code of existing 0.x-series projects." msgstr "" +"Flower 1.0 正式发布。除了新功能,Flower 1.0 还为未来的发展奠定了稳定的基础。与 Flower 0.19(以及其他 0.x " +"系列版本)相比,有一些破坏性改动需要修改现有 0.x 系列项目的代码。" #: ../../source/how-to-upgrade-to-flower-1.0.rst:8 msgid "Install update" -msgstr "" +msgstr "安装更新" #: ../../source/how-to-upgrade-to-flower-1.0.rst:10 msgid "" "Here's how to update an existing installation to Flower 1.0 using either " "pip or Poetry:" -msgstr "" +msgstr "下面介绍如何使用 pip 或 Poetry 将现有安装更新到 Flower 1.0:" #: ../../source/how-to-upgrade-to-flower-1.0.rst:12 msgid "pip: add ``-U`` when installing." -msgstr "" +msgstr "pip: 安装时添加 ``-U``." #: ../../source/how-to-upgrade-to-flower-1.0.rst:14 msgid "" "``python -m pip install -U flwr`` (when using ``start_server`` and " "``start_client``)" -msgstr "" +msgstr "`python -m pip install -U flwr``(当使用`start_server`和`start_client`时)" #: ../../source/how-to-upgrade-to-flower-1.0.rst:15 msgid "" "``python -m pip install -U flwr[simulation]`` (when using " "``start_simulation``)" -msgstr "" +msgstr "`python -m pip install -U flwr[simulation]``(当使用`start_simulation``时)" #: ../../source/how-to-upgrade-to-flower-1.0.rst:17 msgid "" @@ -5135,40 +6409,44 @@ msgid "" "reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " "before running ``poetry install``)." 
msgstr "" +"Poetry:更新 ``pyproject.toml`` 中的 ``flwr`` 依赖包,然后重新安装(运行 ``poetry install``" +" 前,别忘了通过 ``rm poetry.lock` 删除 ``poetry.lock`)。" #: ../../source/how-to-upgrade-to-flower-1.0.rst:19 msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" -msgstr "" +msgstr "``flwr = \"^1.0.0\"`` (当使用 ``start_server` 和 ``start_client` 时)" #: ../../source/how-to-upgrade-to-flower-1.0.rst:20 msgid "" "``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " "using ``start_simulation``)" msgstr "" +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] " +"}``(当使用``start_simulation``时)" #: ../../source/how-to-upgrade-to-flower-1.0.rst:24 msgid "Required changes" -msgstr "" +msgstr "所需变更" #: ../../source/how-to-upgrade-to-flower-1.0.rst:26 msgid "The following breaking changes require manual updates." -msgstr "" +msgstr "以下更改需要手动更新。" #: ../../source/how-to-upgrade-to-flower-1.0.rst:29 msgid "General" -msgstr "" +msgstr "一般情况" #: ../../source/how-to-upgrade-to-flower-1.0.rst:31 msgid "" "Pass all arguments as keyword arguments (not as positional arguments). 
" "Here's an example:" -msgstr "" +msgstr "将所有参数作为关键字参数传递(而不是位置参数)。下面是一个例子:" #: ../../source/how-to-upgrade-to-flower-1.0.rst:33 msgid "" "Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " "FlowerClient())``" -msgstr "" +msgstr "Flower 0.19 (位置参数): ``start_client(\"127.0.0.1:8080\", FlowerClient())``" #: ../../source/how-to-upgrade-to-flower-1.0.rst:34 msgid "" @@ -5176,34 +6454,44 @@ msgid "" "``start_client(server_address=\"127.0.0.1:8080\", " "client=FlowerClient())``" msgstr "" +"Flower 1.0(关键字参数): ``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())``" #: ../../source/how-to-upgrade-to-flower-1.0.rst:39 msgid "" "Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " "``def get_parameters(self, config):``" msgstr "" +"NumPyClient的子类:将``def get_parameters(self):```改为``def " +"get_parameters(self,config):``" #: ../../source/how-to-upgrade-to-flower-1.0.rst:40 msgid "" "Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " "get_parameters(self, ins: GetParametersIns):``" msgstr "" +"客户端 \"的子类:将 \"get_parameters(self): \"改为 \"get_parameters(self, ins: " +"GetParametersIns):\"" #: ../../source/how-to-upgrade-to-flower-1.0.rst:43 msgid "Strategies / ``start_server`` / ``start_simulation``" -msgstr "" +msgstr "策略 / ``start_server`` / ``start_simulation``" #: ../../source/how-to-upgrade-to-flower-1.0.rst:45 msgid "" "Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " "``start_simulation``. 
Here's an example:" msgstr "" +"向 ``start_server`` 和 ``start_simulation` 传递 ``ServerConfig``(而不是 " +"dictionary)。下面是一个例子:" #: ../../source/how-to-upgrade-to-flower-1.0.rst:47 msgid "" "Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " "\"round_timeout\": 600.0}, ...)``" msgstr "" +"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " +"\"round_timeout\": 600.0}, ...)``" #: ../../source/how-to-upgrade-to-flower-1.0.rst:48 msgid "" @@ -5211,12 +6499,15 @@ msgid "" "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " "...)``" msgstr "" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" #: ../../source/how-to-upgrade-to-flower-1.0.rst:50 msgid "" "Replace ``num_rounds=1`` in ``start_simulation`` with the new " "``config=ServerConfig(...)`` (see previous item)" -msgstr "" +msgstr "将`start_simulation``中的`num_rounds=1``替换为新的`config=ServerConfig(...)`(参见前一项)" #: ../../source/how-to-upgrade-to-flower-1.0.rst:51 msgid "" @@ -5225,18 +6516,20 @@ msgid "" " configuring the strategy to sample all clients for evaluation after the " "last round of training." 
msgstr "" +"删除调用 ``start_server`` 时的 ``force_final_distributed_eval` " +"参数。可以通过配置策略,在最后一轮训练后对所有客户端进行抽样评估,从而启用对所有客户端的分布式评估。" #: ../../source/how-to-upgrade-to-flower-1.0.rst:52 msgid "Rename parameter/ndarray conversion functions:" -msgstr "" +msgstr "重命名参数/数组转换函数:" #: ../../source/how-to-upgrade-to-flower-1.0.rst:54 msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" -msgstr "" +msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``" #: ../../source/how-to-upgrade-to-flower-1.0.rst:55 msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" -msgstr "" +msgstr "``weights_to_parameters`` --> ``ndarrays_to_parameters``" #: ../../source/how-to-upgrade-to-flower-1.0.rst:57 msgid "" @@ -5247,22 +6540,26 @@ msgid "" "without passing a strategy instance) should now manually initialize " "FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." msgstr "" +"策略初始化:如果策略依赖于 ``fraction_fit`` 和 ``fraction_evaluate`` 的默认值,请手动将 " +"``fraction_fit`` 和 ``fraction_evaluate`` 设置为 ``0.1``。未手动创建策略的项目(调用 " +"``start_server` 或 ``start_simulation` 时未传递策略实例)现在应手动初始化 FedAvg,并将 " +"`fraction_fit` 和 `fraction_evaluate` 设为 `0.1``。" #: ../../source/how-to-upgrade-to-flower-1.0.rst:58 msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" -msgstr "" +msgstr "重命名内置策略参数(例如,`FedAvg``):" #: ../../source/how-to-upgrade-to-flower-1.0.rst:60 msgid "``fraction_eval`` --> ``fraction_evaluate``" -msgstr "" +msgstr "``fraction_eval`` --> ``fraction_evaluate``" #: ../../source/how-to-upgrade-to-flower-1.0.rst:61 msgid "``min_eval_clients`` --> ``min_evaluate_clients``" -msgstr "" +msgstr "``min_eval_clients`` --> ``min_evaluate_clients``" #: ../../source/how-to-upgrade-to-flower-1.0.rst:62 msgid "``eval_fn`` --> ``evaluate_fn``" -msgstr "" +msgstr "``eval_fn`` --> ``evaluate_fn``" #: ../../source/how-to-upgrade-to-flower-1.0.rst:64 msgid "" @@ -5270,16 +6567,21 @@ msgid "" "functions, for example, ``configure_fit``, ``aggregate_fit``, " 
"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." msgstr "" +"将 `rnd` 更名为 `server_round`。这会影响多个方法和函数,例如 " +"``configure_fit``、``aggregate_fit``、``configure_evaluate``、`aggregate_evaluate``" +" 和 ``evaluate_fn``。" #: ../../source/how-to-upgrade-to-flower-1.0.rst:65 msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" -msgstr "" +msgstr "在 ``evaluate_fn` 中添加 ``server_round` 和 ``config`:" #: ../../source/how-to-upgrade-to-flower-1.0.rst:67 msgid "" "Flower 0.19: ``def evaluate(parameters: NDArrays) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" +"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" #: ../../source/how-to-upgrade-to-flower-1.0.rst:68 msgid "" @@ -5287,10 +6589,13 @@ msgid "" "config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " "Scalar]]]:``" msgstr "" +"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " +"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" #: ../../source/how-to-upgrade-to-flower-1.0.rst:71 msgid "Custom strategies" -msgstr "" +msgstr "定制策略" #: ../../source/how-to-upgrade-to-flower-1.0.rst:73 msgid "" @@ -5300,34 +6605,42 @@ msgid "" "``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " "``aggregate_evaluate``)" msgstr "" +"参数``failures``的类型已从``List[BaseException]``变为``List[Union[Tuple[ClientProxy," +" FitRes], " +"BaseException]]``(在``agregate_fit``中)和``List[Union[Tuple[ClientProxy, " +"EvaluateRes], BaseException]]``(在``agregate_evaluate``中)" #: ../../source/how-to-upgrade-to-flower-1.0.rst:74 msgid "" "The ``Strategy`` method ``evaluate`` now receives the current round of " "federated learning/evaluation as the first parameter:" -msgstr "" +msgstr "``Strategy``方法 的``evaluate``现在会接收当前一轮联邦学习/评估作为第一个参数:" #: ../../source/how-to-upgrade-to-flower-1.0.rst:76 msgid "" "Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " "Optional[Tuple[float, Dict[str, 
Scalar]]]:``" msgstr "" +"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:```" #: ../../source/how-to-upgrade-to-flower-1.0.rst:77 msgid "" "Flower 1.0: ``def evaluate(self, server_round: int, parameters: " "Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" +"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " +"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" #: ../../source/how-to-upgrade-to-flower-1.0.rst:80 msgid "Optional improvements" -msgstr "" +msgstr "可选的改进措施" #: ../../source/how-to-upgrade-to-flower-1.0.rst:82 msgid "" "Along with the necessary changes above, there are a number of potential " "improvements that just became possible:" -msgstr "" +msgstr "除了上述必要的改动之外,还有一些潜在的改进措施:" #: ../../source/how-to-upgrade-to-flower-1.0.rst:84 msgid "" @@ -5335,6 +6648,8 @@ msgid "" "``NumPyClient``. If you, for example, use server-side evaluation, then " "empy placeholder implementations of ``evaluate`` are no longer necessary." msgstr "" +"删除 ``Client`` 或 ``NumPyClient`` 子类中的 \"占位符 " +"\"方法。例如,如果你使用服务器端评估,那么就不再需要``evaluate``的 \"空占位符 \"实现。" #: ../../source/how-to-upgrade-to-flower-1.0.rst:85 msgid "" @@ -5342,10 +6657,13 @@ msgid "" "``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " "round_timeout=600.0), ...)``" msgstr "" +"通过 ``start_simulation`` 配置循环超时: ``start_simulation(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" #: ../../source/how-to-upgrade-to-flower-1.0.rst:89 msgid "Further help" -msgstr "" +msgstr "更多帮助" #: ../../source/how-to-upgrade-to-flower-1.0.rst:91 msgid "" @@ -5353,67 +6671,190 @@ msgid "" "`_ are already updated" " to Flower 1.0, they can serve as a reference for using the Flower 1.0 " "API. If there are further questionsm, `join the Flower Slack " -"`_ and use the channgel ``#questions``." +"`_ and use the channgel ``#questions``." 
+msgstr "" +"大多数官方的 `Flower 代码示例 `_" +" 已经更新到 Flower 1.0,它们可以作为使用 Flower 1.0 API 的参考。如果还有其他问题,请加入 Flower Slack " +"`_ 并使用 \"#questions``\"。" + +#: ../../source/how-to-use-built-in-mods.rst:2 +msgid "Use Built-in Mods" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:4 +msgid "" +"**Note: This tutorial covers experimental features. The functionality and" +" interfaces may change in future versions.**" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:6 +msgid "" +"In this tutorial, we will learn how to utilize built-in mods to augment " +"the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " +"allow us to perform operations before and after a task is processed in " +"the ``ClientApp``." +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:9 +msgid "What are Mods?" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:11 +msgid "" +"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " +"or inspect the incoming ``Message`` and the resulting outgoing " +"``Message``. The signature for a ``Mod`` is as follows:" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:18 +msgid "A typical mod function might look something like this:" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:31 +msgid "Using Mods" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:33 +msgid "To use mods in your ``ClientApp``, you can follow these steps:" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:36 +msgid "1. Import the required mods" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:38 +msgid "First, import the built-in mod you intend to use:" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:46 +msgid "2. Define your client function" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:48 +msgid "" +"Define your client function (``client_fn``) that will be wrapped by the " +"mod(s):" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:57 +msgid "3. 
Create the ``ClientApp`` with mods" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:59 +msgid "" +"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " +"argument. The order in which you provide the mods matters:" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:72 +#, fuzzy +msgid "Order of execution" +msgstr "停用" + +#: ../../source/how-to-use-built-in-mods.rst:74 +msgid "" +"When the ``ClientApp`` runs, the mods are executed in the order they are " +"provided in the list:" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:76 +msgid "``example_mod_1`` (outermost mod)" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:77 +msgid "``example_mod_2`` (next mod)" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:78 +msgid "" +"Message handler (core function that handles the incoming ``Message`` and " +"returns the outgoing ``Message``)" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:79 +msgid "``example_mod_2`` (on the way back)" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:80 +msgid "``example_mod_1`` (outermost mod on the way back)" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:82 +msgid "" +"Each mod has a chance to inspect and modify the incoming ``Message`` " +"before passing it to the next mod, and likewise with the outgoing " +"``Message`` before returning it up the stack." +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:87 +msgid "" +"By following this guide, you have learned how to effectively use mods to " +"enhance your ``ClientApp``'s functionality. Remember that the order of " +"mods is crucial and affects how the input and output are processed." +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:89 +msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" 
msgstr "" #: ../../source/how-to-use-strategies.rst:2 msgid "Use strategies" -msgstr "" +msgstr "使用策略" #: ../../source/how-to-use-strategies.rst:4 msgid "" "Flower allows full customization of the learning process through the " ":code:`Strategy` abstraction. A number of built-in strategies are " "provided in the core framework." -msgstr "" +msgstr "Flower 允许通过 :code:`Strategy` 抽象类对学习过程进行完全定制。核心框架中提供了许多内置策略。" #: ../../source/how-to-use-strategies.rst:6 msgid "" "There are three ways to customize the way Flower orchestrates the " "learning process on the server side:" -msgstr "" +msgstr "有三种方法可以自定义 Flower 在服务器端协调学习过程的方式:" #: ../../source/how-to-use-strategies.rst:8 msgid "Use an existing strategy, for example, :code:`FedAvg`" -msgstr "" +msgstr "使用现有策略,例如 :code:`FedAvg`" #: ../../source/how-to-use-strategies.rst:9 #: ../../source/how-to-use-strategies.rst:40 msgid "Customize an existing strategy with callback functions" -msgstr "" +msgstr "使用回调函数定制现有策略" #: ../../source/how-to-use-strategies.rst:10 #: ../../source/how-to-use-strategies.rst:87 msgid "Implement a novel strategy" -msgstr "" +msgstr "实施新策略" #: ../../source/how-to-use-strategies.rst:14 msgid "Use an existing strategy" -msgstr "" +msgstr "使用现有策略" #: ../../source/how-to-use-strategies.rst:16 msgid "" "Flower comes with a number of popular federated learning strategies " "built-in. A built-in strategy can be instantiated as follows:" -msgstr "" +msgstr "Flower 内置了许多流行的联邦学习策略。内置策略的实例化方法如下:" #: ../../source/how-to-use-strategies.rst:25 msgid "" "This creates a strategy with all parameters left at their default values " "and passes it to the :code:`start_server` function. It is usually " "recommended to adjust a few parameters during instantiation:" -msgstr "" +msgstr "这会创建一个所有参数都保持默认值的策略,并将其传递给 :code:`start_server` 函数。通常建议在实例化过程中调整一些参数:" #: ../../source/how-to-use-strategies.rst:42 msgid "" "Existing strategies provide several ways to customize their behaviour. 
" "Callback functions allow strategies to call user-provided code during " "execution." -msgstr "" +msgstr "现有的策略提供了多种自定义行为的方法。回调函数允许策略在执行过程中调用用户提供的代码。" #: ../../source/how-to-use-strategies.rst:45 msgid "Configuring client fit and client evaluate" -msgstr "" +msgstr "配置客户匹配和客户评估" #: ../../source/how-to-use-strategies.rst:47 msgid "" @@ -5425,6 +6866,9 @@ msgid "" " and :code:`client.evaluate` functions during each round of federated " "learning." msgstr "" +"服务器可以通过向 :code:`on_fit_config_fn` " +"提供一个函数,在每一轮向客户端传递新的配置值。提供的函数将被策略调用,并且必须返回一个配置键值对的字典,该字典将被发送到客户端。在每一轮联邦学习期间,它必须返回一个任意配置值" +" dictionary :code:`client.fit`和 :code:`client.evaluate`函数。" #: ../../source/how-to-use-strategies.rst:75 msgid "" @@ -5434,6 +6878,8 @@ msgid "" "the dictionary returned by the :code:`on_fit_config_fn` in its own " ":code:`client.fit()` function." msgstr "" +":code:`on_fit_config_fn`可用于将任意配置值从服务器传递到客户端,并在每一轮改变这些值,例如,调整学习率。客户端将在自己的 " +":code:`client.fit()` 函数中接收 :code:`on_fit_config_fn` 返回的字典。" #: ../../source/how-to-use-strategies.rst:78 msgid "" @@ -5441,16 +6887,18 @@ msgid "" ":code:`on_evaluate_config_fn` to customize the configuration sent to " ":code:`client.evaluate()`" msgstr "" +"与 :code:`on_fit_config_fn` 类似,还有 :code:`on_evaluate_config_fn` 用于定制发送到 " +":code:`client.evaluate()` 的配置" #: ../../source/how-to-use-strategies.rst:81 msgid "Configuring server-side evaluation" -msgstr "" +msgstr "配置服务器端评估" #: ../../source/how-to-use-strategies.rst:83 msgid "" "Server-side evaluation can be enabled by passing an evaluation function " "to :code:`evaluate_fn`." -msgstr "" +msgstr "服务器端评估可通过向 :code:`evaluate_fn` 传递评估函数来启用。" #: ../../source/how-to-use-strategies.rst:89 msgid "" @@ -5458,85 +6906,87 @@ msgid "" "the most flexibility. Read the `Implementing Strategies `_ guide to learn more." 
msgstr "" +"编写完全自定义的策略涉及的内容较多,但灵活性最高。阅读 `实施策略 _ " +"指南,了解更多信息。" #: ../../source/index.rst:34 msgid "Tutorial" -msgstr "" +msgstr "教程" #: ../../source/index.rst:44 msgid "Quickstart tutorials" -msgstr "" +msgstr "快速入门教程" #: ../../source/index.rst:75 ../../source/index.rst:79 msgid "How-to guides" -msgstr "" +msgstr "操作指南" -#: ../../source/index.rst:95 +#: ../../source/index.rst:97 msgid "Legacy example guides" -msgstr "" +msgstr "旧版指南范例" -#: ../../source/index.rst:106 ../../source/index.rst:110 +#: ../../source/index.rst:108 ../../source/index.rst:112 msgid "Explanations" -msgstr "" +msgstr "说明" -#: ../../source/index.rst:122 +#: None:-1 msgid "API reference" -msgstr "" +msgstr "应用程序接口参考" -#: ../../source/index.rst:129 +#: ../../source/index.rst:137 msgid "Reference docs" -msgstr "" +msgstr "参考文档" -#: ../../source/index.rst:145 +#: ../../source/index.rst:153 msgid "Contributor tutorials" -msgstr "" +msgstr "贡献者教程" -#: ../../source/index.rst:152 +#: ../../source/index.rst:160 msgid "Contributor how-to guides" -msgstr "" +msgstr "投稿指南" -#: ../../source/index.rst:164 +#: ../../source/index.rst:173 msgid "Contributor explanations" -msgstr "" +msgstr "贡献者解释" -#: ../../source/index.rst:170 +#: ../../source/index.rst:179 msgid "Contributor references" -msgstr "" +msgstr "贡献者参考资料" #: ../../source/index.rst:-1 msgid "" "Check out the documentation of the main Flower Framework enabling easy " "Python development for Federated Learning." -msgstr "" +msgstr "查看主 Flower Framework 的文档,轻松实现联邦学习的 Python 开发。" #: ../../source/index.rst:2 msgid "Flower Framework Documentation" -msgstr "" +msgstr "Flower 框架文档" #: ../../source/index.rst:7 msgid "" -"Welcome to Flower's documentation. `Flower `_ is a " +"Welcome to Flower's documentation. `Flower `_ is a " "friendly federated learning framework." 
-msgstr "" +msgstr "欢迎访问 Flower 文档。`Flower `_ 是一个友好的联邦学习框架。" #: ../../source/index.rst:11 msgid "Join the Flower Community" -msgstr "" +msgstr "加入 Flower 社区" #: ../../source/index.rst:13 msgid "" "The Flower Community is growing quickly - we're a friendly group of " "researchers, engineers, students, professionals, academics, and other " "enthusiasts." -msgstr "" +msgstr "Flower 社区发展迅速--我们是一个由研究人员、工程师、学生、专业人士、学者和其他爱好者组成的友好团体。" #: ../../source/index.rst:15 msgid "Join us on Slack" -msgstr "" +msgstr "在 Slack 上加入我们" #: ../../source/index.rst:23 msgid "Flower Framework" -msgstr "" +msgstr "Flower 框架" #: ../../source/index.rst:25 msgid "" @@ -5545,16 +6995,18 @@ msgid "" "setting. One of Flower's design goals was to make this simple. Read on to" " learn more." msgstr "" +"该用户指南面向希望使用 Flower 将现有机器学习工作负载引入联邦环境的研究人员和开发人员。Flower " +"的设计目标之一就是让这一切变得简单。请继续阅读,了解更多信息。" #: ../../source/index.rst:30 msgid "Tutorials" -msgstr "" +msgstr "教程" #: ../../source/index.rst:32 msgid "" "A learning-oriented series of federated learning tutorials, the best " "place to start." -msgstr "" +msgstr "以学习为导向的联邦学习教程系列,最好的起点。" #: ../../source/index.rst:62 msgid "" @@ -5568,82 +7020,357 @@ msgid "" "` | :doc:`Android ` | :doc:`iOS `" msgstr "" +"快速入门教程: :doc:`PyTorch ` | :doc:`TensorFlow " +"` | :doc:`🤗 Transformers ` | :doc:`JAX ` | " +":doc:`Pandas ` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`MXNet ` | :doc" +":`scikit-learn ` | :doc:`XGBoost " +"` | :doc:`Android ` | :doc:`iOS `" #: ../../source/index.rst:64 msgid "We also made video tutorials for PyTorch:" -msgstr "" +msgstr "我们还为 PyTorch 制作了视频教程:" #: ../../source/index.rst:69 msgid "And TensorFlow:" -msgstr "" +msgstr "还有 TensorFlow:" #: ../../source/index.rst:77 msgid "" "Problem-oriented how-to guides show step-by-step how to achieve a " "specific goal." 
-msgstr "" +msgstr "以问题为导向的 \"如何做 \"指南逐步展示如何实现特定目标。" -#: ../../source/index.rst:108 +#: ../../source/index.rst:110 msgid "" "Understanding-oriented concept guides explain and discuss key topics and " "underlying ideas behind Flower and collaborative AI." -msgstr "" +msgstr "以理解为导向的概念指南解释并讨论了Flower和协作式人工智能背后的关键主题和基本思想。" -#: ../../source/index.rst:118 +#: ../../source/index.rst:120 msgid "References" -msgstr "" +msgstr "参考资料" -#: ../../source/index.rst:120 +#: ../../source/index.rst:122 msgid "Information-oriented API reference and other reference material." +msgstr "以信息为导向的 API 参考资料和其他参考资料。" + +#: ../../source/index.rst:131::1 +msgid ":py:obj:`flwr `\\" msgstr "" -#: ../../source/index.rst:140 -msgid "Contributor docs" +#: ../../source/index.rst:131::1 flwr:1 of +msgid "Flower main package." msgstr "" -#: ../../source/index.rst:142 +#: ../../source/index.rst:148 +msgid "Contributor docs" +msgstr "贡献者文档" + +#: ../../source/index.rst:150 msgid "" "The Flower community welcomes contributions. The following docs are " "intended to help along the way." -msgstr "" +msgstr "Flower 社区欢迎您的贡献。以下文档旨在为您提供帮助。" #: ../../source/ref-api-cli.rst:2 msgid "Flower CLI reference" -msgstr "" +msgstr "Flower CLI 参考" #: ../../source/ref-api-cli.rst:7 -msgid "flower-server" -msgstr "" +msgid "flower-superlink" +msgstr "flower-superlink" #: ../../source/ref-api-cli.rst:17 msgid "flower-driver-api" -msgstr "" +msgstr "flower-driver-api" #: ../../source/ref-api-cli.rst:27 msgid "flower-fleet-api" +msgstr "flower-fleet-api" + +#: ../../source/ref-api/flwr.rst:2 +#, fuzzy +msgid "flwr" +msgstr "Flower" + +#: ../../source/ref-api/flwr.rst:25 ../../source/ref-api/flwr.server.rst:48 +msgid "Modules" +msgstr "" + +#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.client `\\" +msgstr "" + +#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of +msgid "Flower client." 
+msgstr "Flower 客户端。" + +#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.common `\\" +msgstr "" + +#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of +msgid "Common components shared between server and client." +msgstr "服务器和客户端共享的通用组件。" + +#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.server `\\" msgstr "" -#: ../../source/ref-api-flwr.rst:2 -msgid "flwr (Python API reference)" +#: ../../source/ref-api/flwr.rst:35::1 +#: ../../source/ref-api/flwr.server.rst:37::1 flwr.server:1 +#: flwr.server.server.Server:1 of +msgid "Flower server." +msgstr "Flower 服务器。" + +#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.simulation `\\" msgstr "" -#: ../../source/ref-api-flwr.rst:8 +#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of +#, fuzzy +msgid "Flower simulation." +msgstr "运行模拟" + +#: ../../source/ref-api/flwr.client.rst:2 msgid "client" +msgstr "客户端" + +#: ../../source/ref-api/flwr.client.rst:13 +#: ../../source/ref-api/flwr.common.rst:13 +#: ../../source/ref-api/flwr.server.driver.rst:13 +#: ../../source/ref-api/flwr.server.rst:13 +#: ../../source/ref-api/flwr.simulation.rst:13 +#, fuzzy +msgid "Functions" +msgstr "四种函数:" + +#: ../../source/ref-api/flwr.client.rst:24::1 +msgid ":py:obj:`run_client_app `\\ \\(\\)" msgstr "" -#: flwr.client:1 of -msgid "Flower client." +#: ../../source/ref-api/flwr.client.rst:24::1 +#: flwr.client.app.run_client_app:1 of +#, fuzzy +msgid "Run Flower client app." +msgstr "Flower 客户端。" + +#: ../../source/ref-api/flwr.client.rst:24::1 +msgid "" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:24::1 +#: flwr.client.app.start_client:1 of +msgid "Start a Flower client node which connects to a Flower server." 
+msgstr "启动一个 Flower 客户节点,连接到 Flower 服务器。" + +#: ../../source/ref-api/flwr.client.rst:24::1 +msgid "" +":py:obj:`start_numpy_client `\\ \\(\\*\\," +" server\\_address\\, client\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:24::1 +#: flwr.client.app.start_numpy_client:1 of +msgid "Start a Flower NumPyClient which connects to a gRPC server." +msgstr "启动 Flower NumPyClient,连接到 gRPC 服务器。" + +#: ../../source/ref-api/flwr.client.rst:26 +#: ../../source/ref-api/flwr.common.rst:31 +#: ../../source/ref-api/flwr.server.driver.rst:24 +#: ../../source/ref-api/flwr.server.rst:28 +#: ../../source/ref-api/flwr.server.strategy.rst:17 +msgid "Classes" +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:33::1 +msgid ":py:obj:`Client `\\ \\(\\)" msgstr "" +#: ../../source/ref-api/flwr.client.rst:33::1 #: flwr.client.client.Client:1 of msgid "Abstract base class for Flower clients." +msgstr "Flower 客户端的抽象基类。" + +#: ../../source/ref-api/flwr.client.rst:33::1 +msgid "" +":py:obj:`ClientApp `\\ \\(client\\_fn\\[\\, " +"mods\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.rst:33::1 +#: flwr.client.clientapp.ClientApp:1 of +#, fuzzy +msgid "Flower ClientApp." +msgstr "Flower 客户端。" + +#: ../../source/ref-api/flwr.client.rst:33::1 +msgid ":py:obj:`NumPyClient `\\ \\(\\)" msgstr "" +#: ../../source/ref-api/flwr.client.rst:33::1 +#: flwr.client.numpy_client.NumPyClient:1 of +msgid "Abstract base class for Flower clients using NumPy." 
+msgstr "使用 NumPy 的 Flower 客户端的抽象基类。" + +#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 +#: flwr.server.client_manager.ClientManager:1 +#: flwr.server.strategy.strategy.Strategy:1 of +msgid "Bases: :py:class:`~abc.ABC`" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:15 +#: ../../source/ref-api/flwr.client.ClientApp.rst:15 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 +#: ../../source/ref-api/flwr.common.FitIns.rst:15 +#: ../../source/ref-api/flwr.common.FitRes.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 +#: ../../source/ref-api/flwr.common.Parameters.rst:15 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 +#: ../../source/ref-api/flwr.common.Status.rst:15 +#: ../../source/ref-api/flwr.server.ClientManager.rst:15 +#: ../../source/ref-api/flwr.server.History.rst:15 +#: ../../source/ref-api/flwr.server.Server.rst:15 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 +#: ../../source/ref-api/flwr.server.driver.Driver.rst:15 +#: ../../source/ref-api/flwr.server.driver.GrpcDriver.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 +#: 
../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 +msgid "Methods" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`evaluate `\\ \\(ins\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 #: flwr.client.client.Client.evaluate:1 #: flwr.client.numpy_client.NumPyClient.evaluate:1 of msgid "Evaluate the provided parameters using the locally held dataset." +msgstr "使用本地数据集评估所提供的参数。" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`fit `\\ \\(ins\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.fit:1 of +msgid "Refine the provided parameters using the locally held dataset." 
+msgstr "利用本地数据集完善所提供的参数。" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_context `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_context:1 +#: flwr.client.numpy_client.NumPyClient.get_context:1 of +#, fuzzy +msgid "Get the run context from this client." +msgstr "评估客户端的反应。" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_parameters `\\ \\(ins\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_parameters:1 +#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of +msgid "Return the current local model parameters." +msgstr "返回当前本地模型参数。" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_properties `\\ \\(ins\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.get_properties:1 of +msgid "Return set of client's properties." +msgstr "返回客户端的属性集。" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`set_context `\\ \\(context\\)" +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.set_context:1 +#: flwr.client.numpy_client.NumPyClient.set_context:1 of +msgid "Apply a run context to this client." +msgstr "" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`to_client `\\ \\(\\)" msgstr "" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.to_client:1 of +msgid "Return client (itself)." 
+msgstr "返回客户端(本身)。" + +#: ../../source/ref-api/flwr.client.Client.rst:46 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 +#: ../../source/ref-api/flwr.common.Code.rst:19 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 +#: ../../source/ref-api/flwr.common.EventType.rst:19 +#: ../../source/ref-api/flwr.common.FitIns.rst:25 +#: ../../source/ref-api/flwr.common.FitRes.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 +#: ../../source/ref-api/flwr.common.Parameters.rst:25 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 +#: ../../source/ref-api/flwr.common.Status.rst:25 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 +msgid "Attributes" +msgstr "" + +#: flwr.client.client.Client.evaluate:1::1 of +msgid ":py:obj:`context `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Parameters.rst:2 #: flwr.client.app.start_client flwr.client.app.start_numpy_client #: flwr.client.client.Client.evaluate flwr.client.client.Client.fit #: flwr.client.client.Client.get_parameters @@ -5652,20 +7379,23 @@ msgstr "" #: flwr.client.numpy_client.NumPyClient.fit #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.server.app.start_server flwr.server.strategy.bulyan.Bulyan.__init__ +#: flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.ClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.unregister +#: 
flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.app.start_driver flwr.server.driver.driver.Driver +#: flwr.server.strategy.bulyan.Bulyan #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__ -#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__ -#: flwr.server.strategy.fedadam.FedAdam.__init__ -#: flwr.server.strategy.fedavg.FedAvg.__init__ -#: flwr.server.strategy.fedavgm.FedAvgM.__init__ -#: flwr.server.strategy.fedopt.FedOpt.__init__ -#: flwr.server.strategy.fedprox.FedProx.__init__ -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__ -#: flwr.server.strategy.fedyogi.FedYogi.__init__ -#: flwr.server.strategy.krum.Krum.__init__ -#: flwr.server.strategy.qfedavg.QFedAvg.__init__ +#: flwr.server.strategy.fedadagrad.FedAdagrad +#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg +#: flwr.server.strategy.fedavg_android.FedAvgAndroid +#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt +#: flwr.server.strategy.fedprox.FedProx +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg +#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate #: flwr.server.strategy.strategy.Strategy.aggregate_fit #: flwr.server.strategy.strategy.Strategy.configure_evaluate @@ -5674,14 +7404,14 @@ msgstr "" #: flwr.server.strategy.strategy.Strategy.initialize_parameters #: flwr.simulation.app.start_simulation of msgid "Parameters" -msgstr "" +msgstr "参数" #: flwr.client.client.Client.evaluate:3 of msgid "" "The evaluation instructions containing (global) model parameters received" " from the server and a dictionary of configuration values used to " "customize the local evaluation process." 
-msgstr "" +msgstr "评估指令包含从服务器接收的(全局)模型参数,以及用于定制本地评估流程的配置值字典。" #: flwr.client.client.Client.evaluate flwr.client.client.Client.fit #: flwr.client.client.Client.get_parameters @@ -5691,6 +7421,12 @@ msgstr "" #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties #: flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.app.start_driver #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate @@ -5701,13 +7437,13 @@ msgstr "" #: flwr.server.strategy.strategy.Strategy.initialize_parameters #: flwr.simulation.app.start_simulation of msgid "Returns" -msgstr "" +msgstr "返回" #: flwr.client.client.Client.evaluate:8 of msgid "" "The evaluation result containing the loss on the local dataset and other " "details such as the number of local data examples used for evaluation." 
-msgstr "" +msgstr "评估结果包含本地数据集上的损失值和其他详细信息,如用于评估的本地数据的数量。" #: flwr.client.client.Client.evaluate flwr.client.client.Client.fit #: flwr.client.client.Client.get_parameters @@ -5715,6 +7451,12 @@ msgstr "" #: flwr.client.numpy_client.NumPyClient.get_parameters #: flwr.client.numpy_client.NumPyClient.get_properties #: flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.app.start_driver #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate @@ -5725,130 +7467,142 @@ msgstr "" #: flwr.server.strategy.strategy.Strategy.initialize_parameters #: flwr.simulation.app.start_simulation of msgid "Return type" -msgstr "" - -#: flwr.client.client.Client.fit:1 of -msgid "Refine the provided parameters using the locally held dataset." -msgstr "" +msgstr "返回类型" #: flwr.client.client.Client.fit:3 of msgid "" "The training instructions containing (global) model parameters received " "from the server and a dictionary of configuration values used to " "customize the local training process." -msgstr "" +msgstr "训练指令,包含从服务器接收的(全局)模型参数,以及用于定制本地训练过程的配置值字典。" #: flwr.client.client.Client.fit:8 of msgid "" "The training result containing updated parameters and other details such " "as the number of local training examples used for training." -msgstr "" - -#: flwr.client.client.Client.get_parameters:1 -#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of -msgid "Return the current local model parameters." 
-msgstr "" +msgstr "训练结果包含更新的参数和其他详细信息,如用于训练的本地训练示例的数量。" #: flwr.client.client.Client.get_parameters:3 of msgid "" "The get parameters instructions received from the server containing a " "dictionary of configuration values." -msgstr "" +msgstr "从服务器接收的获取参数指令包含配置值字典。" #: flwr.client.client.Client.get_parameters:7 of msgid "The current local model parameters." -msgstr "" - -#: flwr.client.client.Client.get_properties:1 of -msgid "Return set of client's properties." -msgstr "" +msgstr "当前的本地模型参数。" #: flwr.client.client.Client.get_properties:3 of msgid "" "The get properties instructions received from the server containing a " "dictionary of configuration values." -msgstr "" +msgstr "从服务器接收的获取属性指令包含配置值字典。" #: flwr.client.client.Client.get_properties:7 of msgid "The current client properties." -msgstr "" - -#: flwr.client.client.Client.to_client:1 of -msgid "Return client (itself)." -msgstr "" +msgstr "当前客户端属性。" + +#: ../../source/ref-api/flwr.client.ClientApp.rst:2 +#, fuzzy +msgid "ClientApp" +msgstr "客户端" + +#: flwr.client.clientapp.ClientApp:1 flwr.common.typing.ClientMessage:1 +#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 +#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 +#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 +#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 +#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 +#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 +#: flwr.common.typing.Status:1 flwr.server.app.ServerConfig:1 +#: flwr.server.driver.driver.Driver:1 +#: flwr.server.driver.grpc_driver.GrpcDriver:1 flwr.server.history.History:1 +#: flwr.server.server.Server:1 of +msgid "Bases: :py:class:`object`" +msgstr "" + +#: flwr.client.app.start_client:33 flwr.client.app.start_numpy_client:36 +#: flwr.client.clientapp.ClientApp:4 flwr.server.app.start_server:41 +#: flwr.server.driver.app.start_driver:30 of +msgid "Examples" 
+msgstr "实例" -#: ../../source/ref-api-flwr.rst:24 -msgid "start_client" +#: flwr.client.clientapp.ClientApp:5 of +msgid "" +"Assuming a typical `Client` implementation named `FlowerClient`, you can " +"wrap it in a `ClientApp` as follows:" msgstr "" -#: flwr.client.app.start_client:1 of -msgid "Start a Flower client node which connects to a Flower server." +#: flwr.client.clientapp.ClientApp:16 of +msgid "" +"If the above code is in a Python module called `client`, it can be " +"started as follows:" msgstr "" -#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:3 of +#: flwr.client.clientapp.ClientApp:21 of msgid "" -"The IPv4 or IPv6 address of the server. If the Flower server runs on the " -"same machine on port 8080, then `server_address` would be " -"`\"[::]:8080\"`." +"In this `client:app` example, `client` refers to the Python module " +"`client.py` in which the previous code lives in and `app` refers to the " +"global attribute `app` that points to an object of type `ClientApp`." msgstr "" -#: flwr.client.app.start_client:7 of -msgid "..." -msgstr "" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 +msgid "NumPyClient" +msgstr "NumPyClient" -#: flwr.client.app.start_client:9 of -msgid "A callable that instantiates a Client. (default: None)" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`evaluate `\\ \\(parameters\\, " +"config\\)" msgstr "" -#: flwr.client.app.start_client:11 of -msgid "" -"An implementation of the abstract base class `flwr.client.Client` " -"(default: None)" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" msgstr "" -#: flwr.client.app.start_client:14 flwr.client.app.start_numpy_client:9 of -msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" server. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. 
Note that the " -"Flower server needs to be started with the same value (see " -"`flwr.server.start_server`), otherwise it will not know about the " -"increased limit and block larger messages." +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.fit:1 of +msgid "Train the provided parameters using the locally held dataset." +msgstr "使用本地数据集训练所提供的参数。" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`get_context `\\ \\(\\)" msgstr "" -#: flwr.client.app.start_client:21 flwr.client.app.start_numpy_client:16 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 msgid "" -"The PEM-encoded root certificates as a byte string or a path string. If " -"provided, a secure connection using the certificates will be established " -"to an SSL-enabled Flower server." +":py:obj:`get_parameters `\\ " +"\\(config\\)" msgstr "" -#: flwr.client.app.start_client:25 flwr.client.app.start_numpy_client:20 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 msgid "" -"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " -"bidirectional streaming - 'grpc-rere': gRPC, request-response " -"(experimental) - 'rest': HTTP (experimental)" +":py:obj:`get_properties `\\ " +"\\(config\\)" msgstr "" -#: flwr.client.app.start_client:32 flwr.client.app.start_numpy_client:27 -#: flwr.server.app.start_server:41 of -msgid "Examples" -msgstr "" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.get_properties:1 of +msgid "Return a client's set of properties." 
+msgstr "返回客户端的属性集。" -#: flwr.client.app.start_client:33 of -msgid "Starting a gRPC client with an insecure server connection:" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`set_context `\\ " +"\\(context\\)" msgstr "" -#: flwr.client.app.start_client:43 flwr.client.app.start_numpy_client:35 of -msgid "Starting an SSL-enabled gRPC client:" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`to_client `\\ \\(\\)" msgstr "" -#: ../../source/ref-api-flwr.rst:32 -msgid "NumPyClient" -msgstr "" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.to_client:1 of +msgid "Convert to object to Client type and return it." +msgstr "将对象转换为客户类型并返回。" -#: flwr.client.numpy_client.NumPyClient:1 of -msgid "Abstract base class for Flower clients using NumPy." +#: flwr.client.numpy_client.NumPyClient.evaluate:1::1 of +msgid ":py:obj:`context `\\" msgstr "" #: flwr.client.numpy_client.NumPyClient.evaluate:3 @@ -5859,7 +7613,7 @@ msgstr "" #: flwr.server.strategy.strategy.Strategy.configure_fit:5 #: flwr.server.strategy.strategy.Strategy.evaluate:8 of msgid "The current (global) model parameters." -msgstr "" +msgstr "当前(全局)模型参数。" #: flwr.client.numpy_client.NumPyClient.evaluate:5 of msgid "" @@ -5867,7 +7621,7 @@ msgid "" "on the client. It can be used to communicate arbitrary values from the " "server to the client, for example, to influence the number of examples " "used for evaluation." -msgstr "" +msgstr "允许服务器影响客户端评估的配置参数。它可用于将任意值从服务器传送到客户端,例如,影响用于评估的示例数量。" #: flwr.client.numpy_client.NumPyClient.evaluate:11 of msgid "" @@ -5877,16 +7631,19 @@ msgid "" "arbitrary string keys to values of type bool, bytes, float, int, or " "str. It can be used to communicate arbitrary values back to the server." 
msgstr "" +"**loss** (*float*) -- 模型在本地数据集上的评估损失值。**num_examples** (*int*) -- " +"用于评估的示例数量。**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 " +"bool、bytes、float、int 或 str 类型值的字典。它可用于将任意值传回服务器。" #: flwr.client.numpy_client.NumPyClient.evaluate:11 of msgid "" "**loss** (*float*) -- The evaluation loss of the model on the local " "dataset." -msgstr "" +msgstr "**loss** (*float*) -- 模型在本地数据集上的评估损失值。" #: flwr.client.numpy_client.NumPyClient.evaluate:12 of msgid "**num_examples** (*int*) -- The number of examples used for evaluation." -msgstr "" +msgstr "**num_examples** (*int*) -- 用于评估的示例数量。" #: flwr.client.numpy_client.NumPyClient.evaluate:13 #: flwr.client.numpy_client.NumPyClient.fit:13 of @@ -5895,6 +7652,8 @@ msgid "" "string keys to values of type bool, bytes, float, int, or str. It can be " "used to communicate arbitrary values back to the server." msgstr "" +"**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 bool、bytes、float、int 或 " +"str 类型值的字典。它可用于将任意值传回服务器。" #: flwr.client.numpy_client.NumPyClient.evaluate:19 of msgid "" @@ -5902,10 +7661,8 @@ msgid "" "format (int, float, float, Dict[str, Scalar]) have been deprecated and " "removed since Flower 0.19." msgstr "" - -#: flwr.client.numpy_client.NumPyClient.fit:1 of -msgid "Train the provided parameters using the locally held dataset." -msgstr "" +"自 Flower 0.19 起,之前的返回类型格式(int、float、float)和扩展格式(int、float、float、Dict[str," +" Scalar])已被弃用和移除。" #: flwr.client.numpy_client.NumPyClient.fit:5 of msgid "" @@ -5913,7 +7670,7 @@ msgid "" "the client. It can be used to communicate arbitrary values from the " "server to the client, for example, to set the number of (local) training " "epochs." -msgstr "" +msgstr "允许服务器影响客户端训练的配置参数。它可用于将任意值从服务器传送到客户端,例如设置(本地)训练遍历数。" #: flwr.client.numpy_client.NumPyClient.fit:11 of msgid "" @@ -5923,36 +7680,35 @@ msgid "" "string keys to values of type bool, bytes, float, int, or str. It can " "be used to communicate arbitrary values back to the server." 
msgstr "" +"**parameters** (*NDArrays*) -- 本地更新的模型参数。**num_examples** (*int*) -- " +"用于训练的示例数量。**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 " +"bool、bytes、float、int 或 str 类型值的字典。它可用于将任意值传回服务器。" #: flwr.client.numpy_client.NumPyClient.fit:11 of msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." -msgstr "" +msgstr "**parameters** (*NDArrays*) -- 本地更新的模型参数。" #: flwr.client.numpy_client.NumPyClient.fit:12 of msgid "**num_examples** (*int*) -- The number of examples used for training." -msgstr "" +msgstr "**num_examples** (*int*) -- 用于训练的数据数量。" #: flwr.client.numpy_client.NumPyClient.get_parameters:3 of msgid "" "Configuration parameters requested by the server. This can be used to " "tell the client which parameters are needed along with some Scalar " "attributes." -msgstr "" +msgstr "服务器请求的配置参数。这可以用来告诉客户端需要哪些参数以及一些标量属性。" #: flwr.client.numpy_client.NumPyClient.get_parameters:8 of msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." -msgstr "" - -#: flwr.client.numpy_client.NumPyClient.get_properties:1 of -msgid "Return a client's set of properties." -msgstr "" +msgstr "**parameters** -- NumPy ndarrays 的本地模型参数列表。" #: flwr.client.numpy_client.NumPyClient.get_properties:3 of msgid "" "Configuration parameters requested by the server. This can be used to " "tell the client which properties are needed along with some Scalar " "attributes." -msgstr "" +msgstr "服务器请求的配置参数。这可以用来告诉客户端需要哪些属性以及一些标量属性。" #: flwr.client.numpy_client.NumPyClient.get_properties:8 of msgid "" @@ -5960,1275 +7716,4600 @@ msgid "" " type bool, bytes, float, int, or str. It can be used to communicate " "arbitrary property values back to the server." msgstr "" +"**properties** -- 将任意字符串键映射到 bool、bytes、float、int 或 str " +"类型值的字典。它可用于将任意属性值传回服务器。" -#: flwr.client.numpy_client.NumPyClient.to_client:1 of -msgid "Convert to object to Client type and return it." 
+#: ../../source/ref-api/flwr.client.run_client_app.rst:2 +msgid "run\\_client\\_app" msgstr "" -#: ../../source/ref-api-flwr.rst:41 -msgid "start_numpy_client" -msgstr "" +#: ../../source/ref-api/flwr.client.start_client.rst:2 +#, fuzzy +msgid "start\\_client" +msgstr "启动客户端" -#: flwr.client.app.start_numpy_client:1 of -msgid "Start a Flower NumPyClient which connects to a gRPC server." +#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +msgid "" +"The IPv4 or IPv6 address of the server. If the Flower server runs on the " +"same machine on port 8080, then `server_address` would be " +"`\"[::]:8080\"`." msgstr "" +"服务器的 IPv4 或 IPv6 地址:如果 Flower 服务器在同一台机器上运行,端口为 " +"8080,则`server_address`应为`\"[::]:8080\"`。" -#: flwr.client.app.start_numpy_client:7 of -msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." -msgstr "" +#: flwr.client.app.start_client:7 of +msgid "A callable that instantiates a Client. (default: None)" +msgstr "用于实例化客户端的可调用程序。(默认值:无)" -#: flwr.client.app.start_numpy_client:28 of -msgid "Starting a client with an insecure server connection:" -msgstr "" +#: flwr.client.app.start_client:9 of +msgid "" +"An implementation of the abstract base class `flwr.client.Client` " +"(default: None)" +msgstr "抽象基类 `flwr.client.Client` 的实现(默认值:无)" -#: ../../source/ref-api-flwr.rst:49 -msgid "start_simulation" +#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of +msgid "" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" server. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower server needs to be started with the same value (see " +"`flwr.server.start_server`), otherwise it will not know about the " +"increased limit and block larger messages." 
msgstr "" +"可与 Flower 服务器交换的 gRPC 信息的最大长度:默认值对大多数模型都足够了。训练超大模型的用户可能需要增加该值。请注意,Flower " +"服务器需要以相同的值启动(请参阅 `flwr.server.start_server`),否则它将不知道增加的限制并阻止更大的消息。" -#: flwr.simulation.app.start_simulation:1 of -msgid "Start a Ray-based Flower simulation server." -msgstr "" +#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 +#: flwr.server.driver.app.start_driver:21 of +msgid "" +"The PEM-encoded root certificates as a byte string or a path string. If " +"provided, a secure connection using the certificates will be established " +"to an SSL-enabled Flower server." +msgstr "字节字符串或路径字符串形式的 PEM 编码根证书。如果提供,将使用这些证书与启用 SSL 的 Flower 服务器建立安全连接。" -#: flwr.simulation.app.start_simulation:3 of +#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of msgid "" -"A function creating client instances. The function must take a single " -"`str` argument called `cid`. It should return a single client instance of" -" type Client. Note that the created client instances are ephemeral and " -"will often be destroyed after a single method invocation. Since client " -"instances are not long-lived, they should not attempt to carry state over" -" method invocations. Any state required by the instance (model, dataset, " -"hyperparameters, ...) should be (re-)created in either the call to " -"`client_fn` or the call to any of the client methods (e.g., load " -"evaluation data in the `evaluate` method itself)." +"Starts an insecure gRPC connection when True. Enables HTTPS connection " +"when False, using system certificates if `root_certificates` is None." msgstr "" -#: flwr.simulation.app.start_simulation:13 of +#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of msgid "" -"The total number of clients in this simulation. This must be set if " -"`clients_ids` is not set and vice-versa." +"Configure the transport layer. 
Allowed values: - 'grpc-bidi': gRPC, " +"bidirectional streaming - 'grpc-rere': gRPC, request-response " +"(experimental) - 'rest': HTTP (experimental)" msgstr "" +"配置传输层:允许的值包括 - 'grpc-bidi': gRPC,双向流 - 'grpc-rere': gRPC,请求-响应(实验性) - " +"'rest': HTTP(实验性)" -#: flwr.simulation.app.start_simulation:16 of +#: flwr.client.app.start_client:34 flwr.client.app.start_numpy_client:37 of +msgid "Starting a gRPC client with an insecure server connection:" +msgstr "使用不安全的服务器连接启动 gRPC 客户端:" + +#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:44 of +#, fuzzy +msgid "Starting an SSL-enabled gRPC client using system certificates:" +msgstr "启动支持 SSL 的 gRPC 客户端:" + +#: flwr.client.app.start_client:52 flwr.client.app.start_numpy_client:52 of +#, fuzzy +msgid "Starting an SSL-enabled gRPC client using provided certificates:" +msgstr "启动支持 SSL 的 gRPC 客户端:" + +#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 +#, fuzzy +msgid "start\\_numpy\\_client" +msgstr "start_numpy_client" + +#: flwr.client.app.start_numpy_client:5 of msgid "" -"List `client_id`s for each client. This is only required if `num_clients`" -" is not set. Setting both `num_clients` and `clients_ids` with " -"`len(clients_ids)` not equal to `num_clients` generates an error." +"This function is deprecated since 1.7.0. Use " +":code:`flwr.client.start_client` instead and first convert your " +":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " +":code:`to_client()` method." msgstr "" -#: flwr.simulation.app.start_simulation:20 of -msgid "" -"\"num_gpus\": 0.0}` CPU and GPU resources for a single client. Supported " -"keys are `num_cpus` and `num_gpus`. To understand the GPU utilization " -"caused by `num_gpus`, as well as using custom resources, please consult " -"the Ray documentation." +#: flwr.client.app.start_numpy_client:13 of +msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." 
+msgstr "抽象基类 `flwr.client.NumPyClient` 的实现。" + +#: ../../source/ref-api/flwr.common.rst:2 +msgid "common" +msgstr "常见" + +#: ../../source/ref-api/flwr.common.rst:29::1 +msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" msgstr "" -#: flwr.simulation.app.start_simulation:25 of +#: ../../source/ref-api/flwr.common.rst:29::1 +#: flwr.common.parameter.bytes_to_ndarray:1 of +msgid "Deserialize NumPy ndarray from bytes." +msgstr "从字节反序列化 NumPy ndarray。" + +#: ../../source/ref-api/flwr.common.rst:29::1 msgid "" -"An implementation of the abstract base class `flwr.server.Server`. If no " -"instance is provided, then `start_server` will create one." +":py:obj:`configure `\\ \\(identifier\\[\\, " +"filename\\, host\\]\\)" msgstr "" -#: flwr.server.app.start_server:9 flwr.simulation.app.start_simulation:28 of +#: ../../source/ref-api/flwr.common.rst:29::1 +#: flwr.common.logger.configure:1 of +msgid "Configure logging to file and/or remote log server." +msgstr "配置将日志记录到文件和/或远程日志服务器。" + +#: ../../source/ref-api/flwr.common.rst:29::1 msgid "" -"Currently supported values are `num_rounds` (int, default: 1) and " -"`round_timeout` in seconds (float, default: None)." +":py:obj:`event `\\ \\(event\\_type\\[\\, " +"event\\_details\\]\\)" msgstr "" -#: flwr.simulation.app.start_simulation:31 of +#: ../../source/ref-api/flwr.common.rst:29::1 +#: flwr.common.telemetry.event:1 of +msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:29::1 msgid "" -"An implementation of the abstract base class `flwr.server.Strategy`. If " -"no strategy is provided, then `start_server` will use " -"`flwr.server.strategy.FedAvg`." +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: flwr.simulation.app.start_simulation:35 of +#: ../../source/ref-api/flwr.common.rst:29::1 logging.Logger.log:1 +#: of +msgid "Log 'msg % args' with the integer severity 'level'." 
+msgstr "以整数严重性 \"级别 \"记录 \"msg % args\"。" + +#: ../../source/ref-api/flwr.common.rst:29::1 +msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:29::1 +#: flwr.common.parameter.ndarray_to_bytes:1 of +msgid "Serialize NumPy ndarray to bytes." +msgstr "将 NumPy ndarray 序列化为字节。" + +#: ../../source/ref-api/flwr.common.rst:29::1 +msgid ":py:obj:`now `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:29::1 +#: flwr.common.date.now:1 of +msgid "Construct a datetime from time.time() with time zone set to UTC." +msgstr "从 time.time() 生成日期时间,时区设置为 UTC。" + +#: ../../source/ref-api/flwr.common.rst:29::1 msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_simulation` will use " -"`flwr.server.client_manager.SimpleClientManager`." +":py:obj:`ndarrays_to_parameters `\\ " +"\\(ndarrays\\)" msgstr "" -#: flwr.simulation.app.start_simulation:39 of +#: ../../source/ref-api/flwr.common.rst:29::1 +#: flwr.common.parameter.ndarrays_to_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 +#: of +msgid "Convert NumPy ndarrays to parameters object." +msgstr "将 NumPy ndarrays 转换为参数对象。" + +#: ../../source/ref-api/flwr.common.rst:29::1 msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args: { \"ignore_reinit_error\": True, " -"\"include_dashboard\": False } An empty dictionary can be used " -"(ray_init_args={}) to prevent any arguments from being passed to " -"ray.init." 
+":py:obj:`parameters_to_ndarrays `\\ " +"\\(parameters\\)" msgstr "" -#: flwr.simulation.app.start_simulation:39 of +#: ../../source/ref-api/flwr.common.rst:29::1 +#: flwr.common.parameter.parameters_to_ndarrays:1 of +msgid "Convert parameters object to NumPy ndarrays." +msgstr "将参数对象转换为 NumPy ndarrays。" + +#: ../../source/ref-api/flwr.common.rst:52::1 msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args:" +":py:obj:`ClientMessage `\\ " +"\\(\\[get\\_properties\\_res\\, ...\\]\\)" msgstr "" -#: flwr.simulation.app.start_simulation:43 of -msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.ClientMessage:1 of +msgid "ClientMessage is a container used to hold one result message." +msgstr "ClientMessage 是用于容纳一条结果信息的容器。" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid ":py:obj:`Code `\\ \\(value\\)" msgstr "" -#: flwr.simulation.app.start_simulation:45 of -msgid "" -"An empty dictionary can be used (ray_init_args={}) to prevent any " -"arguments from being passed to ray.init." +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.Code:1 of +msgid "Client status codes." +msgstr "客户端状态代码。" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" msgstr "" -#: flwr.simulation.app.start_simulation:48 of +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.DisconnectRes:1 of +msgid "DisconnectRes message from client to server." +msgstr "客户端向服务器发送 DisconnectRes 信息。" + +#: ../../source/ref-api/flwr.common.rst:52::1 msgid "" -"Set to True to prevent `ray.shutdown()` in case " -"`ray.is_initialized()=True`." 
+":py:obj:`EvaluateIns `\\ \\(parameters\\, " +"config\\)" msgstr "" -#: flwr.simulation.app.start_simulation:50 of +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.EvaluateIns:1 of +msgid "Evaluate instructions for a client." +msgstr "评估客户端的指示。" + +#: ../../source/ref-api/flwr.common.rst:52::1 msgid "" -"Optionally specify the type of actor to use. The actor object, which " -"persists throughout the simulation, will be the process in charge of " -"running the clients' jobs (i.e. their `fit()` method)." +":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " +"num\\_examples\\, metrics\\)" msgstr "" -#: flwr.simulation.app.start_simulation:54 of +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.EvaluateRes:1 of +msgid "Evaluate response from a client." +msgstr "评估客户端的反应。" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid ":py:obj:`EventType `\\ \\(value\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.telemetry.EventType:1 of +msgid "Types of telemetry events." +msgstr "遥测事件类型。" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.FitIns:1 of +msgid "Fit instructions for a client." +msgstr "为客户提供安装说明。" + +#: ../../source/ref-api/flwr.common.rst:52::1 msgid "" -"If you want to create your own Actor classes, you might need to pass some" -" input argument. You can use this dictionary for such purpose." +":py:obj:`FitRes `\\ \\(status\\, parameters\\, " +"num\\_examples\\, metrics\\)" msgstr "" -#: flwr.simulation.app.start_simulation:57 of +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.FitRes:1 of +msgid "Fit response from a client." 
+msgstr "来自客户端的合适回复。" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.GetParametersIns:1 of +msgid "Parameters request for a client." +msgstr "客户端的参数请求。" + +#: ../../source/ref-api/flwr.common.rst:52::1 msgid "" -"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " -"the VCE to choose in which node the actor is placed. If you are an " -"advanced user needed more control you can use lower-level scheduling " -"strategies to pin actors to specific compute nodes (e.g. via " -"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." -" For all details, please refer to the Ray documentation: " -"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" +":py:obj:`GetParametersRes `\\ \\(status\\, " +"parameters\\)" msgstr "" -#: flwr.simulation.app.start_simulation:66 of -msgid "**hist** -- Object containing metrics from training." +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.GetParametersRes:1 of +msgid "Response when asked to return parameters." +msgstr "要求返回参数时的响应。" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" msgstr "" -#: ../../source/ref-api-flwr.rst:57 -msgid "server" +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.GetPropertiesIns:1 of +msgid "Properties request for a client." +msgstr "客户端的属性请求。" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid "" +":py:obj:`GetPropertiesRes `\\ \\(status\\, " +"properties\\)" msgstr "" -#: flwr.server:1 of -msgid "Flower server." +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.GetPropertiesRes:1 of +msgid "Properties response from a client." 
+msgstr "来自客户端的属性响应。" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid ":py:obj:`NDArray `\\" msgstr "" -#: ../../source/ref-api-flwr.rst:65 -msgid "server.start_server" +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid "" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" msgstr "" -#: flwr.server.app.start_server:1 of -msgid "Start a Flower server using the gRPC transport layer." +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid "" +":py:obj:`Parameters `\\ \\(tensors\\, " +"tensor\\_type\\)" msgstr "" -#: flwr.server.app.start_server:3 of -msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.Parameters:1 of +msgid "Model parameters." +msgstr "模型参数。" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" msgstr "" -#: flwr.server.app.start_server:5 of +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.ReconnectIns:1 of +msgid "ReconnectIns message from server to client." +msgstr "服务器发送给客户端的重新连接信息。" + +#: ../../source/ref-api/flwr.common.rst:52::1 msgid "" -"A server implementation, either `flwr.server.Server` or a subclass " -"thereof. If no instance is provided, then `start_server` will create one." +":py:obj:`ServerMessage `\\ " +"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.ServerMessage:1 of +msgid "ServerMessage is a container used to hold one instruction message." +msgstr "ServerMessage 是用于容纳一条指令信息的容器。" + +#: ../../source/ref-api/flwr.common.rst:52::1 +msgid ":py:obj:`Status `\\ \\(code\\, message\\)" +msgstr "" + +#: ../../source/ref-api/flwr.common.rst:52::1 +#: flwr.common.typing.Status:1 of +msgid "Client status." 
+msgstr "客户端状态。" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 +#, fuzzy +msgid "ClientMessage" +msgstr "客户端" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`evaluate_res `\\" msgstr "" -#: flwr.server.app.start_server:12 of +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`fit_res `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 msgid "" -"An implementation of the abstract base class " -"`flwr.server.strategy.Strategy`. If no strategy is provided, then " -"`start_server` will use `flwr.server.strategy.FedAvg`." +":py:obj:`get_parameters_res " +"`\\" msgstr "" -#: flwr.server.app.start_server:16 of +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_server` will use " -"`flwr.server.client_manager.SimpleClientManager`." +":py:obj:`get_properties_res " +"`\\" msgstr "" -#: flwr.server.app.start_server:21 of +#: ../../source/ref-api/flwr.common.Code.rst:2 +msgid "Code" +msgstr "" + +#: flwr.common.typing.Code:1 of +msgid "Bases: :py:class:`~enum.Enum`" +msgstr "" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`OK `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" clients. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower clients need to be started with the same value (see " -"`flwr.client.start_client`), otherwise clients will not know about the " -"increased limit and block larger messages." 
+":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " +"`\\" msgstr "" -#: flwr.server.app.start_server:28 of +#: ../../source/ref-api/flwr.common.Code.rst:26::1 msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order: * CA certificate. * " -"server certificate. * server private key." +":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" msgstr "" -#: flwr.server.app.start_server:28 of +#: ../../source/ref-api/flwr.common.Code.rst:26::1 msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order:" +":py:obj:`EVALUATE_NOT_IMPLEMENTED " +"`\\" msgstr "" -#: flwr.server.app.start_server:32 of -msgid "CA certificate." +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 +msgid "DisconnectRes" msgstr "" -#: flwr.server.app.start_server:33 of -msgid "server certificate." +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 +msgid ":py:obj:`reason `\\" msgstr "" -#: flwr.server.app.start_server:34 of -msgid "server private key." +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 +#, fuzzy +msgid "EvaluateIns" +msgstr "说明" + +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: flwr.server.app.start_server:37 of -msgid "**hist** -- Object containing training and evaluation metrics." 
+#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.server.app.start_server:42 of -msgid "Starting an insecure server:" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 +msgid "EvaluateRes" msgstr "" -#: flwr.server.app.start_server:46 of -msgid "Starting an SSL-enabled server:" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`status `\\" msgstr "" -#: ../../source/ref-api-flwr.rst:73 -msgid "server.strategy" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`loss `\\" msgstr "" -#: flwr.server.strategy:1 of -msgid "Contains the strategy abstraction and different implementations." +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" msgstr "" -#: ../../source/ref-api-flwr.rst:81 -msgid "server.strategy.Strategy" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`metrics `\\" msgstr "" -#: flwr.server.strategy.strategy.Strategy:1 of -msgid "Abstract base class for server strategy implementations." +#: ../../source/ref-api/flwr.common.EventType.rst:2 +#, fuzzy +msgid "EventType" +msgstr "返回类型" + +#: flwr.common.telemetry.EventType:1 of +msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 of -msgid "Aggregate evaluation results." +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid ":py:obj:`PING `\\" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.configure_fit:3 -#: flwr.server.strategy.strategy.Strategy.evaluate:6 of -msgid "The current round of federated learning." 
+#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid ":py:obj:`START_CLIENT_ENTER `\\" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of -msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " -"one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid ":py:obj:`START_CLIENT_LEAVE `\\" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of -msgid "Exceptions that occurred while the server was waiting for client updates." +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid ":py:obj:`START_SERVER_ENTER `\\" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid ":py:obj:`START_SERVER_LEAVE `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 msgid "" -"**aggregation_result** -- The aggregated evaluation result. Aggregation " -"typically uses some variant of a weighted average." +":py:obj:`RUN_DRIVER_API_ENTER " +"`\\" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of -msgid "Aggregate training results." +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid "" +":py:obj:`RUN_DRIVER_API_LEAVE " +"`\\" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 msgid "" -"Successful updates from the previously selected and configured clients. 
" -"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" -" one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." +":py:obj:`RUN_FLEET_API_ENTER " +"`\\" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the new global model parameters (i.e., it will replace the " -"previous parameters with the ones returned from this method). If `None` " -"is returned (e.g., because there were only failures and no viable " -"results) then the server will no update the previous model parameters, " -"the updates received in this round are discarded, and the global model " -"parameters remain the same." +":py:obj:`RUN_FLEET_API_LEAVE " +"`\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of -msgid "Configure the next round of evaluation." +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid "" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 -#: flwr.server.strategy.strategy.Strategy.configure_fit:7 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of -msgid "The client manager which holds all currently connected clients." 
+#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid "" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 msgid "" -"**evaluate_configuration** -- A list of tuples. Each tuple in the list " -"identifies a `ClientProxy` and the `EvaluateIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated evaluation." +":py:obj:`START_SIMULATION_ENTER " +"`\\" msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 -#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 -#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of -msgid "Configure the next round of training." +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid "" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 -#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of -msgid "" -"**fit_configuration** -- A list of tuples. Each tuple in the list " -"identifies a `ClientProxy` and the `FitIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated learning." +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid ":py:obj:`DRIVER_CONNECT `\\" msgstr "" -#: flwr.server.strategy.strategy.Strategy.evaluate:1 of -msgid "Evaluate the current model parameters." 
+#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid ":py:obj:`DRIVER_DISCONNECT `\\" msgstr "" -#: flwr.server.strategy.strategy.Strategy.evaluate:3 of -msgid "" -"This function can be used to perform centralized (i.e., server-side) " -"evaluation of model parameters." +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid ":py:obj:`START_DRIVER_ENTER `\\" msgstr "" -#: flwr.server.strategy.strategy.Strategy.evaluate:11 of -msgid "" -"**evaluation_result** -- The evaluation result, usually a Tuple " -"containing loss and a dictionary containing task-specific metrics (e.g., " -"accuracy)." +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid ":py:obj:`START_DRIVER_LEAVE `\\" msgstr "" -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of -msgid "Initialize the (global) model parameters." +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid "" +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" msgstr "" -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the initial global model parameters." +":py:obj:`RUN_CLIENT_APP_LEAVE " +"`\\" msgstr "" -#: ../../source/ref-api-flwr.rst:90 -msgid "server.strategy.FedAvg" +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid "" +":py:obj:`RUN_SERVER_APP_ENTER " +"`\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:1 of -msgid "Configurable FedAvg strategy implementation." +#: ../../source/ref-api/flwr.common.EventType.rst:42::1 +msgid "" +":py:obj:`RUN_SERVER_APP_LEAVE " +"`\\" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:1 -#: flwr.server.strategy.fedavg.FedAvg.__init__:1 -#: flwr.server.strategy.qfedavg.QFedAvg.__init__:1 of -msgid "Federated Averaging strategy." 
+#: ../../source/ref-api/flwr.common.FitIns.rst:2 +msgid "FitIns" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:3 -#: flwr.server.strategy.fedavg.FedAvg.__init__:3 -#: flwr.server.strategy.qfedavg.QFedAvg.__init__:3 of -msgid "Implementation based on https://arxiv.org/abs/1602.05629" +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:5 -#: flwr.server.strategy.fedavg.FedAvg.__init__:5 -#: flwr.server.strategy.fedprox.FedProx.__init__:37 -#: flwr.server.strategy.qfedavg.QFedAvg.__init__:5 of -msgid "" -"Fraction of clients used during training. In case `min_fit_clients` is " -"larger than `fraction_fit * available_clients`, `min_fit_clients` will " -"still be sampled. Defaults to 1.0." +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:9 -#: flwr.server.strategy.fedavg.FedAvg.__init__:9 -#: flwr.server.strategy.fedprox.FedProx.__init__:41 -#: flwr.server.strategy.qfedavg.QFedAvg.__init__:9 of -msgid "" -"Fraction of clients used during validation. In case " -"`min_evaluate_clients` is larger than `fraction_evaluate * " -"available_clients`, `min_evaluate_clients` will still be sampled. " -"Defaults to 1.0." 
+#: ../../source/ref-api/flwr.common.FitRes.rst:2 +msgid "FitRes" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan.__init__:9 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:13 -#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:9 -#: flwr.server.strategy.fedadam.FedAdam.__init__:9 -#: flwr.server.strategy.fedavg.FedAvg.__init__:13 -#: flwr.server.strategy.fedavgm.FedAvgM.__init__:9 -#: flwr.server.strategy.fedopt.FedOpt.__init__:9 -#: flwr.server.strategy.fedprox.FedProx.__init__:45 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:7 -#: flwr.server.strategy.fedyogi.FedYogi.__init__:9 -#: flwr.server.strategy.krum.Krum.__init__:7 -#: flwr.server.strategy.qfedavg.QFedAvg.__init__:13 of -msgid "Minimum number of clients used during training. Defaults to 2." +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`status `\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan.__init__:11 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:15 -#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:11 -#: flwr.server.strategy.fedadam.FedAdam.__init__:11 -#: flwr.server.strategy.fedavg.FedAvg.__init__:15 -#: flwr.server.strategy.fedavgm.FedAvgM.__init__:11 -#: flwr.server.strategy.fedopt.FedOpt.__init__:11 -#: flwr.server.strategy.fedprox.FedProx.__init__:47 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:9 -#: flwr.server.strategy.fedyogi.FedYogi.__init__:11 -#: flwr.server.strategy.krum.Krum.__init__:9 -#: flwr.server.strategy.qfedavg.QFedAvg.__init__:15 of -msgid "Minimum number of clients used during validation. Defaults to 2." 
+#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan.__init__:13 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:17 -#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:13 -#: flwr.server.strategy.fedadam.FedAdam.__init__:13 -#: flwr.server.strategy.fedavg.FedAvg.__init__:17 -#: flwr.server.strategy.fedavgm.FedAvgM.__init__:13 -#: flwr.server.strategy.fedopt.FedOpt.__init__:13 -#: flwr.server.strategy.fedprox.FedProx.__init__:49 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:11 -#: flwr.server.strategy.fedyogi.FedYogi.__init__:13 -#: flwr.server.strategy.krum.Krum.__init__:11 -#: flwr.server.strategy.qfedavg.QFedAvg.__init__:17 of -msgid "Minimum number of total clients in the system. Defaults to 2." +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan.__init__:17 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:19 -#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:15 -#: flwr.server.strategy.fedadam.FedAdam.__init__:15 -#: flwr.server.strategy.fedavg.FedAvg.__init__:19 -#: flwr.server.strategy.fedavgm.FedAvgM.__init__:15 -#: flwr.server.strategy.fedopt.FedOpt.__init__:15 -#: flwr.server.strategy.fedprox.FedProx.__init__:51 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:13 -#: flwr.server.strategy.fedyogi.FedYogi.__init__:15 -#: flwr.server.strategy.krum.Krum.__init__:18 -#: flwr.server.strategy.qfedavg.QFedAvg.__init__:19 of -msgid "Optional function used for validation. Defaults to None." 
+#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`metrics `\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan.__init__:19 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:17 -#: flwr.server.strategy.fedadam.FedAdam.__init__:17 -#: flwr.server.strategy.fedavg.FedAvg.__init__:21 -#: flwr.server.strategy.fedavgm.FedAvgM.__init__:17 -#: flwr.server.strategy.fedopt.FedOpt.__init__:17 -#: flwr.server.strategy.fedprox.FedProx.__init__:53 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:15 -#: flwr.server.strategy.fedyogi.FedYogi.__init__:17 -#: flwr.server.strategy.krum.Krum.__init__:20 -#: flwr.server.strategy.qfedavg.QFedAvg.__init__:21 of -msgid "Function used to configure training. Defaults to None." -msgstr "" +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 +#, fuzzy +msgid "GetParametersIns" +msgstr "参数" -#: flwr.server.strategy.bulyan.Bulyan.__init__:21 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:23 -#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:19 -#: flwr.server.strategy.fedadam.FedAdam.__init__:19 -#: flwr.server.strategy.fedavg.FedAvg.__init__:23 -#: flwr.server.strategy.fedavgm.FedAvgM.__init__:19 -#: flwr.server.strategy.fedopt.FedOpt.__init__:19 -#: flwr.server.strategy.fedprox.FedProx.__init__:55 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:17 -#: flwr.server.strategy.fedyogi.FedYogi.__init__:19 -#: flwr.server.strategy.krum.Krum.__init__:22 -#: flwr.server.strategy.qfedavg.QFedAvg.__init__:23 of -msgid "Function used to configure validation. Defaults to None." 
+#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan.__init__:23 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:25 -#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:25 -#: flwr.server.strategy.fedadam.FedAdam.__init__:21 -#: flwr.server.strategy.fedavg.FedAvg.__init__:25 -#: flwr.server.strategy.fedavgm.FedAvgM.__init__:21 -#: flwr.server.strategy.fedopt.FedOpt.__init__:21 -#: flwr.server.strategy.fedprox.FedProx.__init__:57 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:19 -#: flwr.server.strategy.fedyogi.FedYogi.__init__:21 -#: flwr.server.strategy.krum.Krum.__init__:24 -#: flwr.server.strategy.qfedavg.QFedAvg.__init__:25 of -msgid "Whether or not accept rounds containing failures. Defaults to True." -msgstr "" +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 +#, fuzzy +msgid "GetParametersRes" +msgstr "参数" -#: flwr.server.strategy.bulyan.Bulyan.__init__:25 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:27 -#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:27 -#: flwr.server.strategy.fedadam.FedAdam.__init__:23 -#: flwr.server.strategy.fedavg.FedAvg.__init__:27 -#: flwr.server.strategy.fedavgm.FedAvgM.__init__:23 -#: flwr.server.strategy.fedopt.FedOpt.__init__:23 -#: flwr.server.strategy.fedprox.FedProx.__init__:59 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:21 -#: flwr.server.strategy.fedyogi.FedYogi.__init__:23 -#: flwr.server.strategy.krum.Krum.__init__:26 -#: flwr.server.strategy.qfedavg.QFedAvg.__init__:27 of -msgid "Initial global model parameters." 
+#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`status `\\" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:29 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:31 -#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:23 -#: flwr.server.strategy.fedadam.FedAdam.__init__:25 -#: flwr.server.strategy.fedadam.FedAdam.__init__:27 -#: flwr.server.strategy.fedavg.FedAvg.__init__:29 -#: flwr.server.strategy.fedavg.FedAvg.__init__:31 -#: flwr.server.strategy.fedopt.FedOpt.__init__:25 -#: flwr.server.strategy.fedopt.FedOpt.__init__:27 -#: flwr.server.strategy.fedprox.FedProx.__init__:61 -#: flwr.server.strategy.fedprox.FedProx.__init__:63 -#: flwr.server.strategy.fedyogi.FedYogi.__init__:25 -#: flwr.server.strategy.fedyogi.FedYogi.__init__:27 -#: flwr.server.strategy.qfedavg.QFedAvg.__init__:29 -#: flwr.server.strategy.qfedavg.QFedAvg.__init__:31 of -msgid "Metrics aggregation function, optional." +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 of -msgid "Aggregate evaluation losses using weighted average." 
+#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 +msgid "GetPropertiesIns" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 -#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 -#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using weighted average." +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 of -msgid "Evaluate model parameters using an evaluation function." +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 +msgid "GetPropertiesRes" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 -#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 of -msgid "Initialize global model parameters." +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`status `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of -msgid "Use a fraction of available clients for evaluation." +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`properties `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of -msgid "Return the sample size and the required number of available clients." 
+#: ../../source/ref-api/flwr.common.NDArray.rst:2 +msgid "NDArray" msgstr "" -#: ../../source/ref-api-flwr.rst:101 -msgid "server.strategy.FedAvgM" +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensors `\\" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:1 -#: flwr.server.strategy.fedmedian.FedMedian:1 of -msgid "Configurable FedAvg with Momentum strategy implementation." +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensor_type `\\" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM.__init__:1 of -msgid "Federated Averaging with Momentum strategy." -msgstr "" +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 +#, fuzzy +msgid "ReconnectIns" +msgstr "启用 SSL 连接" -#: flwr.server.strategy.fedavgm.FedAvgM.__init__:3 of -msgid "Implementation based on https://arxiv.org/pdf/1909.06335.pdf" +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 +msgid ":py:obj:`seconds `\\" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM.__init__:5 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:3 -#: flwr.server.strategy.krum.Krum.__init__:3 of -msgid "Fraction of clients used during training. Defaults to 0.1." +#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 +#, fuzzy +msgid "ServerMessage" +msgstr "服务器端" + +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`evaluate_ins `\\" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM.__init__:7 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:5 -#: flwr.server.strategy.krum.Krum.__init__:5 of -msgid "Fraction of clients used during validation. Defaults to 0.1." +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`fit_ins `\\" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM.__init__:25 of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 msgid "" -"Server-side learning rate used in server-side optimization. Defaults to " -"1.0." 
+":py:obj:`get_parameters_ins " +"`\\" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM.__init__:28 of -msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid "" +":py:obj:`get_properties_ins " +"`\\" msgstr "" -#: ../../source/ref-api-flwr.rst:112 -msgid "server.strategy.FedMedian" -msgstr "" +#: ../../source/ref-api/flwr.common.Status.rst:2 +#, fuzzy +msgid "Status" +msgstr "客户端状态。" -#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of -msgid "Aggregate fit results using median." +#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid ":py:obj:`code `\\" msgstr "" -#: ../../source/ref-api-flwr.rst:122 -msgid "server.strategy.QFedAvg" +#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid ":py:obj:`message `\\" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Configurable QFedAvg strategy implementation." +#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 +msgid "bytes\\_to\\_ndarray" msgstr "" -#: ../../source/ref-api-flwr.rst:133 -msgid "server.strategy.FaultTolerantFedAvg" -msgstr "" +#: ../../source/ref-api/flwr.common.configure.rst:2 +#, fuzzy +msgid "configure" +msgstr "配置日志记录" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of -msgid "Configurable fault-tolerant FedAvg strategy implementation." +#: ../../source/ref-api/flwr.common.event.rst:2 +msgid "event" msgstr "" -#: ../../source/ref-api-flwr.rst:144 -msgid "server.strategy.FedOpt" +#: ../../source/ref-api/flwr.common.log.rst:2 +msgid "log" msgstr "" -#: flwr.server.strategy.fedopt.FedOpt:1 of -msgid "Configurable FedAdagrad strategy implementation." -msgstr "" +#: logging.Logger.log:3 of +msgid "" +"To pass exception information, use the keyword argument exc_info with a " +"true value, e.g." +msgstr "要传递异常信息,请使用带 true 值的关键字参数 exc_info,例如。" -#: flwr.server.strategy.fedopt.FedOpt.__init__:1 of -msgid "Federated Optim strategy interface." 
-msgstr "" +#: logging.Logger.log:6 of +#, python-format +msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +msgstr "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" -#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:3 -#: flwr.server.strategy.fedadam.FedAdam.__init__:3 -#: flwr.server.strategy.fedopt.FedOpt.__init__:3 -#: flwr.server.strategy.fedyogi.FedYogi.__init__:3 of -msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 +msgid "ndarray\\_to\\_bytes" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan.__init__:5 -#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:5 -#: flwr.server.strategy.fedadam.FedAdam.__init__:5 -#: flwr.server.strategy.fedopt.FedOpt.__init__:5 -#: flwr.server.strategy.fedyogi.FedYogi.__init__:5 of -msgid "Fraction of clients used during training. Defaults to 1.0." +#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 +msgid "ndarrays\\_to\\_parameters" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan.__init__:7 -#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:7 -#: flwr.server.strategy.fedadam.FedAdam.__init__:7 -#: flwr.server.strategy.fedopt.FedOpt.__init__:7 -#: flwr.server.strategy.fedyogi.FedYogi.__init__:7 of -msgid "Fraction of clients used during validation. Defaults to 1.0." +#: ../../source/ref-api/flwr.common.now.rst:2 +msgid "now" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:29 -#: flwr.server.strategy.fedadam.FedAdam.__init__:29 -#: flwr.server.strategy.fedopt.FedOpt.__init__:29 -#: flwr.server.strategy.fedyogi.FedYogi.__init__:29 of -msgid "Server-side learning rate. Defaults to 1e-1." 
+#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 +msgid "parameters\\_to\\_ndarrays" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:31 -#: flwr.server.strategy.fedadam.FedAdam.__init__:31 -#: flwr.server.strategy.fedopt.FedOpt.__init__:31 -#: flwr.server.strategy.fedyogi.FedYogi.__init__:31 of -msgid "Client-side learning rate. Defaults to 1e-1." -msgstr "" +#: ../../source/ref-api/flwr.server.rst:2 +msgid "server" +msgstr "服务器" -#: flwr.server.strategy.fedopt.FedOpt.__init__:33 of -msgid "Momentum parameter. Defaults to 0.0." +#: ../../source/ref-api/flwr.server.rst:26::1 +msgid ":py:obj:`run_driver_api `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedopt.FedOpt.__init__:35 of -msgid "Second moment parameter. Defaults to 0.0." -msgstr "" +#: ../../source/ref-api/flwr.server.rst:26::1 +#: flwr.server.app.run_driver_api:1 of +#, fuzzy +msgid "Run Flower server (Driver API)." +msgstr "flower-driver-api" -#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:33 -#: flwr.server.strategy.fedadam.FedAdam.__init__:37 -#: flwr.server.strategy.fedopt.FedOpt.__init__:37 -#: flwr.server.strategy.fedyogi.FedYogi.__init__:37 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." +#: ../../source/ref-api/flwr.server.rst:26::1 +msgid ":py:obj:`run_fleet_api `\\ \\(\\)" msgstr "" -#: ../../source/ref-api-flwr.rst:155 -msgid "server.strategy.FedProx" -msgstr "" +#: ../../source/ref-api/flwr.server.rst:26::1 +#: flwr.server.app.run_fleet_api:1 of +#, fuzzy +msgid "Run Flower server (Fleet API)." +msgstr "Flower 服务器。" -#: flwr.server.strategy.fedprox.FedProx:1 of -msgid "Configurable FedProx strategy implementation." +#: ../../source/ref-api/flwr.server.rst:26::1 +msgid ":py:obj:`run_server_app `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx.__init__:1 of -msgid "Federated Optimization strategy." 
-msgstr "" +#: ../../source/ref-api/flwr.server.rst:26::1 +#: flwr.server.app.run_server_app:1 of +#, fuzzy +msgid "Run Flower server app." +msgstr "Flower 服务器。" -#: flwr.server.strategy.fedprox.FedProx.__init__:3 of -msgid "Implementation based on https://arxiv.org/abs/1812.06127" +#: ../../source/ref-api/flwr.server.rst:26::1 +msgid ":py:obj:`run_superlink `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx.__init__:5 of -msgid "" -"The strategy in itself will not be different than FedAvg, the client " -"needs to be adjusted. A proximal term needs to be added to the loss " -"function during the training:" +#: ../../source/ref-api/flwr.server.rst:26::1 +#: flwr.server.app.run_superlink:1 of +msgid "Run Flower server (Driver API and Fleet API)." msgstr "" -#: flwr.server.strategy.fedprox.FedProx.__init__:9 of +#: ../../source/ref-api/flwr.server.rst:26::1 msgid "" -"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" -"\n" +":py:obj:`start_server `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx.__init__:12 of -msgid "" -"Where $w^t$ are the global parameters and $w$ are the local weights the " -"function will be optimized with." -msgstr "" +#: ../../source/ref-api/flwr.server.rst:26::1 +#: flwr.server.app.start_server:1 of +msgid "Start a Flower server using the gRPC transport layer." +msgstr "使用 gRPC 传输层启动 Flower 服务器。" -#: flwr.server.strategy.fedprox.FedProx.__init__:15 of -msgid "In PyTorch, for example, the loss would go from:" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`ClientManager `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx.__init__:21 of -msgid "To:" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.ClientManager:1 of +#, fuzzy +msgid "Abstract base class for managing Flower clients." 
+msgstr "Flower 客户端的抽象基类。" + +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`History `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx.__init__:30 of +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.history.History:1 of +#, fuzzy +msgid "History class for training and/or evaluation metrics collection." +msgstr "**hist** -- 包含训练和评估指标的对象。" + +#: ../../source/ref-api/flwr.server.rst:37::1 msgid "" -"With `global_params` being a copy of the parameters before the training " -"takes place." +":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " +"strategy\\]\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx.__init__:65 of +#: ../../source/ref-api/flwr.server.rst:37::1 +#, fuzzy msgid "" -"The weight of the proximal term used in the optimization. 0.0 makes this " -"strategy equivalent to FedAvg, and the higher the coefficient, the more " -"regularization will be used (that is, the client parameters will need to " -"be closer to the server parameters during training)." +":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," +" round\\_timeout\\]\\)" msgstr "" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of -msgid "Sends the proximal factor mu to the clients" -msgstr "" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.app.ServerConfig:1 of +#, fuzzy +msgid "Flower server config." +msgstr "Flower 服务器。" -#: ../../source/ref-api-flwr.rst:166 -msgid "server.strategy.FedAdagrad" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of -msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.SimpleClientManager:1 of +#, fuzzy +msgid "Provides a pool of available clients." 
+msgstr "使用部分可用客户进行评估。" + +#: ../../source/ref-api/flwr.server.rst:56::1 +msgid ":py:obj:`flwr.server.driver `\\" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:3 -#: flwr.server.strategy.fedadam.FedAdam:3 -#: flwr.server.strategy.fedyogi.FedYogi:5 of -msgid "Paper: https://arxiv.org/abs/2003.00295" +#: ../../source/ref-api/flwr.server.rst:56::1 flwr.server.driver:1 +#: of +#, fuzzy +msgid "Flower driver SDK." +msgstr "Flower 服务器。" + +#: ../../source/ref-api/flwr.server.rst:56::1 +#, fuzzy +msgid ":py:obj:`flwr.server.strategy `\\" +msgstr "server.strategy.Strategy" + +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.strategy:1 of +msgid "Contains the strategy abstraction and different implementations." +msgstr "包含策略抽象和不同的实现方法。" + +#: ../../source/ref-api/flwr.server.ClientManager.rst:2 +#, fuzzy +msgid "ClientManager" +msgstr "客户端" + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:1 -#: flwr.server.strategy.fedadam.FedAdam.__init__:1 of -msgid "Federated learning strategy using Adagrad on server-side." +#: flwr.server.client_manager.ClientManager.all:1 +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.all:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "Return all available clients." msgstr "" -#: ../../source/ref-api-flwr.rst:177 -msgid "server.strategy.FedAdam" +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`num_available `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedadam.FedAdam:1 of -msgid "FedAdam - Adaptive Federated Optimization using Adam." 
+#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.num_available:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.num_available:1 of +#, fuzzy +msgid "Return the number of available clients." +msgstr "返回样本大小和所需的可用客户数量。" + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`register `\\ \\(client\\)" msgstr "" -#: flwr.server.strategy.fedadam.FedAdam.__init__:33 -#: flwr.server.strategy.fedyogi.FedYogi.__init__:33 of -msgid "Momentum parameter. Defaults to 0.9." +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.register:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.register:1 of +msgid "Register Flower ClientProxy instance." msgstr "" -#: flwr.server.strategy.fedadam.FedAdam.__init__:35 -#: flwr.server.strategy.fedyogi.FedYogi.__init__:35 of -msgid "Second moment parameter. Defaults to 0.99." +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid "" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -#: ../../source/ref-api-flwr.rst:188 -msgid "server.strategy.FedYogi" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.sample:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.sample:1 of +msgid "Sample a number of Flower ClientProxy instances." msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "FedYogi [Reddi et al., 2020] strategy." +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`unregister `\\ \\(client\\)" msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:3 of -msgid "Adaptive Federated Optimization using Yogi." 
+#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.unregister:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.unregister:1 of +msgid "Unregister Flower ClientProxy instance." msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi.__init__:1 of -msgid "Federated learning strategy using Yogi on server-side." +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid "" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\, timeout\\)" msgstr "" -#: ../../source/ref-api-flwr.rst:199 -msgid "server.strategy.FedTrimmedAvg" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.wait_for:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of +msgid "Wait until at least `num_clients` are available." msgstr "" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:1 of -msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." +#: flwr.server.client_manager.ClientManager.num_available:3 +#: flwr.server.client_manager.SimpleClientManager.num_available:3 of +msgid "**num_available** -- The number of currently available clients." msgstr "" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of -msgid "Paper: https://arxiv.org/abs/1803.01498" +#: flwr.server.client_manager.ClientManager.register:6 +#: flwr.server.client_manager.SimpleClientManager.register:6 of +msgid "" +"**success** -- Indicating if registration was successful. False if " +"ClientProxy is already registered or can not be registered for any " +"reason." msgstr "" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:23 of -msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." 
+#: flwr.server.client_manager.ClientManager.unregister:3 +#: flwr.server.client_manager.SimpleClientManager.unregister:3 of +msgid "This method is idempotent." msgstr "" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using trimmed average." +#: ../../source/ref-api/flwr.server.History.rst:2 +msgid "History" msgstr "" -#: ../../source/ref-api-flwr.rst:210 -msgid "server.strategy.Krum" +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -#: flwr.server.strategy.krum.Krum:1 of -msgid "Configurable Krum strategy implementation." +#: flwr.server.history.History.add_loss_centralized:1 +#: flwr.server.history.History.add_loss_centralized:1::1 of +#, fuzzy +msgid "Add one loss entry (from centralized evaluation)." +msgstr "集中评估" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -#: flwr.server.strategy.krum.Krum.__init__:1 of -msgid "Krum strategy." +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_loss_distributed:1 of +msgid "Add one loss entry (from distributed evaluation)." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan.__init__:15 -#: flwr.server.strategy.krum.Krum.__init__:13 of -msgid "Number of malicious clients in the system. Defaults to 0." +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_metrics_centralized " +"`\\ \\(server\\_round\\, " +"metrics\\)" msgstr "" -#: flwr.server.strategy.krum.Krum.__init__:15 of +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_centralized:1 of +#, fuzzy +msgid "Add metrics entries (from centralized evaluation)." 
+msgstr "集中评估" + +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" -" that case classical Krum is applied." +":py:obj:`add_metrics_distributed " +"`\\ \\(server\\_round\\, " +"metrics\\)" msgstr "" -#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of -msgid "Aggregate fit results using Krum." +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed:1 of +#, fuzzy +msgid "Add metrics entries (from distributed evaluation)." +msgstr "定制的集中/分布式评估" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_metrics_distributed_fit " +"`\\ \\(server\\_round\\," +" ...\\)" msgstr "" -#: ../../source/ref-api-flwr.rst:220 -msgid "server.strategy.Bulyan" +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed_fit:1 of +msgid "Add metrics entries (from distributed fit)." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:1 of -msgid "Bulyan strategy implementation." +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`client_manager `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan.__init__:1 of -msgid "Bulyan strategy." +#: flwr.server.server.Server.client_manager:1 +#: flwr.server.server.Server.client_manager:1::1 of +#, fuzzy +msgid "Return ClientManager." +msgstr "返回客户端(本身)。" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid "" +":py:obj:`disconnect_all_clients " +"`\\ \\(timeout\\)" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan.__init__:3 of -msgid "Implementation based on https://arxiv.org/abs/1802.07927." +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.disconnect_all_clients:1 of +msgid "Send shutdown signal to all clients." 
msgstr "" -#: flwr.server.strategy.bulyan.Bulyan.__init__:27 of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -"Byzantine resilient aggregation rule that is used as the first step of " -"the Bulyan (e.g., Krum)" +":py:obj:`evaluate_round `\\ " +"\\(server\\_round\\, timeout\\)" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan.__init__:29 of -msgid "arguments to the first_aggregation rule" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.evaluate_round:1 of +#, fuzzy +msgid "Validate current global model on a number of clients." +msgstr "当前(全局)模型参数。" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 of -msgid "Aggregate fit results using Bulyan." +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit:1 of +#, fuzzy +msgid "Run federated averaging for a number of rounds." +msgstr "联邦平均动量策略。" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid "" +":py:obj:`fit_round `\\ \\(server\\_round\\," +" timeout\\)" msgstr "" -#: ../../source/ref-api-flwr.rst:231 -msgid "server.strategy.FedXgbNnAvg" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit_round:1 of +#, fuzzy +msgid "Perform a single round of federated averaging." +msgstr "本轮联邦学习。" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid "" +":py:obj:`set_max_workers `\\ " +"\\(max\\_workers\\)" msgstr "" -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of -msgid "Configurable FedXgbNnAvg strategy implementation." +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_max_workers:1 of +msgid "Set the max_workers used by ThreadPoolExecutor." msgstr "" -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.__init__:1 of -msgid "Federated XGBoost [Ma et al., 2023] strategy." 
+#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" msgstr "" -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.__init__:3 of -msgid "Implementation based on https://arxiv.org/abs/2304.07537." +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_strategy:1 of +#, fuzzy +msgid "Replace server strategy." +msgstr "server.strategy" + +#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 +#, fuzzy +msgid "ServerConfig" +msgstr "服务器" + +#: flwr.server.app.ServerConfig:3 of +msgid "" +"All attributes have default values which allows users to configure just " +"the ones they care about." msgstr "" -#: ../../source/ref-api-flwr.rst:242 -msgid "server.strategy.DPFedAvgAdaptive" +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`num_rounds `\\" msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`round_timeout `\\" msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 of -msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 +msgid "SimpleClientManager" msgstr "" -#: ../../source/ref-api-flwr.rst:253 -msgid "server.strategy.DPFedAvgFixed" +#: flwr.server.client_manager.SimpleClientManager:1 of +msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of -msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 of -msgid "Aggregate evaluation losses using the given strategy." 
+#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`num_available `\\" +" \\(\\)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of -msgid "Aggregate training results using unweighted aggregation." +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`register `\\ " +"\\(client\\)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of -msgid "Configure the next round of evaluation using the specified strategy." +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -"Configure the next round of training incorporating Differential Privacy " -"(DP)." +":py:obj:`unregister `\\ " +"\\(client\\)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -"Configuration of the next training round includes information related to " -"DP, such as clip norm and noise stddev." +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\[\\, timeout\\]\\)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of -msgid "Evaluate model parameters using an evaluation function from the strategy." +#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of +msgid "" +"Blocks until the requested number of clients is available or until a " +"timeout is reached. Current timeout default: 1 day." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of -msgid "Initialize global model parameters using given strategy." +#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of +msgid "The number of clients to wait for." 
msgstr "" -#: ../../source/ref-api-flwr.rst:261 -msgid "common" +#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of +msgid "The time in seconds to wait for, defaults to 86400 (24h)." msgstr "" -#: flwr.common:1 of -msgid "Common components shared between server and client." +#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of +msgid "**success**" msgstr "" -#: flwr.common.typing.ClientMessage:1 of -msgid "ClientMessage is a container used to hold one result message." +#: ../../source/ref-api/flwr.server.driver.rst:2 +#, fuzzy +msgid "driver" +msgstr "服务器" + +#: ../../source/ref-api/flwr.server.driver.rst:22::1 +msgid "" +":py:obj:`start_driver `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" msgstr "" -#: flwr.common.typing.Code:1 of -msgid "Client status codes." +#: ../../source/ref-api/flwr.server.driver.rst:22::1 +#: flwr.server.driver.app.start_driver:1 of +#, fuzzy +msgid "Start a Flower Driver API server." +msgstr "启动基于 Ray 的Flower模拟服务器。" + +#: ../../source/ref-api/flwr.server.driver.rst:30::1 +msgid "" +":py:obj:`Driver `\\ " +"\\(\\[driver\\_service\\_address\\, ...\\]\\)" msgstr "" -#: flwr.common.typing.DisconnectRes:1 of -msgid "DisconnectRes message from client to server." +#: ../../source/ref-api/flwr.server.driver.rst:30::1 +#: flwr.server.driver.driver.Driver:1 of +msgid "`Driver` class provides an interface to the Driver API." msgstr "" -#: flwr.common.typing.EvaluateIns:1 of -msgid "Evaluate instructions for a client." +#: ../../source/ref-api/flwr.server.driver.rst:30::1 +msgid "" +":py:obj:`GrpcDriver `\\ " +"\\(\\[driver\\_service\\_address\\, ...\\]\\)" msgstr "" -#: flwr.common.typing.EvaluateRes:1 of -msgid "Evaluate response from a client." +#: ../../source/ref-api/flwr.server.driver.rst:30::1 +#: flwr.server.driver.grpc_driver.GrpcDriver:1 of +msgid "`GrpcDriver` provides access to the gRPC Driver API/service." msgstr "" -#: flwr.common.telemetry.EventType:1 of -msgid "Types of telemetry events." 
+#: ../../source/ref-api/flwr.server.driver.Driver.rst:2 +#, fuzzy +msgid "Driver" +msgstr "服务器" + +#: flwr.server.driver.driver.Driver:3 of +#, fuzzy +msgid "" +"The IPv4 or IPv6 address of the Driver API server. Defaults to " +"`\"[::]:9091\"`." +msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" + +#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +msgid "" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order: * CA certificate. * " +"server certificate. * server private key." msgstr "" +"包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素: * CA 证书,* 服务器证书, * " +"服务器私钥。" + +#: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of +msgid "" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order:" +msgstr "包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素:" + +#: flwr.server.app.start_server:32 flwr.server.driver.driver.Driver:10 of +msgid "CA certificate." +msgstr "CA 证书。" + +#: flwr.server.app.start_server:33 flwr.server.driver.driver.Driver:11 of +msgid "server certificate." +msgstr "服务器证书。" + +#: flwr.server.app.start_server:34 flwr.server.driver.driver.Driver:12 of +msgid "server private key." +msgstr "服务器私人密钥。" + +#: flwr.server.driver.driver.Driver.get_nodes:1::1 of +msgid ":py:obj:`get_nodes `\\ \\(\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.get_nodes:1 +#: flwr.server.driver.driver.Driver.get_nodes:1::1 of +msgid "Get node IDs." 
+msgstr "" + +#: flwr.server.driver.driver.Driver.get_nodes:1::1 of +msgid "" +":py:obj:`pull_task_res `\\ " +"\\(task\\_ids\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.get_nodes:1::1 +#: flwr.server.driver.driver.Driver.pull_task_res:1 +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 +#: flwr.server.driver.grpc_driver.GrpcDriver.pull_task_res:1 of +#, fuzzy +msgid "Get task results." +msgstr "汇总训练结果。" + +#: flwr.server.driver.driver.Driver.get_nodes:1::1 of +msgid "" +":py:obj:`push_task_ins `\\ " +"\\(task\\_ins\\_list\\)" +msgstr "" + +#: flwr.server.driver.driver.Driver.get_nodes:1::1 +#: flwr.server.driver.driver.Driver.push_task_ins:1 +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 +#: flwr.server.driver.grpc_driver.GrpcDriver.push_task_ins:1 of +msgid "Schedule tasks." +msgstr "" + +#: ../../source/ref-api/flwr.server.driver.GrpcDriver.rst:2 +msgid "GrpcDriver" +msgstr "" + +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +msgid ":py:obj:`connect `\\ \\(\\)" +msgstr "" + +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1 +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +msgid "Connect to the Driver API." +msgstr "" + +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +msgid "" +":py:obj:`create_run `\\ " +"\\(req\\)" +msgstr "" + +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 +#: flwr.server.driver.grpc_driver.GrpcDriver.create_run:1 of +#, fuzzy +msgid "Request for run ID." +msgstr "Flower 基线申请" + +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +msgid ":py:obj:`disconnect `\\ \\(\\)" +msgstr "" + +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 +#: flwr.server.driver.grpc_driver.GrpcDriver.disconnect:1 of +msgid "Disconnect from the Driver API." 
+msgstr "" + +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +msgid ":py:obj:`get_nodes `\\ \\(req\\)" +msgstr "" + +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 +#: flwr.server.driver.grpc_driver.GrpcDriver.get_nodes:1 of +#, fuzzy +msgid "Get client IDs." +msgstr "返回客户端(本身)。" + +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +msgid "" +":py:obj:`pull_task_res `\\ " +"\\(req\\)" +msgstr "" + +#: flwr.server.driver.grpc_driver.GrpcDriver.connect:1::1 of +msgid "" +":py:obj:`push_task_ins `\\ " +"\\(req\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.driver.start_driver.rst:2 +#, fuzzy +msgid "start\\_driver" +msgstr "启动客户端" + +#: flwr.server.driver.app.start_driver:3 of +#, fuzzy +msgid "" +"The IPv4 or IPv6 address of the Driver API server. Defaults to " +"`\"[::]:8080\"`." +msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" + +#: flwr.server.driver.app.start_driver:6 of +#, fuzzy +msgid "" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_driver` will create one." +msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" + +#: flwr.server.app.start_server:9 flwr.server.driver.app.start_driver:10 +#: flwr.simulation.app.start_simulation:28 of +msgid "" +"Currently supported values are `num_rounds` (int, default: 1) and " +"`round_timeout` in seconds (float, default: None)." +msgstr "目前支持的值有:`num_rounds`(int,默认值:1)和以秒为单位的`round_timeout`(float,默认值:无)。" + +#: flwr.server.app.start_server:12 flwr.server.driver.app.start_driver:13 of +msgid "" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`start_server` will use `flwr.server.strategy.FedAvg`." 
+msgstr ""
+"抽象基类 `flwr.server.strategy.Strategy` 的实现。如果没有提供策略,`start_server` 将使用 "
+"`flwr.server.strategy.FedAvg`。"
+
+#: flwr.server.driver.app.start_driver:17 of
+msgid ""
+"An implementation of the class `flwr.server.ClientManager`. If no "
+"implementation is provided, then `start_driver` will use "
+"`flwr.server.SimpleClientManager`."
+msgstr ""
+"类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_driver` 将使用 "
+"`flwr.server.SimpleClientManager`。"
+
+#: flwr.server.app.start_server:37 flwr.server.driver.app.start_driver:26 of
+msgid "**hist** -- Object containing training and evaluation metrics."
+msgstr "**hist** -- 包含训练和评估指标的对象。"
+
+#: flwr.server.driver.app.start_driver:31 of
+msgid "Starting a driver that connects to an insecure server:"
+msgstr "启动一个连接到不安全服务器的驱动程序:"
+
+#: flwr.server.driver.app.start_driver:35 of
+msgid "Starting a driver that connects to an SSL-enabled server:"
+msgstr "启动一个连接到支持 SSL 服务器的驱动程序:"
+
+#: ../../source/ref-api/flwr.server.run_driver_api.rst:2
+#, fuzzy
+msgid "run\\_driver\\_api"
+msgstr "flower-driver-api"
+
+#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2
+msgid "run\\_fleet\\_api"
+msgstr ""
+
+#: ../../source/ref-api/flwr.server.run_server_app.rst:2
+msgid "run\\_server\\_app"
+msgstr ""
+
+#: ../../source/ref-api/flwr.server.run_superlink.rst:2
+#, fuzzy
+msgid "run\\_superlink"
+msgstr "flower-superlink"
+
+#: ../../source/ref-api/flwr.server.start_server.rst:2
+#, fuzzy
+msgid "start\\_server"
+msgstr "server.start_server"
+
+#: flwr.server.app.start_server:3 of
+msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`."
+msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"`。"
+
+#: flwr.server.app.start_server:5 of
+msgid ""
+"A server implementation, either `flwr.server.Server` or a subclass "
+"thereof. If no instance is provided, then `start_server` will create one."
+msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" + +#: flwr.server.app.start_server:16 of +msgid "" +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_server` will use " +"`flwr.server.client_manager.SimpleClientManager`." +msgstr "" +"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_server` 将使用 " +"`flwr.server.client_manager.SimpleClientManager`。" + +#: flwr.server.app.start_server:21 of +msgid "" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" clients. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower clients need to be started with the same value (see " +"`flwr.client.start_client`), otherwise clients will not know about the " +"increased limit and block larger messages." +msgstr "" +"可与 Flower 客户端交换的 gRPC 消息的最大长度:默认值对大多数模型都足够了。训练超大模型的用户可能需要增加该值。请注意,Flower " +"客户端需要以相同的值启动(请参阅 `flwr.client.start_client`),否则客户端将不知道已增加的限制并阻止更大的消息。" + +#: flwr.server.app.start_server:42 of +msgid "Starting an insecure server:" +msgstr "启动不安全的服务器:" + +#: flwr.server.app.start_server:46 of +msgid "Starting an SSL-enabled server:" +msgstr "启动支持 SSL 的服务器:" + +#: ../../source/ref-api/flwr.server.strategy.rst:2 +#, fuzzy +msgid "strategy" +msgstr "Krum 策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of +msgid "Configurable fault-tolerant FedAvg strategy implementation." 
+msgstr "可配置的容错 FedAvg 策略实施。" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of +msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." +msgstr "FedAdagrad 策略 - 使用 Adagrad 进行自适应联合优化。" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedAdam `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedadam.FedAdam:1 of +msgid "FedAdam - Adaptive Federated Optimization using Adam." +msgstr "FedAdam - 使用 Adam 进行自适应联合优化。" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedAvg `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Federated Averaging strategy." +msgstr "联邦平均策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of +msgid "Configurable FedXgbNnAvg strategy implementation." +msgstr "可配置的 FedXgbNAvg 策略实施。" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of +#, fuzzy +msgid "Configurable FedXgbBagging strategy implementation." 
+msgstr "可配置的 FedXgbNAvg 策略实施。" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of +#, fuzzy +msgid "Configurable FedXgbCyclic strategy implementation." +msgstr "可配置的 FedAvg 策略实施。" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedAvgAndroid `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 of +msgid "Federated Averaging with Momentum strategy." +msgstr "联邦平均动量策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedOpt `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedopt.FedOpt:1 of +#, fuzzy +msgid "Federated Optim strategy." +msgstr "联邦优化策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedProx `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedprox.FedProx:1 of +msgid "Federated Optimization strategy." +msgstr "联邦优化策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "FedYogi [Reddi et al., 2020] strategy." 
+msgstr "FedYogi [Reddi 等人,2020] 策略。"
+
+#: ../../source/ref-api/flwr.server.strategy.rst:41::1
+msgid ""
+":py:obj:`QFedAvg `\\ \\(\\*\\[\\, "
+"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)"
+msgstr ""
+
+#: ../../source/ref-api/flwr.server.strategy.rst:41::1
+#: flwr.server.strategy.qfedavg.QFedAvg:1 of
+msgid "Configurable QFedAvg strategy implementation."
+msgstr "可配置的 QFedAvg 策略实施。"
+
+#: ../../source/ref-api/flwr.server.strategy.rst:41::1
+msgid ""
+":py:obj:`FedMedian `\\ \\(\\*\\[\\, "
+"fraction\\_fit\\, ...\\]\\)"
+msgstr ""
+
+#: ../../source/ref-api/flwr.server.strategy.rst:41::1
+#: flwr.server.strategy.fedmedian.FedMedian:1 of
+msgid "Configurable FedMedian strategy implementation."
+msgstr "可配置的 FedMedian 策略实施。"
+
+#: ../../source/ref-api/flwr.server.strategy.rst:41::1
+msgid ""
+":py:obj:`FedTrimmedAvg `\\ "
+"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)"
+msgstr ""
+
+#: ../../source/ref-api/flwr.server.strategy.rst:41::1
+#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of
+msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]."
+msgstr "带修剪均值的联邦平均法[Dong Yin 等,2021]。"
+
+#: ../../source/ref-api/flwr.server.strategy.rst:41::1
+msgid ""
+":py:obj:`Krum `\\ \\(\\*\\[\\, "
+"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)"
+msgstr ""
+
+#: ../../source/ref-api/flwr.server.strategy.rst:41::1
+#: flwr.server.strategy.krum.Krum:1 of
+msgid "Krum [Blanchard et al., 2017] strategy."
+msgstr "Krum [Blanchard 等人,2017] 策略。"
+
+#: ../../source/ref-api/flwr.server.strategy.rst:41::1
+msgid ""
+":py:obj:`Bulyan `\\ \\(\\*\\, "
+"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)"
+msgstr ""
+
+#: ../../source/ref-api/flwr.server.strategy.rst:41::1
+#: flwr.server.strategy.bulyan.Bulyan:1 of
+msgid "Bulyan strategy."
+msgstr "Bulyan 策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +msgid "" +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of +msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." +msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#, fuzzy +msgid ":py:obj:`Strategy `\\ \\(\\)" +msgstr "server.strategy.Strategy" + +#: ../../source/ref-api/flwr.server.strategy.rst:41::1 +#: flwr.server.strategy.strategy.Strategy:1 of +msgid "Abstract base class for server strategy implementations." 
+msgstr "服务器策略实现的抽象基类。" + +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 +msgid "Bulyan" +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 +#: flwr.server.strategy.fedmedian.FedMedian:1 +#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 +#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of +#, fuzzy +msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" +msgstr "server.strategy.DPFedAvgFixed" + +#: flwr.server.strategy.bulyan.Bulyan:3 of +msgid "Implementation based on https://arxiv.org/abs/1802.07927." +msgstr "实施基于 https://arxiv.org/abs/1802.07927。" + +#: flwr.server.strategy.bulyan.Bulyan:5 +#: flwr.server.strategy.fedadagrad.FedAdagrad:5 +#: flwr.server.strategy.fedadam.FedAdam:5 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 +#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 +#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of +msgid "Fraction of clients used during training. Defaults to 1.0." +msgstr "训练期间使用客户的比例。默认为 1.0。" + +#: flwr.server.strategy.bulyan.Bulyan:7 +#: flwr.server.strategy.fedadagrad.FedAdagrad:7 +#: flwr.server.strategy.fedadam.FedAdam:7 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 +#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 +#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of +msgid "Fraction of clients used during validation. Defaults to 1.0." 
+msgstr "验证过程中使用的客户端比例。默认为 1.0。" + +#: flwr.server.strategy.bulyan.Bulyan:9 +#: flwr.server.strategy.fedadagrad.FedAdagrad:9 +#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 +#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 +#: flwr.server.strategy.fedprox.FedProx:45 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 +#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of +msgid "Minimum number of clients used during training. Defaults to 2." +msgstr "训练期间使用的最少客户数。默认为 2。" + +#: flwr.server.strategy.bulyan.Bulyan:11 +#: flwr.server.strategy.fedadagrad.FedAdagrad:11 +#: flwr.server.strategy.fedadam.FedAdam:11 +#: flwr.server.strategy.fedavg.FedAvg:15 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 +#: flwr.server.strategy.fedavgm.FedAvgM:11 +#: flwr.server.strategy.fedopt.FedOpt:11 +#: flwr.server.strategy.fedprox.FedProx:47 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 +#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of +msgid "Minimum number of clients used during validation. Defaults to 2." +msgstr "验证过程中使用的最少客户端数量。默认为 2。" + +#: flwr.server.strategy.bulyan.Bulyan:13 +#: flwr.server.strategy.fedadagrad.FedAdagrad:13 +#: flwr.server.strategy.fedadam.FedAdam:13 +#: flwr.server.strategy.fedavg.FedAvg:17 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 +#: flwr.server.strategy.fedavgm.FedAvgM:13 +#: flwr.server.strategy.fedopt.FedOpt:13 +#: flwr.server.strategy.fedprox.FedProx:49 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 +#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of +msgid "Minimum number of total clients in the system. Defaults to 2." +msgstr "系统中客户总数的最小值。默认为 2。" + +#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of +msgid "Number of malicious clients in the system. Defaults to 0." 
+msgstr "系统中恶意客户端的数量。默认为 0。" + +#: flwr.server.strategy.bulyan.Bulyan:17 +#: flwr.server.strategy.fedadagrad.FedAdagrad:15 +#: flwr.server.strategy.fedadam.FedAdam:15 +#: flwr.server.strategy.fedavg.FedAvg:19 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 +#: flwr.server.strategy.fedavgm.FedAvgM:15 +#: flwr.server.strategy.fedopt.FedOpt:15 +#: flwr.server.strategy.fedprox.FedProx:51 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 +#: flwr.server.strategy.fedyogi.FedYogi:17 +#: flwr.server.strategy.fedyogi.FedYogi:18 +#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of +msgid "Optional function used for validation. Defaults to None." +msgstr "用于验证的可选函数。默认为 \"无\"。" + +#: flwr.server.strategy.bulyan.Bulyan:19 +#: flwr.server.strategy.fedadagrad.FedAdagrad:17 +#: flwr.server.strategy.fedadam.FedAdam:17 +#: flwr.server.strategy.fedavg.FedAvg:21 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 +#: flwr.server.strategy.fedavgm.FedAvgM:17 +#: flwr.server.strategy.fedopt.FedOpt:17 +#: flwr.server.strategy.fedprox.FedProx:53 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 +#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of +msgid "Function used to configure training. Defaults to None." +msgstr "用于配置训练的功能。默认为 \"无\"。" + +#: flwr.server.strategy.bulyan.Bulyan:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:19 +#: flwr.server.strategy.fedadam.FedAdam:19 +#: flwr.server.strategy.fedavg.FedAvg:23 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 +#: flwr.server.strategy.fedavgm.FedAvgM:19 +#: flwr.server.strategy.fedopt.FedOpt:19 +#: flwr.server.strategy.fedprox.FedProx:55 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 +#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of +msgid "Function used to configure validation. Defaults to None." 
+msgstr "用于配置验证的函数。默认为 None。"
+
+#: flwr.server.strategy.bulyan.Bulyan:23
+#: flwr.server.strategy.fedadagrad.FedAdagrad:25
+#: flwr.server.strategy.fedadam.FedAdam:21
+#: flwr.server.strategy.fedavg.FedAvg:25
+#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21
+#: flwr.server.strategy.fedavgm.FedAvgM:21
+#: flwr.server.strategy.fedopt.FedOpt:21
+#: flwr.server.strategy.fedprox.FedProx:57
+#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21
+#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of
+msgid "Whether or not accept rounds containing failures. Defaults to True."
+msgstr "是否接受包含失败的轮。默认为 True。"
+
+#: flwr.server.strategy.bulyan.Bulyan:25
+#: flwr.server.strategy.fedadagrad.FedAdagrad:27
+#: flwr.server.strategy.fedadam.FedAdam:23
+#: flwr.server.strategy.fedavg.FedAvg:27
+#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24
+#: flwr.server.strategy.fedavgm.FedAvgM:23
+#: flwr.server.strategy.fedopt.FedOpt:23
+#: flwr.server.strategy.fedprox.FedProx:59
+#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23
+#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of
+msgid "Initial global model parameters."
+msgstr "初始全局模型参数。" + +#: flwr.server.strategy.bulyan.Bulyan:27 of +msgid "" +"Byzantine resilient aggregation rule that is used as the first step of " +"the Bulyan (e.g., Krum)" +msgstr "Byzantine弹性聚合规则,用作 Bulyan 的第一步(如 Krum)" + +#: flwr.server.strategy.bulyan.Bulyan:29 of +msgid "arguments to the first_aggregation rule" +msgstr "第一聚类规则的参数" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate evaluation losses using weighted average." +msgstr "采用加权平均法计算评估损失总额。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate fit results using Bulyan." 
+msgstr "使用 Bulyan 技术汇总拟合结果。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of +msgid "Configure the next round of evaluation." 
+msgstr "配置下一轮评估。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 +#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of +msgid "Configure the next round of training." 
+msgstr "配置下一轮训练。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Evaluate model parameters using an evaluation function." +msgstr "使用评估函数评估模型参数。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 +#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Initialize global model parameters." 
+msgstr "初始化全局模型参数。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of +msgid "Use a fraction of available clients for evaluation." +msgstr "使用部分可用客户进行评估。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of +msgid "Return the sample size and the required number of available clients." 
+msgstr "返回样本大小和所需的可用客户数量。" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" +msgstr "" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of +msgid "This class is deprecated and will be removed in a future release." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation losses using the given strategy." +msgstr "使用给定的策略汇总评估损失。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." +msgstr "汇总 DPFedAvgFixed 中的训练结果并更新片段标准。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of +msgid "Configure the next round of evaluation using the specified strategy." 
+msgstr "使用指定策略配置下一轮评估。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of +msgid "Evaluate model parameters using an evaluation function from the strategy." +msgstr "使用策略中的评估函数评估模型参数。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of +msgid "Initialize global model parameters using given strategy." +msgstr "使用给定的策略初始化全局模型参数。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.configure_fit:3 +#: flwr.server.strategy.strategy.Strategy.evaluate:6 of +msgid "The current round of federated learning." 
+msgstr "本轮联邦学习。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 +#: flwr.server.strategy.strategy.Strategy.configure_fit:7 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of +msgid "The client manager which holds all currently connected clients." +msgstr "客户端管理器,用于管理当前连接的所有客户端。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of +msgid "" +"**evaluate_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `EvaluateIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated evaluation." +msgstr "" +"**evaluate_configuration** -- " +"一个元组列表。列表中的每个元组都标识了一个`ClientProxy`和该特定`ClientProxy`的`EvaluateIns`。如果某个特定的" +" `ClientProxy` 未包含在此列表中,则表示该 `ClientProxy` 将不参与下一轮联合评估。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of +msgid "Aggregate training results using unweighted aggregation." 
+msgstr "使用非加权汇总法汇总训练结果。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of +msgid "" +"Configure the next round of training incorporating Differential Privacy " +"(DP)." +msgstr "配置包含差分隐私 (DP) 的下一轮训练。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of +msgid "" +"Configuration of the next training round includes information related to " +"DP, such as clip norm and noise stddev." +msgstr "下一轮训练的配置包括与 DP 相关的信息,如片段规范和噪声 stddev。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 +#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of +msgid "" +"**fit_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `FitIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated learning." 
+msgstr "" +"**fit_configuration** -- " +"一个元组列表。列表中的每个元组都标识了一个`ClientProxy`和该特定`ClientProxy`的`FitIns'。如果某个特定的`ClientProxy`不在此列表中,则表示该`ClientProxy`将不参加下一轮联合学习。" + +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 +#, fuzzy +msgid "FaultTolerantFedAvg" +msgstr "server.strategy.FaultTolerantFedAvg" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 +#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 +#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 +#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using weighted average." 
+msgstr "使用加权平均法汇总拟合结果。" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 +#: ../../source/ref-changelog.md:839 +msgid "FedAdagrad" +msgstr "FedAdagrad" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 +#: flwr.server.strategy.fedadam.FedAdam:1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:3 +#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 +#: flwr.server.strategy.fedyogi.FedYogi:3 of +msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +msgstr "实施基于 https://arxiv.org/abs/2003.00295v5" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:23 +#: flwr.server.strategy.fedadam.FedAdam:25 
+#: flwr.server.strategy.fedadam.FedAdam:27 +#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 +#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 +#: flwr.server.strategy.fedprox.FedProx:61 +#: flwr.server.strategy.fedprox.FedProx:63 +#: flwr.server.strategy.fedyogi.FedYogi:28 +#: flwr.server.strategy.fedyogi.FedYogi:30 of +msgid "Metrics aggregation function, optional." +msgstr "指标汇总功能,可选。" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:29 +#: flwr.server.strategy.fedadam.FedAdam:29 +#: flwr.server.strategy.fedopt.FedOpt:29 of +msgid "Server-side learning rate. Defaults to 1e-1." +msgstr "服务器端学习率。默认为 1e-1。" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:31 +#: flwr.server.strategy.fedadam.FedAdam:31 +#: flwr.server.strategy.fedopt.FedOpt:31 of +msgid "Client-side learning rate. Defaults to 1e-1." +msgstr "客户端学习率。默认为 1e-1。" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:33 +#: flwr.server.strategy.fedadam.FedAdam:37 +#: flwr.server.strategy.fedopt.FedOpt:37 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." 
+msgstr "控制算法的适应度。默认为 1e-9。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 +#, fuzzy +msgid "FedAdam" +msgstr "FedAdagrad" + +#: flwr.server.strategy.fedadam.FedAdam:33 +#: flwr.server.strategy.fedyogi.FedYogi:36 of +msgid "Momentum parameter. Defaults to 0.9." +msgstr "动量参数。默认为 0.9。" + +#: flwr.server.strategy.fedadam.FedAdam:35 +#: flwr.server.strategy.fedyogi.FedYogi:38 of +msgid "Second moment parameter. Defaults to 0.99." 
+msgstr "第二动量参数。默认为 0.99。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 +#, fuzzy +msgid "FedAvg" +msgstr "DP-FedAvg" + +#: flwr.server.strategy.fedavg.FedAvg:3 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of +msgid "Implementation based on https://arxiv.org/abs/1602.05629" +msgstr "实施基于 https://arxiv.org/abs/1602.05629" + +#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 +#: of +msgid "" +"Fraction of clients used during training. In case `min_fit_clients` is " +"larger than `fraction_fit * available_clients`, `min_fit_clients` will " +"still be sampled. Defaults to 1.0." 
+msgstr "" +"训练过程中使用的客户端比例。如果 `min_fit_clients` 大于 `fraction_fit * " +"available_clients`,则仍会对 `min_fit_clients` 进行采样。默认为 1.0。" + +#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 +#: of +msgid "" +"Fraction of clients used during validation. In case " +"`min_evaluate_clients` is larger than `fraction_evaluate * " +"available_clients`, `min_evaluate_clients` will still be sampled. " +"Defaults to 1.0." +msgstr "" +"验证过程中使用的客户端的比例。如果 `min_evaluate_clients` 大于 `fraction_evaluate * " +"available_clients`,则仍会对 `min_evaluate_clients` 进行采样。默认为 1.0。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 +#, fuzzy +msgid "FedAvgAndroid" 
+msgstr "DPFedAvgAdaptive" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`bytes_to_ndarray " +"`\\ \\(tensor\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of +#, fuzzy +msgid "Deserialize NumPy array from bytes." +msgstr "从字节反序列化 NumPy ndarray。" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`ndarray_to_bytes " +"`\\ \\(ndarray\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of +#, fuzzy +msgid "Serialize NumPy array to bytes." 
+msgstr "将 NumPy ndarray 序列化为字节。" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`ndarrays_to_parameters " +"`\\ " +"\\(ndarrays\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`parameters_to_ndarrays " +"`\\ " +"\\(parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 +#: of +#, fuzzy +msgid "Convert parameters object to NumPy weights." +msgstr "将参数对象转换为 NumPy ndarrays。" + +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 +#, fuzzy +msgid "FedAvgM" +msgstr "DP-FedAvg" + +#: flwr.server.strategy.fedavgm.FedAvgM:3 of +#, fuzzy +msgid "Implementation based on https://arxiv.org/abs/1909.06335" +msgstr "实施基于 https://arxiv.org/pdf/1909.06335.pdf" + +#: flwr.server.strategy.fedavgm.FedAvgM:25 of +msgid "" +"Server-side learning rate used in server-side optimization. Defaults to " +"1.0." +msgstr "服务器端优化中使用的服务器端学习率。默认为 1.0。" + +#: flwr.server.strategy.fedavgm.FedAvgM:28 of +msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." 
+msgstr "用于 FedAvgM 的服务器端动量因子。默认为 0.0。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 +msgid "FedMedian" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of +msgid "Aggregate fit results using median." 
+msgstr "使用中位数汇总拟合结果。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 +msgid "FedOpt" +msgstr "" + +#: flwr.server.strategy.fedopt.FedOpt:33 of +msgid "Momentum parameter. Defaults to 0.0." +msgstr "动量参数。默认为 0.0。" + +#: flwr.server.strategy.fedopt.FedOpt:35 of +msgid "Second moment parameter. Defaults to 0.0." 
+msgstr "第二动量参数。默认为 0.0。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 +msgid "FedProx" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:3 of +msgid "Implementation based on https://arxiv.org/abs/1812.06127" +msgstr "实施基于 https://arxiv.org/abs/1812.06127" + +#: flwr.server.strategy.fedprox.FedProx:5 of +msgid "" +"The strategy in itself will not be different than FedAvg, the client " +"needs to be adjusted. 
A proximal term needs to be added to the loss "
+"function during the training:"
+msgstr "策略本身与 FedAvg 并无不同,客户端需要进行调整。在训练过程中,需要在损失函数中添加一个近端项:"
+
+#: flwr.server.strategy.fedprox.FedProx:9 of
+msgid ""
+"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n"
+"\n"
+msgstr ""
+"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n"
+"\n"
+
+#: flwr.server.strategy.fedprox.FedProx:12 of
+msgid ""
+"Where $w^t$ are the global parameters and $w$ are the local weights the "
+"function will be optimized with."
+msgstr "其中,$w^t$ 是全局参数,$w$ 是优化函数的局部权重。"
+
+#: flwr.server.strategy.fedprox.FedProx:15 of
+msgid "In PyTorch, for example, the loss would go from:"
+msgstr "例如,在 PyTorch 中,损失将从:"
+
+#: flwr.server.strategy.fedprox.FedProx:21 of
+msgid "To:"
+msgstr "改为:"
+
+#: flwr.server.strategy.fedprox.FedProx:30 of
+msgid ""
+"With `global_params` being a copy of the parameters before the training "
+"takes place."
+msgstr "其中,`global_params` 是训练前的参数副本。"
+
+#: flwr.server.strategy.fedprox.FedProx:65 of
+msgid ""
+"The weight of the proximal term used in the optimization. 0.0 makes this "
+"strategy equivalent to FedAvg, and the higher the coefficient, the more "
+"regularization will be used (that is, the client parameters will need to "
+"be closer to the server parameters during training)."
+msgstr "" +"优化中使用的近端项权重。0.0 使该策略等同于 " +"FedAvg,系数越大,使用的正则化就越多(也就是说,在训练过程中,客户端参数需要更接近服务器参数)。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of +msgid "Sends the proximal factor mu to the clients" +msgstr "向客户发送近端因子mu" + +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 +#, fuzzy +msgid "FedTrimmedAvg" +msgstr "server.strategy.FedTrimmedAvg" + +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of +#, fuzzy +msgid "Implemented based on: https://arxiv.org/abs/1803.01498" +msgstr "实施基于 https://arxiv.org/abs/1802.07927。" + +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of +msgid "Fraction to cut off of both tails of the distribution. 
Defaults to 0.2." +msgstr "截取分布两个尾部的分数。默认为 0.2。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using trimmed average." +msgstr "使用修剪平均值汇总拟合结果。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 +msgid "FedXgbBagging" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 +#: 
flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +#, fuzzy +msgid "Aggregate evaluation metrics using average." +msgstr "采用加权平均法计算评估损失总额。" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of +#, fuzzy +msgid "Aggregate fit results using bagging." +msgstr "使用 Bulyan 技术汇总拟合结果。" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: 
../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 +msgid "FedXgbCyclic" +msgstr "" + +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\," +" results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 +#, fuzzy +msgid "FedXgbNnAvg" +msgstr "DP-FedAvg" + +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of +msgid "" +"This strategy is deprecated, but a copy of it is available in Flower " +"Baselines: " +"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\, " +"results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 +msgid "FedYogi" +msgstr "" + +#: flwr.server.strategy.fedyogi.FedYogi:32 of +#, fuzzy +msgid "Server-side learning rate. Defaults to 1e-2." +msgstr "服务器端学习率。默认为 1e-1。" + +#: flwr.server.strategy.fedyogi.FedYogi:34 of +#, fuzzy +msgid "Client-side learning rate. Defaults to 0.0316." +msgstr "客户端学习率。默认为 1e-1。" + +#: flwr.server.strategy.fedyogi.FedYogi:40 of +#, fuzzy +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." 
+msgstr "控制算法的适应度。默认为 1e-9。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 +msgid "Krum" +msgstr "" + +#: flwr.server.strategy.krum.Krum:3 of +#, fuzzy +msgid "Implementation based on https://arxiv.org/abs/1703.02757" +msgstr "实施基于 https://arxiv.org/abs/2304.07537。" + +#: flwr.server.strategy.krum.Krum:17 of +msgid "" +"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" +" that case classical Krum is applied." 
+msgstr "求平均值前保留的客户端数量(MultiKrum)。默认值为 0,在这种情况下会应用经典 Krum。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of +msgid "Aggregate fit results using Krum." +msgstr "使用 Krum 汇总拟合结果。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 +#, fuzzy +msgid "QFedAvg" +msgstr "DP-FedAvg" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, 
failures\\)" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 +#, fuzzy +msgid "Strategy" +msgstr "Krum 策略。" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation results." +msgstr "聚合评估结果。" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of +msgid "Aggregate training results." 
+msgstr "汇总训练结果。" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.evaluate:1 of +msgid "Evaluate the current model parameters." +msgstr "评估当前的模型参数。" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of +msgid "Initialize the (global) model parameters." +msgstr "初始化(全局)模型参数。" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of +msgid "" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " +"one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." 
+msgstr ""
+"从先前选定和配置的客户端进行的成功更新。每一对`(ClientProxy, "
+"FitRes)`都是来自先前选定客户端的一次成功更新。但并非所有先前选定的客户机都一定包含在此列表中:客户机可能会退出,不提交结果。对于每个没有提交更新的客户端,`failures`中都应该有一个`Exception`。"
+
+#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13
+#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of
+msgid "Exceptions that occurred while the server was waiting for client updates."
+msgstr "服务器等待客户端更新时发生的异常。"
+
+#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of
+msgid ""
+"**aggregation_result** -- The aggregated evaluation result. Aggregation "
+"typically uses some variant of a weighted average."
+msgstr "**aggregation_result** -- 汇总的评估结果。聚合通常使用某种加权平均值。"
+
+#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of
+msgid ""
+"Successful updates from the previously selected and configured clients. "
+"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from"
+" one of the previously selected clients. Not that not all previously "
+"selected clients are necessarily included in this list: a client might "
+"drop out and not submit a result. For each client that did not submit an "
+"update, there should be an `Exception` in `failures`."
+msgstr ""
+"来自先前选定和配置的客户端的成功更新。每一对`(ClientProxy, "
+"FitRes)`都构成先前选定的客户端之一的一次成功更新。但并非所有先前选定的客户机都一定包含在此列表中:客户机可能会退出,不提交结果。对于每个没有提交更新的客户端,`failures`中都应该有一个`Exception`。"
+
+#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of
+msgid ""
+"**parameters** -- If parameters are returned, then the server will treat "
+"these as the new global model parameters (i.e., it will replace the "
+"previous parameters with the ones returned from this method). If `None` "
+"is returned (e.g., because there were only failures and no viable "
+"results) then the server will no update the previous model parameters, "
+"the updates received in this round are discarded, and the global model "
+"parameters remain the same."
+msgstr "" +"**parameters** -- 如果返回参数,那么服务器将把这些参数作为新的全局模型参数(即用本方法返回的参数替换之前的参数)。如果返回 " +"\"无\"(例如,因为只有失败而没有可行的结果),那么服务器将不再更新之前的模型参数,本轮收到的更新将被丢弃,全局模型参数保持不变。" + +#: flwr.server.strategy.strategy.Strategy.evaluate:3 of +msgid "" +"This function can be used to perform centralized (i.e., server-side) " +"evaluation of model parameters." +msgstr "该函数可用于对模型参数进行集中(即服务器端)评估。" + +#: flwr.server.strategy.strategy.Strategy.evaluate:11 of +msgid "" +"**evaluation_result** -- The evaluation result, usually a Tuple " +"containing loss and a dictionary containing task-specific metrics (e.g., " +"accuracy)." +msgstr "**evaluation_result** -- 评估结果,通常是一个元组,包含损失值和一个字典,字典中包含特定任务的指标(如准确率)。" + +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of +msgid "" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the initial global model parameters." +msgstr "**parameters** -- 如果返回参数,服务器将把这些参数视为初始全局模型参数。" + +#: ../../source/ref-api/flwr.simulation.rst:2 +#, fuzzy +msgid "simulation" +msgstr "运行模拟" + +#: ../../source/ref-api/flwr.simulation.rst:17::1 +msgid "" +":py:obj:`start_simulation `\\ \\(\\*\\," +" client\\_fn\\[\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:17::1 +#: flwr.simulation.app.start_simulation:1 of +msgid "Start a Ray-based Flower simulation server." +msgstr "启动基于 Ray 的Flower模拟服务器。" + +#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +#, fuzzy +msgid "start\\_simulation" +msgstr "start_simulation" + +#: flwr.simulation.app.start_simulation:3 of +msgid "" +"A function creating client instances. The function must take a single " +"`str` argument called `cid`. It should return a single client instance of" +" type Client. Note that the created client instances are ephemeral and " +"will often be destroyed after a single method invocation. Since client " +"instances are not long-lived, they should not attempt to carry state over" +" method invocations. 
Any state required by the instance (model, dataset, "
+"hyperparameters, ...) should be (re-)created in either the call to "
+"`client_fn` or the call to any of the client methods (e.g., load "
+"evaluation data in the `evaluate` method itself)."
+msgstr ""
+"创建客户端实例的函数。该函数必须接受一个名为 `cid` 的 `str` 参数。它应返回一个 Client "
+"类型的客户端实例。请注意,创建的客户端实例是短暂的,通常在调用一个方法后就会被销毁。由于客户机实例不是长期存在的,它们不应试图在方法调用时携带状态数据。实例所需的任何状态数据(模型、数据集、超参数......)都应在调用"
+" `client_fn` 或任何客户端方法(例如,在 `evaluate` 方法中加载评估数据)时(重新)创建。"
+
+#: flwr.simulation.app.start_simulation:13 of
+msgid ""
+"The total number of clients in this simulation. This must be set if "
+"`clients_ids` is not set and vice-versa."
+msgstr "本次模拟的客户总数。如果未设置 `clients_ids`,则必须设置该参数,反之亦然。"
+
+#: flwr.simulation.app.start_simulation:16 of
+msgid ""
+"List `client_id`s for each client. This is only required if `num_clients`"
+" is not set. Setting both `num_clients` and `clients_ids` with "
+"`len(clients_ids)` not equal to `num_clients` generates an error."
+msgstr ""
+"列出每个客户的 `client_id`。只有在未设置 `num_clients` "
+"时才需要这样做。同时设置`num_clients`和`clients_ids`,且`len(clients_ids)`不等于`num_clients`,会产生错误。"
+
+#: flwr.simulation.app.start_simulation:20 of
+msgid ""
+"CPU and GPU resources for a single client. Supported keys are `num_cpus` "
+"and `num_gpus`. To understand the GPU utilization caused by `num_gpus`, "
+"as well as using custom resources, please consult the Ray documentation."
+msgstr ""
+"单个客户端的 CPU 和 GPU 资源。支持的键值为 `num_cpus` 和 `num_gpus`。要了解"
+" `num_gpus` 所导致的 GPU 利用率,以及使用自定义资源的情况,请查阅 Ray 文档。"
+
+#: flwr.simulation.app.start_simulation:25 of
+msgid ""
+"An implementation of the abstract base class `flwr.server.Server`. If no "
+"instance is provided, then `start_server` will create one."
+msgstr "抽象基类 `flwr.server.Server` 的实现。如果没有提供实例,`start_server` 将创建一个。"
+
+#: flwr.simulation.app.start_simulation:31 of
+msgid ""
+"An implementation of the abstract base class `flwr.server.Strategy`. 
If "
+"no strategy is provided, then `start_server` will use "
+"`flwr.server.strategy.FedAvg`."
+msgstr ""
+"抽象基类 `flwr.server.Strategy` 的实现。如果没有提供策略,`start_server` 将使用 "
+"`flwr.server.strategy.FedAvg`。"
+
+#: flwr.simulation.app.start_simulation:35 of
+msgid ""
+"An implementation of the abstract base class `flwr.server.ClientManager`."
+" If no implementation is provided, then `start_simulation` will use "
+"`flwr.server.client_manager.SimpleClientManager`."
+msgstr ""
+"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_simulation` 将使用 "
+"`flwr.server.client_manager.SimpleClientManager`。"
+
+#: flwr.simulation.app.start_simulation:39 of
+msgid ""
+"Optional dictionary containing arguments for the call to `ray.init`. If "
+"ray_init_args is None (the default), Ray will be initialized with the "
+"following default args: { \"ignore_reinit_error\": True, "
+"\"include_dashboard\": False } An empty dictionary can be used "
+"(ray_init_args={}) to prevent any arguments from being passed to "
+"ray.init."
+msgstr ""
+"可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为 None(默认值),则将使用以下默认参数初始化 Ray:"
+" { \"ignore_reinit_error\": True, \"include_dashboard\": False } "
+"可以使用空字典(ray_init_args={})来防止向 ray.init 传递任何参数。"
+
+#: flwr.simulation.app.start_simulation:39 of
+msgid ""
+"Optional dictionary containing arguments for the call to `ray.init`. If "
+"ray_init_args is None (the default), Ray will be initialized with the "
+"following default args:"
+msgstr "可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为 None(默认值),则将使用以下默认参数初始化 Ray:"
+
+#: flwr.simulation.app.start_simulation:43 of
+msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }"
+msgstr "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }"
+
+#: flwr.simulation.app.start_simulation:45 of
+msgid ""
+"An empty dictionary can be used (ray_init_args={}) to prevent any "
+"arguments from being passed to ray.init."
+msgstr "可以使用空字典 (ray_init_args={}) 来防止向 ray.init 传递任何参数。" + +#: flwr.simulation.app.start_simulation:48 of +msgid "" +"Set to True to prevent `ray.shutdown()` in case " +"`ray.is_initialized()=True`." +msgstr "设为 True 可在 `ray.is_initialized()=True` 情况下阻止 `ray.shutdown()` 。" + +#: flwr.simulation.app.start_simulation:50 of +msgid "" +"Optionally specify the type of actor to use. The actor object, which " +"persists throughout the simulation, will be the process in charge of " +"running the clients' jobs (i.e. their `fit()` method)." +msgstr "可选择指定要使用的actor类型。actor对象将在整个模拟过程中持续存在,它将是负责运行客户端作业(即其 `fit()`方法)的进程。" + +#: flwr.simulation.app.start_simulation:54 of +msgid "" +"If you want to create your own Actor classes, you might need to pass some" +" input argument. You can use this dictionary for such purpose." +msgstr "如果您想创建自己的 Actor 类,可能需要传递一些输入参数。为此,您可以使用本字典。" + +#: flwr.simulation.app.start_simulation:57 of +msgid "" +"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " +"the VCE to choose in which node the actor is placed. If you are an " +"advanced user needed more control you can use lower-level scheduling " +"strategies to pin actors to specific compute nodes (e.g. via " +"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." +" For all details, please refer to the Ray documentation: " +"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" +msgstr "" +"(默认:\"DEFAULT\")可选字符串(\"DEFAULT \"或 \"SPREAD\"),供 VCE " +"选择将行为体放置在哪个节点上。如果你是需要更多控制权的高级用户,可以使用低级调度策略将actor固定到特定计算节点(例如,通过 " +"NodeAffinitySchedulingStrategy)。请注意,这是一项高级功能。有关详细信息,请参阅 Ray " +"文档:https://docs.ray.io/en/latest/ray-core/scheduling/index.html" + +#: flwr.simulation.app.start_simulation:66 of +msgid "**hist** -- Object containing metrics from training." 
+msgstr "**hist** -- 包含训练指标的对象。"
+
+#: ../../source/ref-changelog.md:1
+msgid "Changelog"
+msgstr "更新日志"
+
+#: ../../source/ref-changelog.md:3
+msgid "Unreleased"
+msgstr "尚未发布"
+
+#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:17
+#: ../../source/ref-changelog.md:110 ../../source/ref-changelog.md:210
+#: ../../source/ref-changelog.md:294 ../../source/ref-changelog.md:358
+#: ../../source/ref-changelog.md:416 ../../source/ref-changelog.md:485
+#: ../../source/ref-changelog.md:614 ../../source/ref-changelog.md:656
+#: ../../source/ref-changelog.md:723 ../../source/ref-changelog.md:789
+#: ../../source/ref-changelog.md:834 ../../source/ref-changelog.md:873
+#: ../../source/ref-changelog.md:906 ../../source/ref-changelog.md:956
+msgid "What's new?"
+msgstr "有什么新内容?"
+
+#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:80
+#: ../../source/ref-changelog.md:192 ../../source/ref-changelog.md:282
+#: ../../source/ref-changelog.md:346 ../../source/ref-changelog.md:404
+#: ../../source/ref-changelog.md:473 ../../source/ref-changelog.md:535
+#: ../../source/ref-changelog.md:554 ../../source/ref-changelog.md:710
+#: ../../source/ref-changelog.md:781 ../../source/ref-changelog.md:818
+#: ../../source/ref-changelog.md:861
+msgid "Incompatible changes"
+msgstr "不兼容的更改"
+
+#: ../../source/ref-changelog.md:9
+msgid "v1.7.0 (2024-02-05)"
+msgstr "v1.7.0 (2024-02-05)"
+
+#: ../../source/ref-changelog.md:11 ../../source/ref-changelog.md:104
+#: ../../source/ref-changelog.md:204 ../../source/ref-changelog.md:288
+#: ../../source/ref-changelog.md:352 ../../source/ref-changelog.md:410
+#: ../../source/ref-changelog.md:479 ../../source/ref-changelog.md:548
+msgid "Thanks to our contributors"
+msgstr "感谢我们的贡献者"
+
+#: ../../source/ref-changelog.md:13 ../../source/ref-changelog.md:106
+#: ../../source/ref-changelog.md:206 ../../source/ref-changelog.md:290
+#: ../../source/ref-changelog.md:354 ../../source/ref-changelog.md:412
+msgid ""
+"We would 
like to give our special thanks to all the contributors who made" +" the new version of Flower possible (in `git shortlog` order):" +msgstr "在此,我们要特别感谢所有为 Flower 的新版本做出贡献的人员(按 `git shortlog` 顺序排列):" + +#: ../../source/ref-changelog.md:15 +#, fuzzy +msgid "" +"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " +"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " +"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " +"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " +"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " +"Shaaban`, `Yan Gao`, `Yasar Abbas` " +msgstr "" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " + +#: ../../source/ref-changelog.md:19 +#, fuzzy +msgid "" +"**Introduce stateful clients (experimental)** " +"([#2770](https://github.com/adap/flower/pull/2770), " +"[#2686](https://github.com/adap/flower/pull/2686), " +"[#2696](https://github.com/adap/flower/pull/2696), " +"[#2643](https://github.com/adap/flower/pull/2643), " +"[#2769](https://github.com/adap/flower/pull/2769))" +msgstr "" +"** baselines的普通更新** ([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " +"[#2307](https://github.com/adap/flower/pull/2307), " +"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435))" + +#: ../../source/ref-changelog.md:21 +msgid "" +"Subclasses of `Client` and `NumPyClient` can now store local state that " +"remains on the client. Let's start with the highlight first: this new " +"feature is compatible with both simulated clients (via " +"`start_simulation`) and networked clients (via `start_client`). 
It's also" +" the first preview of new abstractions like `Context` and `RecordSet`. " +"Clients can access state of type `RecordSet` via `state: RecordSet = " +"self.context.state`. Changes to this `RecordSet` are preserved across " +"different rounds of execution to enable stateful computations in a " +"unified way across simulation and deployment." +msgstr "" + +#: ../../source/ref-changelog.md:23 +#, fuzzy +msgid "" +"**Improve performance** " +"([#2293](https://github.com/adap/flower/pull/2293))" +msgstr "**改进示例笔记** ([#2005](https://github.com/adap/flower/pull/2005))" + +#: ../../source/ref-changelog.md:25 +msgid "" +"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" +"place aggregation to reduce memory consumption. The Flower client " +"serialization/deserialization has been rewritten from the ground up, " +"which results in significant speedups, especially when the client-side " +"training time is short." +msgstr "" + +#: ../../source/ref-changelog.md:27 +#, fuzzy +msgid "" +"**Support Federated Learning with Apple MLX and Flower** " +"([#2693](https://github.com/adap/flower/pull/2693))" +msgstr "" +"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " +"([#1598](https://github.com/adap/flower/pull/1598))" + +#: ../../source/ref-changelog.md:29 +msgid "" +"Flower has official support for federated learning using [Appple " +"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " +"example." +msgstr "" + +#: ../../source/ref-changelog.md:31 +#, fuzzy +msgid "" +"**Introduce new XGBoost cyclic strategy** " +"([#2666](https://github.com/adap/flower/pull/2666), " +"[#2668](https://github.com/adap/flower/pull/2668))" +msgstr "" +"**介绍 iOS SDK(预览版)** ([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" + +#: ../../source/ref-changelog.md:33 +msgid "" +"A new strategy called `FedXgbCyclic` supports a client-by-client style of" +" training (often called cyclic). 
The `xgboost-comprehensive` code example" +" shows how to use it in a full project. In addition to that, `xgboost-" +"comprehensive` now also supports simulation mode. With this, Flower " +"offers best-in-class XGBoost support." +msgstr "" + +#: ../../source/ref-changelog.md:35 +#, fuzzy +msgid "" +"**Support Python 3.11** " +"([#2394](https://github.com/adap/flower/pull/2394))" +msgstr "** 支持 Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" + +#: ../../source/ref-changelog.md:37 +msgid "" +"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " +"ensure better support for users using more recent Python versions." +msgstr "" + +#: ../../source/ref-changelog.md:39 +#, fuzzy +msgid "" +"**Update gRPC and ProtoBuf dependencies** " +"([#2814](https://github.com/adap/flower/pull/2814))" +msgstr "" +"**更新 REST API 以支持创建和删除节点** " +"([#2283](https://github.com/adap/flower/pull/2283))" + +#: ../../source/ref-changelog.md:41 +msgid "" +"The `grpcio` and `protobuf` dependencies were updated to their latest " +"versions for improved security and performance." 
+msgstr "" + +#: ../../source/ref-changelog.md:43 +#, fuzzy +msgid "" +"**Introduce Docker image for Flower server** " +"([#2700](https://github.com/adap/flower/pull/2700), " +"[#2688](https://github.com/adap/flower/pull/2688), " +"[#2705](https://github.com/adap/flower/pull/2705), " +"[#2695](https://github.com/adap/flower/pull/2695), " +"[#2747](https://github.com/adap/flower/pull/2747), " +"[#2746](https://github.com/adap/flower/pull/2746), " +"[#2680](https://github.com/adap/flower/pull/2680), " +"[#2682](https://github.com/adap/flower/pull/2682), " +"[#2701](https://github.com/adap/flower/pull/2701))" +msgstr "" +"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" + +#: ../../source/ref-changelog.md:45 +msgid "" +"The Flower server can now be run using an official Docker image. A new " +"how-to guide explains [how to run Flower using " +"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html). An official Flower client Docker image will follow." 
+msgstr "" + +#: ../../source/ref-changelog.md:47 +#, fuzzy +msgid "" +"**Introduce** `flower-via-docker-compose` **example** " +"([#2626](https://github.com/adap/flower/pull/2626))" +msgstr "" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" + +#: ../../source/ref-changelog.md:49 +#, fuzzy +msgid "" +"**Introduce** `quickstart-sklearn-tabular` **example** " +"([#2719](https://github.com/adap/flower/pull/2719))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" + +#: ../../source/ref-changelog.md:51 +#, fuzzy +msgid "" +"**Introduce** `custom-metrics` **example** " +"([#1958](https://github.com/adap/flower/pull/1958))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" + +#: ../../source/ref-changelog.md:53 +#, fuzzy +msgid "" +"**Update code examples to use Flower Datasets** " +"([#2450](https://github.com/adap/flower/pull/2450), " +"[#2456](https://github.com/adap/flower/pull/2456), " +"[#2318](https://github.com/adap/flower/pull/2318), " +"[#2712](https://github.com/adap/flower/pull/2712))" +msgstr "" +"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310)" + +#: ../../source/ref-changelog.md:55 +msgid "" +"Several code examples were updated to use [Flower " +"Datasets](https://flower.ai/docs/datasets/)." 
+msgstr "" + +#: ../../source/ref-changelog.md:57 +#, fuzzy +msgid "" +"**General updates to Flower Examples** " +"([#2381](https://github.com/adap/flower/pull/2381), " +"[#2805](https://github.com/adap/flower/pull/2805), " +"[#2782](https://github.com/adap/flower/pull/2782), " +"[#2806](https://github.com/adap/flower/pull/2806), " +"[#2829](https://github.com/adap/flower/pull/2829), " +"[#2825](https://github.com/adap/flower/pull/2825), " +"[#2816](https://github.com/adap/flower/pull/2816), " +"[#2726](https://github.com/adap/flower/pull/2726), " +"[#2659](https://github.com/adap/flower/pull/2659), " +"[#2655](https://github.com/adap/flower/pull/2655))" +msgstr "" +"**改进(试验性)驱动程序应用程序接口** ([#1663](https://github.com/adap/flower/pull/1663)," +" [#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" -#: flwr.common.typing.FitIns:1 of -msgid "Fit instructions for a client." +#: ../../source/ref-changelog.md:59 +msgid "Many Flower code examples received substantial updates." msgstr "" -#: flwr.common.typing.FitRes:1 of -msgid "Fit response from a client." -msgstr "" +#: ../../source/ref-changelog.md:61 ../../source/ref-changelog.md:154 +msgid "**Update Flower Baselines**" +msgstr "**更新 Flower Baselines**" -#: flwr.common.typing.GetParametersIns:1 of -msgid "Parameters request for a client." 
+#: ../../source/ref-changelog.md:63 +#, fuzzy +msgid "" +"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " +"[#2771](https://github.com/adap/flower/pull/2771))" msgstr "" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" -#: flwr.common.typing.GetParametersRes:1 of -msgid "Response when asked to return parameters." -msgstr "" +#: ../../source/ref-changelog.md:64 +#, fuzzy +msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" +msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" -#: flwr.common.typing.GetPropertiesIns:1 of -msgid "Properties request for a client." -msgstr "" +#: ../../source/ref-changelog.md:65 +#, fuzzy +msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" +msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" -#: flwr.common.typing.GetPropertiesRes:1 of -msgid "Properties response from a client." -msgstr "" +#: ../../source/ref-changelog.md:66 +#, fuzzy +msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" +msgstr "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" -#: flwr.common.typing.Parameters:1 of -msgid "Model parameters." 
-msgstr "" +#: ../../source/ref-changelog.md:67 +#, fuzzy +msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" +msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" + +#: ../../source/ref-changelog.md:68 +#, fuzzy +msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" +msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" + +#: ../../source/ref-changelog.md:70 +#, fuzzy +msgid "" +"**Improve documentation** " +"([#2674](https://github.com/adap/flower/pull/2674), " +"[#2480](https://github.com/adap/flower/pull/2480), " +"[#2826](https://github.com/adap/flower/pull/2826), " +"[#2727](https://github.com/adap/flower/pull/2727), " +"[#2761](https://github.com/adap/flower/pull/2761), " +"[#2900](https://github.com/adap/flower/pull/2900))" +msgstr "" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" + +#: ../../source/ref-changelog.md:72 +msgid "" +"**Improved testing and development infrastructure** " +"([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " 
+"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" +msgstr "" + +#: ../../source/ref-changelog.md:74 +msgid "" +"The Flower testing and development infrastructure has received " +"substantial updates. This makes Flower 1.7 the most tested release ever." 
+msgstr "" + +#: ../../source/ref-changelog.md:76 +#, fuzzy +msgid "" +"**Update dependencies** " +"([#2753](https://github.com/adap/flower/pull/2753), " +"[#2651](https://github.com/adap/flower/pull/2651), " +"[#2739](https://github.com/adap/flower/pull/2739), " +"[#2837](https://github.com/adap/flower/pull/2837), " +"[#2788](https://github.com/adap/flower/pull/2788), " +"[#2811](https://github.com/adap/flower/pull/2811), " +"[#2774](https://github.com/adap/flower/pull/2774), " +"[#2790](https://github.com/adap/flower/pull/2790), " +"[#2751](https://github.com/adap/flower/pull/2751), " +"[#2850](https://github.com/adap/flower/pull/2850), " +"[#2812](https://github.com/adap/flower/pull/2812), " +"[#2872](https://github.com/adap/flower/pull/2872), " +"[#2736](https://github.com/adap/flower/pull/2736), " +"[#2756](https://github.com/adap/flower/pull/2756), " +"[#2857](https://github.com/adap/flower/pull/2857), " +"[#2757](https://github.com/adap/flower/pull/2757), " +"[#2810](https://github.com/adap/flower/pull/2810), " +"[#2740](https://github.com/adap/flower/pull/2740), " +"[#2789](https://github.com/adap/flower/pull/2789))" +msgstr "" +"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" -#: flwr.common.typing.ReconnectIns:1 of -msgid "ReconnectIns message from server to client." 
+#: ../../source/ref-changelog.md:78 +msgid "" +"**General improvements** " +"([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), 
" +"[#2759](https://github.com/adap/flower/pull/2759))" +msgstr "" + +#: ../../source/ref-changelog.md:82 +#, fuzzy +msgid "" +"**Deprecate** `start_numpy_client` " +"([#2563](https://github.com/adap/flower/pull/2563), " +"[#2718](https://github.com/adap/flower/pull/2718))" msgstr "" +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" -#: flwr.common.typing.ServerMessage:1 of -msgid "ServerMessage is a container used to hold one instruction message." +#: ../../source/ref-changelog.md:84 +msgid "" +"Until now, clients of type `NumPyClient` needed to be started via " +"`start_numpy_client`. In our efforts to consolidate framework APIs, we " +"have introduced changes, and now all client types should start via " +"`start_client`. To continue using `NumPyClient` clients, you simply need " +"to first call the `.to_client()` method and then pass returned `Client` " +"object to `start_client`. The examples and the documentation have been " +"updated accordingly." msgstr "" -#: flwr.common.typing.Status:1 of -msgid "Client status." -msgstr "" +#: ../../source/ref-changelog.md:86 +#, fuzzy +msgid "" +"**Deprecate legacy DP wrappers** " +"([#2749](https://github.com/adap/flower/pull/2749))" +msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" -#: flwr.common.parameter.bytes_to_ndarray:1 of -msgid "Deserialize NumPy ndarray from bytes." +#: ../../source/ref-changelog.md:88 +msgid "" +"Legacy DP wrapper classes are deprecated, but still functional. This is " +"in preparation for an all-new pluggable version of differential privacy " +"support in Flower." msgstr "" -#: flwr.common.logger.configure:1 of -msgid "Configure logging to file and/or remote log server." 
+#: ../../source/ref-changelog.md:90 +#, fuzzy +msgid "" +"**Make optional arg** `--callable` **in** `flower-client` **a required " +"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" msgstr "" +"**从** `start_client` 中移除** `rest` **实验参数 " +"([#2324](https://github.com/adap/flower/pull/2324))" -#: logging.Logger.log:1 of -msgid "Log 'msg % args' with the integer severity 'level'." +#: ../../source/ref-changelog.md:92 +#, fuzzy +msgid "" +"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " +"([#2890](https://github.com/adap/flower/pull/2890))" msgstr "" +"**重新命名** `rnd` ** to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: logging.Logger.log:3 of +#: ../../source/ref-changelog.md:94 +#, fuzzy msgid "" -"To pass exception information, use the keyword argument exc_info with a " -"true value, e.g." +"**Drop experimental** `Task` **fields** " +"([#2866](https://github.com/adap/flower/pull/2866), " +"[#2865](https://github.com/adap/flower/pull/2865))" msgstr "" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" -#: logging.Logger.log:6 of -#, python-format -msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +#: ../../source/ref-changelog.md:96 +msgid "" +"Experimental fields `sa`, `legacy_server_message` and " +"`legacy_client_message` were removed from `Task` message. The removed " +"fields are superseded by the new `RecordSet` abstraction." msgstr "" -#: flwr.common.parameter.ndarray_to_bytes:1 of -msgid "Serialize NumPy ndarray to bytes." -msgstr "" +#: ../../source/ref-changelog.md:98 +#, fuzzy +msgid "" +"**Retire MXNet examples** " +"([#2724](https://github.com/adap/flower/pull/2724))" +msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" -#: flwr.common.parameter.ndarrays_to_parameters:1 of -msgid "Convert NumPy ndarrays to parameters object." 
+#: ../../source/ref-changelog.md:100 +msgid "" +"The development of the MXNet fremework has ended and the project is now " +"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " +"examples won't receive updates." msgstr "" -#: flwr.common.date.now:1 of -msgid "Construct a datetime from time.time() with time zone set to UTC." -msgstr "" +#: ../../source/ref-changelog.md:102 +#, fuzzy +msgid "v1.6.0 (2023-11-28)" +msgstr "v1.4.0 (2023-04-21)" -#: flwr.common.parameter.parameters_to_ndarrays:1 of -msgid "Convert parameters object to NumPy ndarrays." +#: ../../source/ref-changelog.md:108 +msgid "" +"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " msgstr "" -#: ../../source/ref-changelog.md:1 -msgid "Changelog" +#: ../../source/ref-changelog.md:112 +msgid "" +"**Add experimental support for Python 3.12** " +"([#2565](https://github.com/adap/flower/pull/2565))" msgstr "" +"** 增加对 Python 3.12 的实验支持** " +"([#2565](https://github.com/adap/flower/pull/2565))" -#: ../../source/ref-changelog.md:3 -msgid "Unreleased" +#: ../../source/ref-changelog.md:114 +#, fuzzy +msgid "" +"**Add new XGBoost examples** " +"([#2612](https://github.com/adap/flower/pull/2612), " +"[#2554](https://github.com/adap/flower/pull/2554), " +"[#2617](https://github.com/adap/flower/pull/2617), " +"[#2618](https://github.com/adap/flower/pull/2618), " +"[#2619](https://github.com/adap/flower/pull/2619), " +"[#2567](https://github.com/adap/flower/pull/2567))" msgstr "" +"**引入(试验性)Driver API** ([#1520](https://github.com/adap/flower/pull/1520)," +" [#1525](https://github.com/adap/flower/pull/1525), " 
+"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:83 -#: ../../source/ref-changelog.md:167 ../../source/ref-changelog.md:231 -#: ../../source/ref-changelog.md:289 ../../source/ref-changelog.md:358 -#: ../../source/ref-changelog.md:487 ../../source/ref-changelog.md:529 -#: ../../source/ref-changelog.md:596 ../../source/ref-changelog.md:662 -#: ../../source/ref-changelog.md:707 ../../source/ref-changelog.md:746 -#: ../../source/ref-changelog.md:779 ../../source/ref-changelog.md:829 -msgid "What's new?" +#: ../../source/ref-changelog.md:116 +msgid "" +"We have added a new `xgboost-quickstart` example alongside a new " +"`xgboost-comprehensive` example that goes more in-depth." msgstr "" -#: ../../source/ref-changelog.md:7 +#: ../../source/ref-changelog.md:118 +#, fuzzy msgid "" -"**Add experimental support for Python 3.12** " -"([#2565](https://github.com/adap/flower/pull/2565))" +"**Add Vertical FL example** " +"([#2598](https://github.com/adap/flower/pull/2598))" +msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" + +#: ../../source/ref-changelog.md:120 +msgid "" +"We had many questions about Vertical Federated Learning using Flower, so " +"we decided to add an simple example for it on the [Titanic " +"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " +"tutorial (in the README)." 
msgstr ""

-#: ../../source/ref-changelog.md:9
+#: ../../source/ref-changelog.md:122
 msgid ""
 "**Support custom** `ClientManager` **in** `start_driver()` "
 "([#2292](https://github.com/adap/flower/pull/2292))"
-msgstr ""
+msgstr "**在** `start_driver()` **中支持自定义** `ClientManager` ([#2292](https://github.com/adap/flower/pull/2292))"

-#: ../../source/ref-changelog.md:11
+#: ../../source/ref-changelog.md:124
 msgid ""
 "**Update REST API to support create and delete nodes** "
 "([#2283](https://github.com/adap/flower/pull/2283))"
 msgstr ""
+"**更新 REST API 以支持创建和删除节点** "
+"([#2283](https://github.com/adap/flower/pull/2283))"
+
+#: ../../source/ref-changelog.md:126
+#, fuzzy
+msgid ""
+"**Update the Android SDK** "
+"([#2187](https://github.com/adap/flower/pull/2187))"
+msgstr ""
+"**介绍Flower Android SDK** "
+"([#2131](https://github.com/adap/flower/pull/2131))"
+
+#: ../../source/ref-changelog.md:128
+#, fuzzy
+msgid "Add gRPC request-response capability to the Android SDK."
+msgstr "为 Android SDK 添加 gRPC 请求-响应功能。"

-#: ../../source/ref-changelog.md:13
+#: ../../source/ref-changelog.md:130
+#, fuzzy
 msgid ""
 "**Update the C++ SDK** "
-"([#2537](https://github/com/adap/flower/pull/2537), "
-"[#2528](https://github/com/adap/flower/pull/2528), "
+"([#2537](https://github.com/adap/flower/pull/2537), "
+"[#2528](https://github.com/adap/flower/pull/2528), "
 "[#2523](https://github.com/adap/flower/pull/2523), "
 "[#2522](https://github.com/adap/flower/pull/2522))"
 msgstr ""
+"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), "
+"[#2528](https://github/com/adap/flower/pull/2528), "
+"[#2523](https://github.com/adap/flower/pull/2523), "
+"[#2522](https://github.com/adap/flower/pull/2522))"

-#: ../../source/ref-changelog.md:15
+#: ../../source/ref-changelog.md:132
 msgid "Add gRPC request-response capability to the C++ SDK."
+msgstr "为 C++ SDK 添加 gRPC 请求-响应功能。" + +#: ../../source/ref-changelog.md:134 +#, fuzzy +msgid "" +"**Make HTTPS the new default** " +"([#2591](https://github.com/adap/flower/pull/2591), " +"[#2636](https://github.com/adap/flower/pull/2636))" msgstr "" +"Baselines文档([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400)" -#: ../../source/ref-changelog.md:17 +#: ../../source/ref-changelog.md:136 msgid "" -"**Fix the incorrect return types of Strategy** " -"([#2432](https://github.com/adap/flower/pull/2432/files))" +"Flower is moving to HTTPS by default. The new `flower-server` requires " +"passing `--certificates`, but users can enable `--insecure` to use HTTP " +"for prototyping. The same applies to `flower-client`, which can either " +"use user-provided credentials or gRPC-bundled certificates to connect to " +"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " +"enable insecure HTTP connections." msgstr "" -#: ../../source/ref-changelog.md:19 +#: ../../source/ref-changelog.md:138 msgid "" -"The types of the return values in the docstrings in two methods " -"(`aggregate_fit` and `aggregate_evaluate`) now match the hint types in " -"the code." +"For backward compatibility, `start_client()` and `start_numpy_client()` " +"will still start in insecure mode by default. In a future release, " +"insecure connections will require user opt-in by passing `insecure=True`." 
msgstr "" -#: ../../source/ref-changelog.md:21 +#: ../../source/ref-changelog.md:140 msgid "" "**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " "[#2390](https://github.com/adap/flower/pull/2390), " "[#2493](https://github.com/adap/flower/pull/2493))" msgstr "" +"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" -#: ../../source/ref-changelog.md:23 +#: ../../source/ref-changelog.md:142 +#, fuzzy msgid "" "Using the `client_fn`, Flower clients can interchangeably run as " "standalone processes (i.e. via `start_client`) or in simulation (i.e. via" " `start_simulation`) without requiring changes to how the client class is" -" defined and instantiated. Calling `start_numpy_client` is now " -"deprecated." +" defined and instantiated. The `to_client()` function is introduced to " +"convert a `NumPyClient` to a `Client`." msgstr "" +"使用 `client_fn`,Flower 客户端可以作为独立进程(即通过 `start_client`)或在模拟中(即通过 " +"`start_simulation`)交替运行,而无需更改客户端类的定义和实例化方式。调用 `start_numpy_client` 现已过时。" -#: ../../source/ref-changelog.md:25 -msgid "**Update Flower Baselines**" +#: ../../source/ref-changelog.md:144 +msgid "" +"**Add new** `Bulyan` **strategy** " +"([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891))" msgstr "" +"**添加新**\"Bulyan " +"\"**策略**([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891)" -#: ../../source/ref-changelog.md:27 +#: ../../source/ref-changelog.md:146 +msgid "" +"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " +"2018](https://arxiv.org/abs/1802.07927)" +msgstr "新的 \"Bulyan\"策略通过[El Mhamdi 等人,2018](https://arxiv.org/abs/1802.07927)实现" + +#: ../../source/ref-changelog.md:148 +#, fuzzy +msgid "" +"**Add new** `XGB Bagging` **strategy** " +"([#2611](https://github.com/adap/flower/pull/2611))" +msgstr 
"**添加新的`FedProx`策略** ([#1619](https://github.com/adap/flower/pull/1619))" + +#: ../../source/ref-changelog.md:150 ../../source/ref-changelog.md:152 +#, fuzzy +msgid "" +"**Introduce `WorkloadState`** " +"([#2564](https://github.com/adap/flower/pull/2564), " +"[#2632](https://github.com/adap/flower/pull/2632))" +msgstr "" +"**新的内置策略**([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822)" + +#: ../../source/ref-changelog.md:156 msgid "" "FedProx ([#2210](https://github.com/adap/flower/pull/2210), " "[#2286](https://github.com/adap/flower/pull/2286), " "[#2509](https://github.com/adap/flower/pull/2509))" msgstr "" +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" -#: ../../source/ref-changelog.md:29 +#: ../../source/ref-changelog.md:158 msgid "" "Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " "[#2400](https://github.com/adap/flower/pull/2400))" msgstr "" +"Baselines文档([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400)" -#: ../../source/ref-changelog.md:31 +#: ../../source/ref-changelog.md:160 msgid "" "FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " "[#2507](https://github.com/adap/flower/pull/2507))" msgstr "" +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" -#: ../../source/ref-changelog.md:33 +#: ../../source/ref-changelog.md:162 msgid "" "TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " "[#2508](https://github.com/adap/flower/pull/2508))" msgstr "" +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" -#: ../../source/ref-changelog.md:35 +#: ../../source/ref-changelog.md:164 msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" -msgstr "" +msgstr "FedMeta 
[#2438](https://github.com/adap/flower/pull/2438)" -#: ../../source/ref-changelog.md:37 +#: ../../source/ref-changelog.md:166 msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" -msgstr "" +msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" -#: ../../source/ref-changelog.md:39 +#: ../../source/ref-changelog.md:168 msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" -msgstr "" +msgstr "MOON [#2421](https://github.com/adap/flower/pull/2421)" -#: ../../source/ref-changelog.md:41 +#: ../../source/ref-changelog.md:170 msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" -msgstr "" +msgstr "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" -#: ../../source/ref-changelog.md:43 +#: ../../source/ref-changelog.md:172 msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" -msgstr "" +msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" -#: ../../source/ref-changelog.md:45 +#: ../../source/ref-changelog.md:174 msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" -msgstr "" +msgstr "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" -#: ../../source/ref-changelog.md:47 +#: ../../source/ref-changelog.md:176 msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" -msgstr "" +msgstr "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" -#: ../../source/ref-changelog.md:49 +#: ../../source/ref-changelog.md:178 msgid "" "FedBN ([#2608](https://github.com/adap/flower/pull/2608), " "[#2615](https://github.com/adap/flower/pull/2615))" msgstr "" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" -#: ../../source/ref-changelog.md:51 +#: ../../source/ref-changelog.md:180 +#, fuzzy msgid "" -"**Update Flower Examples** " -"([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425)," -" [#2526](https://github.com/adap/flower/pull/2526))" +"**General updates 
to Flower Examples** " +"([#2384](https://github.com/adap/flower/pull/2384), " +"[#2425](https://github.com/adap/flower/pull/2425), " +"[#2526](https://github.com/adap/flower/pull/2526), " +"[#2302](https://github.com/adap/flower/pull/2302), " +"[#2545](https://github.com/adap/flower/pull/2545))" msgstr "" +"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " +"[#2528](https://github/com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" -#: ../../source/ref-changelog.md:53 +#: ../../source/ref-changelog.md:182 +#, fuzzy msgid "" -"**General updates to baselines** " +"**General updates to Flower Baselines** " "([#2301](https://github.com/adap/flower/pull/2301), " "[#2305](https://github.com/adap/flower/pull/2305), " "[#2307](https://github.com/adap/flower/pull/2307), " "[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435))" -msgstr "" - -#: ../../source/ref-changelog.md:55 -msgid "" -"**General updates to the simulation engine** " -"([#2331](https://github.com/adap/flower/pull/2331), " -"[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448))" -msgstr "" - -#: ../../source/ref-changelog.md:57 -msgid "" -"**General improvements** " -"([#2309](https://github.com/adap/flower/pull/2309), " +"[#2435](https://github.com/adap/flower/pull/2435), " +"[#2462](https://github.com/adap/flower/pull/2462), " +"[#2463](https://github.com/adap/flower/pull/2463), " +"[#2461](https://github.com/adap/flower/pull/2461), " +"[#2469](https://github.com/adap/flower/pull/2469), " +"[#2466](https://github.com/adap/flower/pull/2466), " +"[#2471](https://github.com/adap/flower/pull/2471), " +"[#2472](https://github.com/adap/flower/pull/2472), " +"[#2470](https://github.com/adap/flower/pull/2470))" +msgstr "" +"**普通改进**([#2309](https://github.com/adap/flower/pull/2309), " 
"[#2310](https://github.com/adap/flower/pull/2310), " "[2313](https://github.com/adap/flower/pull/2313), " "[#2316](https://github.com/adap/flower/pull/2316), " @@ -7237,37 +12318,78 @@ msgid "" "[#2402](https://github.com/adap/flower/pull/2402), " "[#2446](https://github.com/adap/flower/pull/2446) " "[#2561](https://github.com/adap/flower/pull/2561))" -msgstr "" - -#: ../../source/ref-changelog.md:59 ../../source/ref-changelog.md:153 -#: ../../source/ref-changelog.md:217 ../../source/ref-changelog.md:271 -#: ../../source/ref-changelog.md:338 -msgid "Flower received many improvements under the hood, too many to list here." -msgstr "" -#: ../../source/ref-changelog.md:61 +#: ../../source/ref-changelog.md:184 +#, fuzzy msgid "" -"**Add new** `Bulyan` **strategy** " -"([#1817](https://github.com/adap/flower/pull/1817), " -"[#1891](https://github.com/adap/flower/pull/1891))" +"**General updates to the simulation engine** " +"([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448), " +"[#2294](https://github.com/adap/flower/pull/2294))" msgstr "" +"**模拟引擎的普通更新** ([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448))" -#: ../../source/ref-changelog.md:63 +#: ../../source/ref-changelog.md:186 +#, fuzzy msgid "" -"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " -"2018](https://arxiv.org/abs/1802.07927)" +"**General updates to Flower SDKs** " +"([#2288](https://github.com/adap/flower/pull/2288), " +"[#2429](https://github.com/adap/flower/pull/2429), " +"[#2555](https://github.com/adap/flower/pull/2555), " +"[#2543](https://github.com/adap/flower/pull/2543), " +"[#2544](https://github.com/adap/flower/pull/2544), " +"[#2597](https://github.com/adap/flower/pull/2597), " +"[#2623](https://github.com/adap/flower/pull/2623))" msgstr "" +"**改进教程** 
([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:65 ../../source/ref-changelog.md:155 -#: ../../source/ref-changelog.md:219 ../../source/ref-changelog.md:277 -#: ../../source/ref-changelog.md:346 ../../source/ref-changelog.md:408 -#: ../../source/ref-changelog.md:427 ../../source/ref-changelog.md:583 -#: ../../source/ref-changelog.md:654 ../../source/ref-changelog.md:691 -#: ../../source/ref-changelog.md:734 -msgid "Incompatible changes" -msgstr "" +#: ../../source/ref-changelog.md:188 +msgid "" +"**General improvements** " +"([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " 
+"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" +msgstr "" + +#: ../../source/ref-changelog.md:190 ../../source/ref-changelog.md:280 +#: ../../source/ref-changelog.md:344 ../../source/ref-changelog.md:398 +#: ../../source/ref-changelog.md:465 +msgid "Flower received many improvements under the hood, too many to list here." +msgstr "Flower 进行了许多改进,这里就不一一列举了。" -#: ../../source/ref-changelog.md:67 +#: ../../source/ref-changelog.md:194 msgid "" "**Remove support for Python 3.7** " "([#2280](https://github.com/adap/flower/pull/2280), " @@ -7277,44 +12399,42 @@ msgid "" "[#2355](https://github.com/adap/flower/pull/2355), " "[#2356](https://github.com/adap/flower/pull/2356))" msgstr "" +"**移除对 Python 3.7 的支持** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" -#: ../../source/ref-changelog.md:69 +#: ../../source/ref-changelog.md:196 msgid "" "Python 3.7 support was deprecated in Flower 1.5, and this release removes" " support. Flower now requires Python 3.8." 
-msgstr "" +msgstr "在 Flower 1.5 中,Python 3.7 支持已被弃用,本版本将删除该支持。Flower 现在需要 Python 3.8。" -#: ../../source/ref-changelog.md:71 +#: ../../source/ref-changelog.md:198 msgid "" "**Remove experimental argument** `rest` **from** `start_client` " "([#2324](https://github.com/adap/flower/pull/2324))" msgstr "" +"**从** `start_client` 中移除** `rest` **实验参数 " +"([#2324](https://github.com/adap/flower/pull/2324))" -#: ../../source/ref-changelog.md:73 +#: ../../source/ref-changelog.md:200 msgid "" "The (still experimental) argument `rest` was removed from `start_client` " "and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " "experimental REST API instead." msgstr "" +"删除了 `start_client` 和 `start_numpy_client` 中的参数 `rest`(仍属试验性质)。请使用 " +"`transport=\"rest\"` 来选择使用试验性 REST API。" -#: ../../source/ref-changelog.md:75 +#: ../../source/ref-changelog.md:202 msgid "v1.5.0 (2023-08-31)" -msgstr "" - -#: ../../source/ref-changelog.md:77 ../../source/ref-changelog.md:161 -#: ../../source/ref-changelog.md:225 ../../source/ref-changelog.md:283 -#: ../../source/ref-changelog.md:352 ../../source/ref-changelog.md:421 -msgid "Thanks to our contributors" -msgstr "" - -#: ../../source/ref-changelog.md:79 ../../source/ref-changelog.md:163 -#: ../../source/ref-changelog.md:227 ../../source/ref-changelog.md:285 -msgid "" -"We would like to give our special thanks to all the contributors who made" -" the new version of Flower possible (in `git shortlog` order):" -msgstr "" +msgstr "v1.5.0 (2023-08-31)" -#: ../../source/ref-changelog.md:81 +#: ../../source/ref-changelog.md:208 msgid "" "`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " "`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " @@ -7322,16 +12442,24 @@ msgid "" "Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " msgstr "" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:85 +#: ../../source/ref-changelog.md:212 msgid "" "**Introduce new simulation engine** " "([#1969](https://github.com/adap/flower/pull/1969), " "[#2221](https://github.com/adap/flower/pull/2221), " "[#2248](https://github.com/adap/flower/pull/2248))" msgstr "" +"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" -#: ../../source/ref-changelog.md:87 +#: ../../source/ref-changelog.md:214 msgid "" "The new simulation engine has been rewritten from the ground up, yet it " "remains fully backwards compatible. It offers much improved stability and" @@ -7339,22 +12467,58 @@ msgid "" "transparently adapt to different settings to scale simulation in CPU-" "only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." msgstr "" +"新的模拟引擎从头开始重新编写,但仍完全向后兼容。它的稳定性和内存处理能力大大提高,尤其是在使用 GPU 时。仿真可透明地适应不同的设置,以在仅 " +"CPU、CPU+GPU、多 GPU 或多节点多 GPU 环境中扩展模拟。" -#: ../../source/ref-changelog.md:89 +#: ../../source/ref-changelog.md:216 msgid "" "Comprehensive documentation includes a new [how-to run " -"simulations](https://flower.dev/docs/framework/how-to-run-" +"simulations](https://flower.ai/docs/framework/how-to-run-" "simulations.html) guide, new [simulation-" -"pytorch](https://flower.dev/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.dev/docs/examples/simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" "tensorflow.html) notebooks, and a new [YouTube tutorial " "series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." 
msgstr "" - -#: ../../source/ref-changelog.md:91 -msgid "" -"**Restructure Flower Docs** " -"([#1824](https://github.com/adap/flower/pull/1824), " +"综合文档包括新的[how-to run simulations](https://flower.ai/docs/framework/how-" +"to-run-simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)。" + +#: ../../source/ref-changelog.md:218 +msgid "" +"**Restructure Flower Docs** " +"([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " 
+"[#2231](https://github.com/adap/flower/pull/2231), " +"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" +msgstr "" +"**重构 Flower 文档** ([#1824](https://github.com/adap/flower/pull/1824), " "[#1865](https://github.com/adap/flower/pull/1865), " "[#1884](https://github.com/adap/flower/pull/1884), " "[#1887](https://github.com/adap/flower/pull/1887), " @@ -7381,44 +12545,55 @@ msgid "" "[#2231](https://github.com/adap/flower/pull/2231), " "[#2243](https://github.com/adap/flower/pull/2243), " "[#2227](https://github.com/adap/flower/pull/2227))" -msgstr "" -#: ../../source/ref-changelog.md:93 +#: ../../source/ref-changelog.md:220 msgid "" "Much effort went into a completely restructured Flower docs experience. " -"The documentation on [flower.dev/docs](flower.dev/docs) is now divided " +"The documentation on [flower.ai/docs](flower.ai/docs) is now divided " "into Flower Framework, Flower Baselines, Flower Android SDK, Flower iOS " "SDK, and code example projects." msgstr "" +"Flower 文档体验的全面重构耗费了大量精力。现在,[flower.ai/docs](flower.ai/docs)上的文档分为 " +"Flower Framework、Flower Baselines、Flower Android SDK、Flower iOS SDK " +"和代码示例项目。" -#: ../../source/ref-changelog.md:95 +#: ../../source/ref-changelog.md:222 msgid "" "**Introduce Flower Swift SDK** " "([#1858](https://github.com/adap/flower/pull/1858), " "[#1897](https://github.com/adap/flower/pull/1897))" msgstr "" +"**介绍 Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" -#: ../../source/ref-changelog.md:97 +#: ../../source/ref-changelog.md:224 msgid "" "This is the first preview release of the Flower Swift SDK. Flower support" " on iOS is improving, and alongside the Swift SDK and code example, there" " is now also an iOS quickstart tutorial." 
msgstr "" +"这是 Flower Swift SDK 的首个预览版。Flower 对 iOS 的支持正在不断改进,除了 Swift SDK " +"和代码示例外,现在还有 iOS 快速入门教程。" -#: ../../source/ref-changelog.md:99 +#: ../../source/ref-changelog.md:226 msgid "" "**Introduce Flower Android SDK** " "([#2131](https://github.com/adap/flower/pull/2131))" msgstr "" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/ref-changelog.md:101 +#: ../../source/ref-changelog.md:228 msgid "" "This is the first preview release of the Flower Kotlin SDK. Flower " "support on Android is improving, and alongside the Kotlin SDK and code " "example, there is now also an Android quickstart tutorial." msgstr "" +"这是 Flower Kotlin SDK 的首个预览版。Flower 对 Android 的支持正在不断改进,除了 Kotlin SDK " +"和代码示例,现在还有 Android 快速入门教程。" -#: ../../source/ref-changelog.md:103 +#: ../../source/ref-changelog.md:230 msgid "" "**Introduce new end-to-end testing infrastructure** " "([#1842](https://github.com/adap/flower/pull/1842), " @@ -7439,43 +12614,64 @@ msgid "" "[#2137](https://github.com/adap/flower/pull/2137), " "[#2165](https://github.com/adap/flower/pull/2165))" msgstr "" +"*介绍新的端到端测试** ([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " 
+"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" -#: ../../source/ref-changelog.md:105 +#: ../../source/ref-changelog.md:232 msgid "" "A new testing infrastructure ensures that new changes stay compatible " "with existing framework integrations or strategies." -msgstr "" +msgstr "新的测试设施可确保新的变更与现有的框架集成或策略保持兼容。" -#: ../../source/ref-changelog.md:107 +#: ../../source/ref-changelog.md:234 msgid "**Deprecate Python 3.7**" -msgstr "" +msgstr "** 过时的 Python 3.7**" -#: ../../source/ref-changelog.md:109 +#: ../../source/ref-changelog.md:236 msgid "" "Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" " Python 3.7 is now deprecated and will be removed in an upcoming release." -msgstr "" +msgstr "由于 Python 3.7 已于 2023-06-27 弃用 (EOL),对 Python 3.7 的支持现已废弃,并将在即将发布的版本中移除。" -#: ../../source/ref-changelog.md:111 +#: ../../source/ref-changelog.md:238 msgid "" "**Add new** `FedTrimmedAvg` **strategy** " "([#1769](https://github.com/adap/flower/pull/1769), " "[#1853](https://github.com/adap/flower/pull/1853))" msgstr "" +"**添加新的**`FedTrimmedAvg`**策略**([#1769](https://github.com/adap/flower/pull/1769)," +" [#1853](https://github.com/adap/flower/pull/1853)" -#: ../../source/ref-changelog.md:113 +#: ../../source/ref-changelog.md:240 msgid "" "The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " "2018](https://arxiv.org/abs/1803.01498)." 
msgstr "" +"新的 \"FedTrimmedAvg \"策略实现了[Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)的 \"Trimmed Mean\"。" -#: ../../source/ref-changelog.md:115 +#: ../../source/ref-changelog.md:242 msgid "" "**Introduce start_driver** " "([#1697](https://github.com/adap/flower/pull/1697))" -msgstr "" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" -#: ../../source/ref-changelog.md:117 +#: ../../source/ref-changelog.md:244 msgid "" "In addition to `start_server` and using the raw Driver API, there is a " "new `start_driver` function that allows for running `start_server` " @@ -7483,68 +12679,87 @@ msgid "" " the `mt-pytorch` code example to see a working example using " "`start_driver`." msgstr "" +"除了 `start_server` 和使用原始驱动 API 之外,还有一个新的 `start_driver` 函数,只需修改一行代码,就能将 " +"`start_server` 脚本作为 Flower 驱动程序运行。请查看 `mt-pytorch` 代码示例,了解使用 " +"`start_driver` 的工作示例。" -#: ../../source/ref-changelog.md:119 +#: ../../source/ref-changelog.md:246 msgid "" "**Add parameter aggregation to** `mt-pytorch` **code example** " "([#1785](https://github.com/adap/flower/pull/1785))" msgstr "" +"为 `mt-pytorch` **代码示例**添加参数聚合 " +"([#1785](https://github.com/adap/flower/pull/1785))" -#: ../../source/ref-changelog.md:121 +#: ../../source/ref-changelog.md:248 msgid "" "The `mt-pytorch` example shows how to aggregate parameters when writing a" " driver script. The included `driver.py` and `server.py` have been " "aligned to demonstrate both the low-level way and the high-level way of " "building server-side logic." 
msgstr "" +"`mt-pytorch`示例展示了如何在编写驱动程序脚本时聚合参数。附带的 `driver.py` 和 `server.py` " +"已经进行了调整,以演示构建服务器端逻辑的低级方法和高级方法。" -#: ../../source/ref-changelog.md:123 +#: ../../source/ref-changelog.md:250 msgid "" "**Migrate experimental REST API to Starlette** " "([2171](https://github.com/adap/flower/pull/2171))" msgstr "" +"**将实验性 REST API 移植到 Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-changelog.md:125 +#: ../../source/ref-changelog.md:252 msgid "" "The (experimental) REST API used to be implemented in " "[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" " use [Starlette](https://www.starlette.io/) directly." msgstr "" +"REST API(试验性)曾在 [FastAPI](https://fastapi.tiangolo.com/) 中实现,但现在已迁移到直接使用 " +"[Starlette](https://www.starlette.io/) 。" -#: ../../source/ref-changelog.md:127 +#: ../../source/ref-changelog.md:254 msgid "" "Please note: The REST request-response API is still experimental and will" " likely change significantly over time." -msgstr "" +msgstr "请注意:REST 请求-响应 API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" -#: ../../source/ref-changelog.md:129 +#: ../../source/ref-changelog.md:256 msgid "" "**Introduce experimental gRPC request-response API** " "([#1867](https://github.com/adap/flower/pull/1867), " "[#1901](https://github.com/adap/flower/pull/1901))" msgstr "" +"**引入实验性 gRPC 请求-响应 API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901)" -#: ../../source/ref-changelog.md:131 +#: ../../source/ref-changelog.md:258 msgid "" "In addition to the existing gRPC API (based on bidirectional streaming) " "and the experimental REST API, there is now a new gRPC API that uses a " "request-response model to communicate with client nodes." 
msgstr "" +"除了现有的 gRPC 应用程序接口(基于双向流)和试验性 REST 应用程序接口外,现在还有一个新的 gRPC " +"应用程序接口,它使用请求-响应模型与客户端节点通信。" -#: ../../source/ref-changelog.md:133 +#: ../../source/ref-changelog.md:260 msgid "" "Please note: The gRPC request-response API is still experimental and will" " likely change significantly over time." -msgstr "" +msgstr "请注意:gRPC 请求-响应 API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" -#: ../../source/ref-changelog.md:135 +#: ../../source/ref-changelog.md:262 msgid "" "**Replace the experimental** `start_client(rest=True)` **with the new** " "`start_client(transport=\"rest\")` " "([#1880](https://github.com/adap/flower/pull/1880))" msgstr "" +"**用新的** `start_client(transport=\"rest\")` 替换实验性** " +"`start_client(rest=True)` " +"([#1880](https://github.com/adap/flower/pull/1880))" -#: ../../source/ref-changelog.md:137 +#: ../../source/ref-changelog.md:264 msgid "" "The (experimental) `start_client` argument `rest` was deprecated in " "favour of a new argument `transport`. `start_client(transport=\"rest\")` " @@ -7552,31 +12767,35 @@ msgid "" "All code should migrate to the new argument `transport`. The deprecated " "argument `rest` will be removed in a future release." msgstr "" +"已废弃(试验性的)`start_client`参数`rest`,改用新参数`transport`。`start_client(transport=\"rest\")`将产生与以前的`start_client(rest=True)`相同的行为。所有代码都应迁移到新参数" +" `transport`。过时的参数 `rest` 将在今后的版本中删除。" -#: ../../source/ref-changelog.md:139 +#: ../../source/ref-changelog.md:266 msgid "" "**Add a new gRPC option** " "([#2197](https://github.com/adap/flower/pull/2197))" -msgstr "" +msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/pull/2197))" -#: ../../source/ref-changelog.md:141 +#: ../../source/ref-changelog.md:268 msgid "" "We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" " option set to 0 by default. This prevents the clients from sending " "keepalive pings when there is no outstanding stream." 
msgstr "" +"现在我们启动一个 gRPC 服务器,并将 `grpc.keepalive_permit_without_calls` 选项默认设置为 " +"0。这将防止客户端在没有未处理数据流时发送 keepalive pings。" -#: ../../source/ref-changelog.md:143 +#: ../../source/ref-changelog.md:270 msgid "" "**Improve example notebooks** " "([#2005](https://github.com/adap/flower/pull/2005))" -msgstr "" +msgstr "**改进示例笔记** ([#2005](https://github.com/adap/flower/pull/2005))" -#: ../../source/ref-changelog.md:145 +#: ../../source/ref-changelog.md:272 msgid "There's a new 30min Federated Learning PyTorch tutorial!" -msgstr "" +msgstr "有一个新的 30 分钟的联邦学习 PyTorch 教程!" -#: ../../source/ref-changelog.md:147 +#: ../../source/ref-changelog.md:274 msgid "" "**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " "[#1873](https://github.com/adap/flower/pull/1873), " @@ -7590,8 +12809,19 @@ msgid "" "[#2225](https://github.com/adap/flower/pull/2225), " "[#2183](https://github.com/adap/flower/pull/2183))" msgstr "" +"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" -#: ../../source/ref-changelog.md:149 +#: ../../source/ref-changelog.md:276 msgid "" "Many examples have received significant updates, including simplified " "advanced-tensorflow and advanced-pytorch examples, improved macOS " @@ -7599,8 +12829,11 @@ msgid "" " major upgrade is that all code examples now have a `requirements.txt` " "(in addition to `pyproject.toml`)." 
msgstr "" +"许多示例都进行了重大更新,包括简化了 advanced-tensorflow 和 advanced-pytorch 示例,改进了 " +"TensorFlow 示例的 macOS 兼容性,以及模拟代码示例。一项重大升级是所有代码示例现在都有了 " +"\"requirements.txt\"(除 \"pyproject.toml \"外)。" -#: ../../source/ref-changelog.md:151 +#: ../../source/ref-changelog.md:278 msgid "" "**General improvements** " "([#1872](https://github.com/adap/flower/pull/1872), " @@ -7610,18 +12843,24 @@ msgid "" "[#1477](https://github.com/adap/flower/pull/1477), " "[#2171](https://github.com/adap/flower/pull/2171))" msgstr "" +"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-changelog.md:157 ../../source/ref-changelog.md:221 -#: ../../source/ref-changelog.md:279 ../../source/ref-changelog.md:348 -#: ../../source/ref-changelog.md:410 +#: ../../source/ref-changelog.md:284 ../../source/ref-changelog.md:348 +#: ../../source/ref-changelog.md:406 ../../source/ref-changelog.md:475 +#: ../../source/ref-changelog.md:537 msgid "None" -msgstr "" +msgstr "无" -#: ../../source/ref-changelog.md:159 +#: ../../source/ref-changelog.md:286 msgid "v1.4.0 (2023-04-21)" -msgstr "" +msgstr "v1.4.0 (2023-04-21)" -#: ../../source/ref-changelog.md:165 +#: ../../source/ref-changelog.md:292 msgid "" "`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " "`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " @@ -7630,8 +12869,14 @@ msgid "" "Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " "`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" msgstr "" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. 
Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" -#: ../../source/ref-changelog.md:169 +#: ../../source/ref-changelog.md:296 msgid "" "**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " "example)** ([#1694](https://github.com/adap/flower/pull/1694), " @@ -7641,8 +12886,15 @@ msgid "" "[#1763](https://github.com/adap/flower/pull/1763), " "[#1795](https://github.com/adap/flower/pull/1795))" msgstr "" +"**引入对XGBoost的支持(**`FedXgbNnAvg` **策略和示例)** " +"([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" -#: ../../source/ref-changelog.md:171 +#: ../../source/ref-changelog.md:298 msgid "" "XGBoost is a tree-based ensemble machine learning algorithm that uses " "gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" @@ -7652,15 +12904,20 @@ msgid "" "example](https://github.com/adap/flower/tree/main/examples/quickstart_xgboost_horizontal)" " that demonstrates the usage of this new strategy in an XGBoost project." 
msgstr "" +"XGBoost 是一种基于树的集合机器学习算法,它使用梯度提升来提高模型的准确性。我们添加了一个新的 " +"\"FedXgbNnAvg\"[策略](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)和一个[代码示例](https://github.com/adap/flower/tree/main/examples/quickstart_xgboost_horizontal),演示如何在" +" XGBoost 项目中使用这个新策略。" -#: ../../source/ref-changelog.md:173 +#: ../../source/ref-changelog.md:300 msgid "" "**Introduce iOS SDK (preview)** " "([#1621](https://github.com/adap/flower/pull/1621), " "[#1764](https://github.com/adap/flower/pull/1764))" msgstr "" +"**介绍 iOS SDK(预览版)** ([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/ref-changelog.md:175 +#: ../../source/ref-changelog.md:302 msgid "" "This is a major update for anyone wanting to implement Federated Learning" " on iOS mobile devices. We now have a swift iOS SDK present under " @@ -7670,24 +12927,34 @@ msgid "" "example](https://github.com/adap/flower/tree/main/examples/ios) has also " "been updated!" msgstr "" +"对于想要在 iOS 移动设备上实施联邦学习的人来说,这是一次重大更新。现在,我们在 " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" 下提供了一个迅捷的 iOS SDK,这将大大方便应用程序的创建过程。为了展示其使用情况,我们还更新了 [iOS " +"示例](https://github.com/adap/flower/tree/main/examples/ios)!" 
-#: ../../source/ref-changelog.md:177 +#: ../../source/ref-changelog.md:304 msgid "" "**Introduce new \"What is Federated Learning?\" tutorial** " "([#1657](https://github.com/adap/flower/pull/1657), " "[#1721](https://github.com/adap/flower/pull/1721))" msgstr "" +"**引入新的 " +"\"什么是联邦学习?\"教程**([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721)" -#: ../../source/ref-changelog.md:179 +#: ../../source/ref-changelog.md:306 msgid "" -"A new [entry-level tutorial](https://flower.dev/docs/framework/tutorial-" +"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" "what-is-federated-learning.html) in our documentation explains the basics" " of Fedetated Learning. It enables anyone who's unfamiliar with Federated" " Learning to start their journey with Flower. Forward it to anyone who's " "interested in Federated Learning!" msgstr "" +"我们的文档中新增了一个[入门级教程](https://flower.ai/docs/framework/tutorial-what-is-" +"federated-learning.html),解释了联邦学习的基础知识。它让任何不熟悉联邦学习的人都能开始 Flower " +"之旅。请转发给对联邦学习感兴趣的人!" -#: ../../source/ref-changelog.md:181 +#: ../../source/ref-changelog.md:308 msgid "" "**Introduce new Flower Baseline: FedProx MNIST** " "([#1513](https://github.com/adap/flower/pull/1513), " @@ -7695,30 +12962,42 @@ msgid "" "[#1681](https://github.com/adap/flower/pull/1681), " "[#1679](https://github.com/adap/flower/pull/1679))" msgstr "" +"**引入新的 Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679)" -#: ../../source/ref-changelog.md:183 +#: ../../source/ref-changelog.md:310 msgid "" "This new baseline replicates the MNIST+CNN task from the paper [Federated" " Optimization in Heterogeneous Networks (Li et al., " "2018)](https://arxiv.org/abs/1812.06127). 
It uses the `FedProx` strategy," " which aims at making convergence more robust in heterogenous settings." msgstr "" +"这条新Baseline复现了论文[Federated Optimization in Heterogeneous Networks (Li et " +"al., 2018)](https://arxiv.org/abs/1812.06127)中的 MNIST+CNN 任务。它使用 " +"\"FedProx \"策略,旨在使收敛在异构环境中更加稳健。" -#: ../../source/ref-changelog.md:185 +#: ../../source/ref-changelog.md:312 msgid "" "**Introduce new Flower Baseline: FedAvg FEMNIST** " "([#1655](https://github.com/adap/flower/pull/1655))" msgstr "" +"**引入新的 Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:187 +#: ../../source/ref-changelog.md:314 msgid "" "This new baseline replicates an experiment evaluating the performance of " "the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " "Benchmark for Federated Settings (Caldas et al., " "2018)](https://arxiv.org/abs/1812.01097)." msgstr "" +"这一新Baseline复现了论文[LEAF: A Benchmark for Federated Settings(Caldas 等人,2018 " +"年)](https://arxiv.org/abs/1812.01097)中评估 FedAvg 算法在 FEMNIST 数据集上性能的实验。" -#: ../../source/ref-changelog.md:189 +#: ../../source/ref-changelog.md:316 msgid "" "**Introduce (experimental) REST API** " "([#1594](https://github.com/adap/flower/pull/1594), " @@ -7729,21 +13008,28 @@ msgid "" "[#1770](https://github.com/adap/flower/pull/1770), " "[#1733](https://github.com/adap/flower/pull/1733))" msgstr "" +"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:191 +#: ../../source/ref-changelog.md:318 msgid "" "A new REST API has been introduced as an alternative to the gRPC-based " "communication 
stack. In this initial version, the REST API only supports " "anonymous clients." -msgstr "" +msgstr "作为基于 gRPC 的通信栈的替代方案,我们引入了新的 REST API。在初始版本中,REST API 仅支持匿名客户端。" -#: ../../source/ref-changelog.md:193 +#: ../../source/ref-changelog.md:320 msgid "" "Please note: The REST API is still experimental and will likely change " "significantly over time." -msgstr "" +msgstr "请注意:REST API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" -#: ../../source/ref-changelog.md:195 +#: ../../source/ref-changelog.md:322 msgid "" "**Improve the (experimental) Driver API** " "([#1663](https://github.com/adap/flower/pull/1663), " @@ -7756,8 +13042,17 @@ msgid "" "[#1662](https://github.com/adap/flower/pull/1662), " "[#1794](https://github.com/adap/flower/pull/1794))" msgstr "" +"**改进(试验性)驱动程序应用程序接口** ([#1663](https://github.com/adap/flower/pull/1663)," +" [#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/ref-changelog.md:197 +#: ../../source/ref-changelog.md:324 msgid "" "The Driver API is still an experimental feature, but this release " "introduces some major upgrades. One of the main improvements is the " @@ -7766,59 +13061,70 @@ msgid "" "results) that have been delivered will now be deleted. This greatly " "improves the memory efficiency of a long-running Flower server." 
msgstr "" +"驱动程序应用程序接口(Driver API)仍是一项试验性功能,但这一版本引入了一些重大升级。主要改进之一是引入了 SQLite " +"数据库,将服务器状态存储在磁盘上(而不是内存中)。另一项改进是,已交付的任务(指令或结果)现在将被删除。这大大提高了长期运行的 Flower " +"服务器的内存效率。" -#: ../../source/ref-changelog.md:199 +#: ../../source/ref-changelog.md:326 msgid "" "**Fix spilling issues related to Ray during simulations** " "([#1698](https://github.com/adap/flower/pull/1698))" -msgstr "" +msgstr "**修复模拟过程中与Ray有关的溢出问题** ([#1698](https://github.com/adap/flower/pull/1698))" -#: ../../source/ref-changelog.md:201 +#: ../../source/ref-changelog.md:328 msgid "" "While running long simulations, `ray` was sometimes spilling huge amounts" " of data that would make the training unable to continue. This is now " "fixed! 🎉" -msgstr "" +msgstr "在运行长时间模拟时,`ray` 有时会溢出大量数据,导致训练无法继续。现在这个问题已经解决!🎉" -#: ../../source/ref-changelog.md:203 +#: ../../source/ref-changelog.md:330 msgid "" "**Add new example using** `TabNet` **and Flower** " "([#1725](https://github.com/adap/flower/pull/1725))" msgstr "" +"** 添加使用** `TabNet` ** 的新示例** " +"([#1725](https://github.com/adap/flower/pull/1725))" -#: ../../source/ref-changelog.md:205 +#: ../../source/ref-changelog.md:332 msgid "" "TabNet is a powerful and flexible framework for training machine learning" " models on tabular data. We now have a federated example using Flower: " "[https://github.com/adap/flower/tree/main/examples/tabnet](https://github.com/adap/flower/tree/main/examples/quickstart_tabnet)." 
msgstr "" +"TabNet 是一个强大而灵活的框架,用于在表格数据上训练机器学习模型。我们现在有一个使用 Flower " +"的联邦示例:[https://github.com/adap/flower/tree/main/examples/tabnet](https://github.com/adap/flower/tree/main/examples/quickstart_tabnet)。" -#: ../../source/ref-changelog.md:207 +#: ../../source/ref-changelog.md:334 msgid "" "**Add new how-to guide for monitoring simulations** " "([#1649](https://github.com/adap/flower/pull/1649))" -msgstr "" +msgstr "**添加新的模拟监控指南** ([#1649](https://github.com/adap/flower/pull/1649))" -#: ../../source/ref-changelog.md:209 +#: ../../source/ref-changelog.md:336 msgid "" "We now have a documentation guide to help users monitor their performance" " during simulations." -msgstr "" +msgstr "我们现在有一份文档指南,可帮助用户在模拟过程中监控其性能。" -#: ../../source/ref-changelog.md:211 +#: ../../source/ref-changelog.md:338 msgid "" "**Add training metrics to** `History` **object during simulations** " "([#1696](https://github.com/adap/flower/pull/1696))" msgstr "" +"**在模拟过程中为** `History` **对象添加训练指标** " +"([#1696](https://github.com/adap/flower/pull/1696))" -#: ../../source/ref-changelog.md:213 +#: ../../source/ref-changelog.md:340 msgid "" "The `fit_metrics_aggregation_fn` can be used to aggregate training " "metrics, but previous releases did not save the results in the `History` " "object. This is now the case!" msgstr "" +"`fit_metrics_aggregation_fn`可用于汇总训练指标,但以前的版本不会将结果保存在 `History` " +"对象中。现在可以了!"
-#: ../../source/ref-changelog.md:215 +#: ../../source/ref-changelog.md:342 msgid "" "**General improvements** " "([#1659](https://github.com/adap/flower/pull/1659), " @@ -7871,24 +13177,77 @@ msgid "" "[#1804](https://github.com/adap/flower/pull/1804), " "[#1805](https://github.com/adap/flower/pull/1805))" msgstr "" +"**普通改进** ([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " +"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " 
+"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " +"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" -#: ../../source/ref-changelog.md:223 +#: ../../source/ref-changelog.md:350 msgid "v1.3.0 (2023-02-06)" -msgstr "" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/ref-changelog.md:229 +#: ../../source/ref-changelog.md:356 msgid "" "`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " "`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" msgstr "" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. 
Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" -#: ../../source/ref-changelog.md:233 +#: ../../source/ref-changelog.md:360 msgid "" "**Add support for** `workload_id` **and** `group_id` **in Driver API** " "([#1595](https://github.com/adap/flower/pull/1595))" msgstr "" +"**在驱动程序应用程序接口中添加对** `workload_id` **和** `group_id` **的支持** " +"([#1595](https://github.com/adap/flower/pull/1595))" -#: ../../source/ref-changelog.md:235 +#: ../../source/ref-changelog.md:362 msgid "" "The (experimental) Driver API now supports a `workload_id` that can be " "used to identify which workload a task belongs to. It also supports a new" @@ -7896,65 +13255,84 @@ msgid "" "training round. Both the `workload_id` and `group_id` enable client nodes" " to decide whether they want to handle a task or not." msgstr "" +"驱动程序 API(试验性)现在支持 `workload_id`,可用于识别任务所属的工作量。它还支持新的 " +"`group_id`,例如,可用于指示当前的训练轮次。通过 `workload_id` 和 `group_id` " +"客户端节点可以决定是否要处理某个任务。" -#: ../../source/ref-changelog.md:237 +#: ../../source/ref-changelog.md:364 msgid "" "**Make Driver API and Fleet API address configurable** " "([#1637](https://github.com/adap/flower/pull/1637))" msgstr "" +"**使Driver API 和Fleet " +"API地址可配置**([#1637](https://github.com/adap/flower/pull/1637))" -#: ../../source/ref-changelog.md:239 +#: ../../source/ref-changelog.md:366 msgid "" "The (experimental) long-running Flower server (Driver API and Fleet API) " "can now configure the server address of both Driver API (via `--driver-" "api-address`) and Fleet API (via `--fleet-api-address`) when starting:" msgstr "" +"长期运行的 Flower 服务器(Driver API 和 Fleet API)现在可以在启动时配置 Driver API(通过 " +"`--driver-api-address`)和 Fleet API(通过 `-fleet-api-address`)的服务器地址:" -#: ../../source/ref-changelog.md:241 +#: ../../source/ref-changelog.md:368 +#, fuzzy msgid "" "`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " "\"0.0.0.0:8086\"`" msgstr "" +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " 
+"\"0.0.0.0:8086\"`" -#: ../../source/ref-changelog.md:243 +#: ../../source/ref-changelog.md:370 msgid "Both IPv4 and IPv6 addresses are supported." -msgstr "" +msgstr "支持 IPv4 和 IPv6 地址。" -#: ../../source/ref-changelog.md:245 +#: ../../source/ref-changelog.md:372 msgid "" "**Add new example of Federated Learning using fastai and Flower** " "([#1598](https://github.com/adap/flower/pull/1598))" msgstr "" +"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " +"([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/ref-changelog.md:247 +#: ../../source/ref-changelog.md:374 msgid "" "A new code example (`quickstart_fastai`) demonstrates federated learning " "with [fastai](https://www.fast.ai/) and Flower. You can find it here: " "[quickstart_fastai](https://github.com/adap/flower/tree/main/examples/quickstart_fastai)." msgstr "" +"一个新的代码示例(`quickstart_fastai`)演示了使用 [fastai](https://www.fast.ai/) 和 " +"Flower 的联邦学习。您可以在这里找到它: " +"[quickstart_fastai](https://github.com/adap/flower/tree/main/examples/quickstart_fastai)。" -#: ../../source/ref-changelog.md:249 +#: ../../source/ref-changelog.md:376 msgid "" "**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" " versions of Android** " "([#1603](https://github.com/adap/flower/pull/1603))" msgstr "" +"**使安卓示例兼容** `flwr >= 1.0.0` **和最新版本的安卓** " +"([#1603](https://github.com/adap/flower/pull/1603))" -#: ../../source/ref-changelog.md:251 +#: ../../source/ref-changelog.md:378 msgid "" "The Android code example has received a substantial update: the project " "is compatible with Flower 1.0 (and later), the UI received a full " "refresh, and the project is updated to be compatible with newer Android " "tooling." 
msgstr "" +"Android 代码示例已进行了大幅更新:项目兼容 Flower 1.0(及更高版本),用户界面已全面刷新,项目已更新为兼容较新的 Android" +" 工具。" -#: ../../source/ref-changelog.md:253 +#: ../../source/ref-changelog.md:380 msgid "" "**Add new `FedProx` strategy** " "([#1619](https://github.com/adap/flower/pull/1619))" -msgstr "" +msgstr "**添加新的`FedProx`策略** ([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:255 +#: ../../source/ref-changelog.md:382 msgid "" "This " "[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" @@ -7965,48 +13343,56 @@ msgid "" "parameter called `proximal_mu` to regularize the local models with " "respect to the global models." msgstr "" +"该[策略](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)与[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)几乎相同,但可以帮助用户复现本[论文](https://arxiv.org/abs/1812.06127)中的描述。它的本质是添加一个名为" +" `proximal_mu`的参数,使局部模型与全局模型正则化。" -#: ../../source/ref-changelog.md:257 +#: ../../source/ref-changelog.md:384 msgid "" "**Add new metrics to telemetry events** " "([#1640](https://github.com/adap/flower/pull/1640))" -msgstr "" +msgstr "**为遥测事件添加新指标**([#1640](https://github.com/adap/flower/pull/1640))" -#: ../../source/ref-changelog.md:259 +#: ../../source/ref-changelog.md:386 msgid "" "An updated event structure allows, for example, the clustering of events " "within the same workload." 
-msgstr "" +msgstr "例如,更新后的事件结构可以将同一工作负载中的事件集中在一起。" -#: ../../source/ref-changelog.md:261 +#: ../../source/ref-changelog.md:388 msgid "" "**Add new custom strategy tutorial section** " "[#1623](https://github.com/adap/flower/pull/1623)" -msgstr "" +msgstr "**添加新的自定义策略教程部分** [#1623](https://github.com/adap/flower/pull/1623)" -#: ../../source/ref-changelog.md:263 +#: ../../source/ref-changelog.md:390 msgid "" "The Flower tutorial now has a new section that covers implementing a " "custom strategy from scratch: [Open in " "Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" "/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" msgstr "" +"Flower 教程新增了一个章节,介绍如何从零开始实施自定义策略: [在 Colab " +"中打开](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" -#: ../../source/ref-changelog.md:265 +#: ../../source/ref-changelog.md:392 msgid "" "**Add new custom serialization tutorial section** " "([#1622](https://github.com/adap/flower/pull/1622))" -msgstr "" +msgstr "** 添加新的自定义序列化教程部分** ([#1622](https://github.com/adap/flower/pull/1622))" -#: ../../source/ref-changelog.md:267 +#: ../../source/ref-changelog.md:394 msgid "" "The Flower tutorial now has a new section that covers custom " "serialization: [Open in " "Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" "/tutorial-customize-the-client-pytorch.ipynb)" msgstr "" +"Flower 教程现在新增了一个章节,介绍自定义序列化: [在 Colab " +"中打开](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" -#: ../../source/ref-changelog.md:269 +#: ../../source/ref-changelog.md:396 msgid "" "**General improvements** " "([#1638](https://github.com/adap/flower/pull/1638), " @@ -8043,8 +13429,41 @@ msgid "" "[#1572](https://github.com/adap/flower/pull/1572), " "[#1586](https://github.com/adap/flower/pull/1586))" msgstr "" +"**普通改进** 
([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github. com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github. com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github. 
com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" -#: ../../source/ref-changelog.md:273 +#: ../../source/ref-changelog.md:400 msgid "" "**Updated documentation** " "([#1629](https://github.com/adap/flower/pull/1629), " @@ -8055,103 +13474,137 @@ msgid "" "[#1613](https://github.com/adap/flower/pull/1613), " "[#1614](https://github.com/adap/flower/pull/1614))" msgstr "" +"**更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:275 ../../source/ref-changelog.md:342 +#: ../../source/ref-changelog.md:402 ../../source/ref-changelog.md:469 msgid "" "As usual, the documentation has improved quite a bit. It is another step " "in our effort to make the Flower documentation the best documentation of " "any project. Stay tuned and as always, feel free to provide feedback!" -msgstr "" +msgstr "和往常一样,我们的文档有了很大的改进。这是我们努力使 Flower 文档成为所有项目中最好文档的又一步骤。请继续关注,并随时提供反馈意见!" -#: ../../source/ref-changelog.md:281 +#: ../../source/ref-changelog.md:408 msgid "v1.2.0 (2023-01-13)" -msgstr "" +msgstr "v1.2.0 (2023-01-13)" -#: ../../source/ref-changelog.md:287 +#: ../../source/ref-changelog.md:414 msgid "" "`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L."
" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" msgstr "" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." +" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" -#: ../../source/ref-changelog.md:291 +#: ../../source/ref-changelog.md:418 msgid "" "**Introduce new Flower Baseline: FedAvg MNIST** " "([#1497](https://github.com/adap/flower/pull/1497), " "[#1552](https://github.com/adap/flower/pull/1552))" -msgstr "" +msgstr "" +"**引入新的 Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" -#: ../../source/ref-changelog.md:293 +#: ../../source/ref-changelog.md:420 msgid "" "Over the coming weeks, we will be releasing a number of new reference " "implementations useful especially to FL newcomers. They will typically " "revisit well known papers from the literature, and be suitable for " "integration in your own application or for experimentation, in order to " "deepen your knowledge of FL in general. Today's release is the first in " -"this series. [Read more.](https://flower.dev/blog/2023-01-12-fl-starter-" +"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" "pack-fedavg-mnist-cnn/)" msgstr "" +"在未来几周内,我们将发布一些新的参考,特别是对 FL " +"新手有用的方法。它们通常会重温文献中的知名论文,适合集成到您自己的应用程序中或用于实验,以加深您对 FL " +"的总体了解。今天发布的是该系列中的第一篇。[阅读全文](https://flower.ai/blog/2023-01-12-fl-" +"starter-pack-fedavg-mnist-cnn/)" -#: ../../source/ref-changelog.md:295 +#: ../../source/ref-changelog.md:422 msgid "" "**Improve GPU support in simulations** " "([#1555](https://github.com/adap/flower/pull/1555))" -msgstr "" +msgstr "**改进模拟中的 GPU 支持**([#1555](https://github.com/adap/flower/pull/1555))" -#: ../../source/ref-changelog.md:297 +#: ../../source/ref-changelog.md:424 msgid "" "The Ray-based Virtual Client Engine (`start_simulation`) has been updated" " to improve GPU support. The update includes some of the hard-earned " "lessons from scaling simulations in GPU cluster environments. 
New " "defaults make running GPU-based simulations substantially more robust." msgstr "" +"基于 Ray 的虚拟客户端引擎 (`start_simulation`)已更新,以改进对 GPU 的支持。此次更新包含了在 GPU " +"集群环境中扩展模拟的一些经验教训。新的默认设置使基于 GPU 的模拟运行更加稳健。" -#: ../../source/ref-changelog.md:299 +#: ../../source/ref-changelog.md:426 msgid "" "**Improve GPU support in Jupyter Notebook tutorials** " "([#1527](https://github.com/adap/flower/pull/1527), " "[#1558](https://github.com/adap/flower/pull/1558))" msgstr "" +"**改进 Jupyter Notebook 教程中的 GPU 支持** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" -#: ../../source/ref-changelog.md:301 +#: ../../source/ref-changelog.md:428 msgid "" "Some users reported that Jupyter Notebooks have not always been easy to " "use on GPU instances. We listened and made improvements to all of our " "Jupyter notebooks! Check out the updated notebooks here:" msgstr "" +"一些用户报告说,在 GPU 实例上使用 Jupyter 笔记本并不是很方便。我们听取了他们的意见,并对所有 Jupyter " +"笔记本进行了改进!点击这里查看更新后的笔记本:" -#: ../../source/ref-changelog.md:303 +#: ../../source/ref-changelog.md:430 msgid "" -"[An Introduction to Federated Learning](https://flower.dev/docs/framework" +"[An Introduction to Federated Learning](https://flower.ai/docs/framework" "/tutorial-get-started-with-flower-pytorch.html)" msgstr "" +"[联邦学习简介](https://flower.ai/docs/framework/tutorial-get-started-with-" +"flower-pytorch.html)" -#: ../../source/ref-changelog.md:304 +#: ../../source/ref-changelog.md:431 msgid "" -"[Strategies in Federated Learning](https://flower.dev/docs/framework" +"[Strategies in Federated Learning](https://flower.ai/docs/framework" "/tutorial-use-a-federated-learning-strategy-pytorch.html)" msgstr "" +"[联邦学习策略](https://flower.ai/docs/framework/tutorial-use-a-federated-" +"learning-strategy-pytorch.html)" -#: ../../source/ref-changelog.md:305 +#: ../../source/ref-changelog.md:432 msgid "" -"[Building a Strategy](https://flower.dev/docs/framework/tutorial-build-a" +"[Building a 
Strategy](https://flower.ai/docs/framework/tutorial-build-a" "-strategy-from-scratch-pytorch.html)" msgstr "" +"[制定策略](https://flower.ai/docs/framework/tutorial-build-a-strategy-from-" +"scratch-pytorch.html)" -#: ../../source/ref-changelog.md:306 +#: ../../source/ref-changelog.md:433 msgid "" -"[Client and NumPyClient](https://flower.dev/docs/framework/tutorial-" +"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" "customize-the-client-pytorch.html)" msgstr "" +"[客户端和 NumPyClient](https://flower.ai/docs/framework/tutorial-customize-" +"the-client-pytorch.html)" -#: ../../source/ref-changelog.md:308 +#: ../../source/ref-changelog.md:435 msgid "" "**Introduce optional telemetry** " "([#1533](https://github.com/adap/flower/pull/1533), " "[#1544](https://github.com/adap/flower/pull/1544), " "[#1584](https://github.com/adap/flower/pull/1584))" msgstr "" +"**引入可选遥测**([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584)" -#: ../../source/ref-changelog.md:310 +#: ../../source/ref-changelog.md:437 msgid "" "After a [request for " "feedback](https://github.com/adap/flower/issues/1534) from the community," @@ -8160,16 +13613,21 @@ msgid "" "Flower. Doing this enables the Flower team to understand how Flower is " "used and what challenges users might face." msgstr "" +"在社区发出[反馈请求](https://github.com/adap/flower/issues/1534)之后,Flower " +"开放源码项目引入了可选的*匿名*使用指标收集,以便在充分知情的情况下做出改进 Flower 的决定。这样做能让 Flower 团队了解 " +"Flower 的使用情况以及用户可能面临的挑战。" -#: ../../source/ref-changelog.md:312 +#: ../../source/ref-changelog.md:439 msgid "" "**Flower is a friendly framework for collaborative AI and data science.**" " Staying true to this statement, Flower makes it easy to disable " "telemetry for users who do not want to share anonymous usage metrics. " -"[Read more.](https://flower.dev/docs/telemetry.html)." +"[Read more.](https://flower.ai/docs/telemetry.html)." 
msgstr "" +"**Flower 是一个用于协作式人工智能和数据科学的友好框架。** Flower " +"遵循这一声明,让不想分享匿名使用指标的用户可以轻松禁用遥测技术。[阅读全文](https://flower.ai/docs/telemetry.html)。" -#: ../../source/ref-changelog.md:314 +#: ../../source/ref-changelog.md:441 msgid "" "**Introduce (experimental) Driver API** " "([#1520](https://github.com/adap/flower/pull/1520), " @@ -8180,8 +13638,15 @@ msgid "" "[#1551](https://github.com/adap/flower/pull/1551), " "[#1567](https://github.com/adap/flower/pull/1567))" msgstr "" +"**引入(试验性)Driver API** ([#1520](https://github.com/adap/flower/pull/1520)," +" [#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/ref-changelog.md:316 +#: ../../source/ref-changelog.md:443 msgid "" "Flower now has a new (experimental) Driver API which will enable fully " "programmable, async, and multi-tenant Federated Learning and Federated " @@ -8189,67 +13654,83 @@ msgid "" " will be the abstraction that many upcoming features will be built on - " "and you can start building those things now, too." msgstr "" +"Flower 现在有了一个新的(试验性的)驱动程序应用程序接口(Driver " +"API),它将支持完全可编程、异步和多租户的联邦学习(Federated Learning)和联邦分析(Federated " +"Analytics)应用程序。展望未来,Driver API 将成为许多即将推出的功能的抽象基础,您现在就可以开始构建这些功能。" -#: ../../source/ref-changelog.md:318 +#: ../../source/ref-changelog.md:445 msgid "" "The Driver API also enables a new execution mode in which the server runs" " indefinitely. Multiple individual workloads can run concurrently and " "start and stop their execution independent of the server. This is " "especially useful for users who want to deploy Flower in production." 
msgstr "" +"驱动程序应用程序接口还支持一种新的执行模式,在这种模式下,服务器可无限期运行。多个单独的工作负载可以同时运行,并独立于服务器启动和停止执行。这对于希望在生产中部署" +" Flower 的用户来说尤其有用。" -#: ../../source/ref-changelog.md:320 +#: ../../source/ref-changelog.md:447 msgid "" "To learn more, check out the `mt-pytorch` code example. We look forward " "to you feedback!" -msgstr "" +msgstr "要了解更多信息,请查看 `mt-pytorch` 代码示例。我们期待您的反馈!" -#: ../../source/ref-changelog.md:322 +#: ../../source/ref-changelog.md:449 msgid "" "Please note: *The Driver API is still experimental and will likely change" " significantly over time.*" -msgstr "" +msgstr "请注意:Driver API仍处于试验阶段,随着时间的推移可能会发生重大变化。*" -#: ../../source/ref-changelog.md:324 +#: ../../source/ref-changelog.md:451 msgid "" "**Add new Federated Analytics with Pandas example** " "([#1469](https://github.com/adap/flower/pull/1469), " "[#1535](https://github.com/adap/flower/pull/1535))" msgstr "" +"** 添加新的使用 Pandas " +"的联邦分析示例**([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535)" -#: ../../source/ref-changelog.md:326 +#: ../../source/ref-changelog.md:453 msgid "" "A new code example (`quickstart_pandas`) demonstrates federated analytics" " with Pandas and Flower. You can find it here: " "[quickstart_pandas](https://github.com/adap/flower/tree/main/examples/quickstart_pandas)." 
msgstr "" +"新代码示例(`quickstart_pandas`)演示了使用 Pandas 和 Flower 进行联邦分析。您可以在此处找到它: " +"[quickstart_pandas](https://github.com/adap/flower/tree/main/examples/quickstart_pandas)。" -#: ../../source/ref-changelog.md:328 +#: ../../source/ref-changelog.md:455 msgid "" "**Add new strategies: Krum and MultiKrum** " "([#1481](https://github.com/adap/flower/pull/1481))" msgstr "" +"**添加新策略: Krum 和 MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" -#: ../../source/ref-changelog.md:330 +#: ../../source/ref-changelog.md:457 msgid "" "Edoardo, a computer science student at the Sapienza University of Rome, " "contributed a new `Krum` strategy that enables users to easily use Krum " "and MultiKrum in their workloads." msgstr "" +"罗马萨皮恩扎大学(Sapienza University)计算机科学专业的学生埃多尔多(Edoardo)提出了一种新的 \"Krum " +"\"策略,使用户能够在其工作负载中轻松使用 Krum 和 MultiKrum。" -#: ../../source/ref-changelog.md:332 +#: ../../source/ref-changelog.md:459 msgid "" "**Update C++ example to be compatible with Flower v1.2.0** " "([#1495](https://github.com/adap/flower/pull/1495))" msgstr "" +"** 更新 C++ 示例,与 Flower v1.2.0 兼容** " +"([#1495](https://github.com/adap/flower/pull/1495))" -#: ../../source/ref-changelog.md:334 +#: ../../source/ref-changelog.md:461 msgid "" "The C++ code example has received a substantial update to make it " "compatible with the latest version of Flower." 
-msgstr "" +msgstr "为了与最新版本的 Flower 兼容,C++ 示例代码进行了大幅更新。" -#: ../../source/ref-changelog.md:336 +#: ../../source/ref-changelog.md:463 msgid "" "**General improvements** " "([#1491](https://github.com/adap/flower/pull/1491), " @@ -8266,8 +13747,21 @@ msgid "" "[#1564](https://github.com/adap/flower/pull/1564), " "[#1566](https://github.com/adap/flower/pull/1566))" msgstr "" +"**普通改进** ([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github. com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/ref-changelog.md:340 +#: ../../source/ref-changelog.md:467 msgid "" "**Updated documentation** " "([#1494](https://github.com/adap/flower/pull/1494), " @@ -8280,40 +13774,57 @@ msgid "" "[#1519](https://github.com/adap/flower/pull/1519), " "[#1515](https://github.com/adap/flower/pull/1515))" msgstr "" +"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:344 +#: 
../../source/ref-changelog.md:471 msgid "" "One highlight is the new [first time contributor " -"guide](https://flower.dev/docs/first-time-contributors.html): if you've " +"guide](https://flower.ai/docs/first-time-contributors.html): if you've " "never contributed on GitHub before, this is the perfect place to start!" msgstr "" +"其中一个亮点是新的[首次贡献者指南](https://flower.ai/docs/first-time-" +"contributors.html):如果你以前从未在 GitHub 上做过贡献,这将是一个完美的开始!" -#: ../../source/ref-changelog.md:350 +#: ../../source/ref-changelog.md:477 msgid "v1.1.0 (2022-10-31)" -msgstr "" +msgstr "v1.1.0 (2022-10-31)" -#: ../../source/ref-changelog.md:354 +#: ../../source/ref-changelog.md:481 msgid "" "We would like to give our **special thanks** to all the contributors who " "made the new version of Flower possible (in `git shortlog` order):" -msgstr "" +msgstr "在此,我们向所有促成 Flower 新版本的贡献者致以**特别的谢意(按 \"git shortlog \"顺序排列):" -#: ../../source/ref-changelog.md:356 +#: ../../source/ref-changelog.md:483 msgid "" "`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " "Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " "Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " "`danielnugraha`, `edogab33`" msgstr "" +"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" -#: ../../source/ref-changelog.md:360 +#: ../../source/ref-changelog.md:487 msgid "" "**Introduce Differential Privacy wrappers (preview)** " "([#1357](https://github.com/adap/flower/pull/1357), " "[#1460](https://github.com/adap/flower/pull/1460))" msgstr "" +"**引入差分隐私包装器(预览)** ([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" -#: ../../source/ref-changelog.md:362 +#: ../../source/ref-changelog.md:489 msgid "" "The first (experimental) preview of pluggable Differential Privacy " "wrappers enables easy configuration and usage of differential privacy " @@ -8321,101 +13832,115 @@ msgid "" "strategy-agnostic usage of both client-side DP and server-side DP. Head " "over to the Flower docs, a new explainer goes into more detail." msgstr "" +"可插拔差分隐私封装器的首个(实验性)预览版可轻松配置和使用差分隐私(DP)。可插拔的差分隐私封装器可实现客户端差分隐私和服务器端差分隐私的框架无关**以及**策略无关的使用。请访问" +" Flower 文档,新的解释器会提供更多细节。" -#: ../../source/ref-changelog.md:364 +#: ../../source/ref-changelog.md:491 msgid "" "**New iOS CoreML code example** " "([#1289](https://github.com/adap/flower/pull/1289))" -msgstr "" +msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/ref-changelog.md:366 +#: ../../source/ref-changelog.md:493 msgid "" "Flower goes iOS! A massive new code example shows how Flower clients can " "be built for iOS. The code example contains both Flower iOS SDK " "components that can be used for many tasks, and one task example running " "on CoreML." 
msgstr "" +"Flower 进入 iOS!大量新代码示例展示了如何为 iOS 构建 Flower 客户端。该代码示例包含可用于多种任务的 Flower iOS " +"SDK 组件,以及在 CoreML 上运行的一个任务示例。" -#: ../../source/ref-changelog.md:368 +#: ../../source/ref-changelog.md:495 msgid "" "**New FedMedian strategy** " "([#1461](https://github.com/adap/flower/pull/1461))" -msgstr "" +msgstr "**新的联邦医疗策略** ([#1461](https://github.com/adap/flower/pull/1461))" -#: ../../source/ref-changelog.md:370 +#: ../../source/ref-changelog.md:497 msgid "" "The new `FedMedian` strategy implements Federated Median (FedMedian) by " "[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." msgstr "" +"新的 \"FedMedian \"战略实现了[Yin " +"等人,2018]的联邦中值(FedMedian)(https://arxiv.org/pdf/1803.01498v1.pdf)。" -#: ../../source/ref-changelog.md:372 +#: ../../source/ref-changelog.md:499 msgid "" "**Log** `Client` **exceptions in Virtual Client Engine** " "([#1493](https://github.com/adap/flower/pull/1493))" -msgstr "" +msgstr "**虚拟客户端引擎中的**日志**`客户端`**异常([#1493](https://github.com/adap/flower/pull/1493))" -#: ../../source/ref-changelog.md:374 +#: ../../source/ref-changelog.md:501 msgid "" "All `Client` exceptions happening in the VCE are now logged by default " "and not just exposed to the configured `Strategy` (via the `failures` " "argument)." -msgstr "" +msgstr "VCE 中发生的所有 \"客户端 \"异常现在都会被默认记录下来,而不只是暴露给配置的 `Strategy`(通过 `failures`参数)。" -#: ../../source/ref-changelog.md:376 +#: ../../source/ref-changelog.md:503 msgid "" "**Improve Virtual Client Engine internals** " "([#1401](https://github.com/adap/flower/pull/1401), " "[#1453](https://github.com/adap/flower/pull/1453))" -msgstr "" +msgstr "**改进虚拟客户端引擎内部**([#1401](https://github.com/adap/flower/pull/1401)、[#1453](https://github.com/adap/flower/pull/1453))" -#: ../../source/ref-changelog.md:378 +#: ../../source/ref-changelog.md:505 msgid "" "Some internals of the Virtual Client Engine have been revamped. 
The VCE " "now uses Ray 2.0 under the hood, the value type of the `client_resources`" " dictionary changed to `float` to allow fractions of resources to be " "allocated." msgstr "" +"虚拟客户端引擎的部分内部结构已进行了修改。VCE 现在使用 Ray 2.0,\"client_resources \"字典的值类型改为 " +"\"float\",以允许分配分数资源。" -#: ../../source/ref-changelog.md:380 +#: ../../source/ref-changelog.md:507 msgid "" "**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " "Client Engine**" -msgstr "" +msgstr "**支持虚拟客户端引擎中的可选** `Client`**/**`NumPyClient` **方法**" -#: ../../source/ref-changelog.md:382 +#: ../../source/ref-changelog.md:509 msgid "" "The Virtual Client Engine now has full support for optional `Client` (and" " `NumPyClient`) methods." -msgstr "" +msgstr "虚拟客户端引擎现在完全支持可选的 `Client`(和 `NumPyClient`)方法。" -#: ../../source/ref-changelog.md:384 +#: ../../source/ref-changelog.md:511 msgid "" "**Provide type information to packages using** `flwr` " "([#1377](https://github.com/adap/flower/pull/1377))" msgstr "" +"**使用** `flwr`向软件包提供类型信息 " +"([#1377](https://github.com/adap/flower/pull/1377))" -#: ../../source/ref-changelog.md:386 +#: ../../source/ref-changelog.md:513 msgid "" "The package `flwr` is now bundled with a `py.typed` file indicating that " "the package is typed. This enables typing support for projects or " "packages that use `flwr` by enabling them to improve their code using " "static type checkers like `mypy`." 
msgstr "" +"软件包 `flwr` 现在捆绑了一个 `py.typed` 文件,表明该软件包是类型化的。这样,使用 `flwr` 的项目或软件包就可以使用 " +"`mypy` 等静态类型检查器改进代码,从而获得类型支持。" -#: ../../source/ref-changelog.md:388 +#: ../../source/ref-changelog.md:515 msgid "" "**Updated code example** " "([#1344](https://github.com/adap/flower/pull/1344), " "[#1347](https://github.com/adap/flower/pull/1347))" msgstr "" +"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:390 +#: ../../source/ref-changelog.md:517 msgid "" "The code examples covering scikit-learn and PyTorch Lightning have been " "updated to work with the latest version of Flower." -msgstr "" +msgstr "涵盖 scikit-learn 和 PyTorch Lightning 的代码示例已更新,以便与最新版本的 Flower 配合使用。" -#: ../../source/ref-changelog.md:392 +#: ../../source/ref-changelog.md:519 msgid "" "**Updated documentation** " "([#1355](https://github.com/adap/flower/pull/1355), " @@ -8436,41 +13961,60 @@ msgid "" "[#1465](https://github.com/adap/flower/pull/1465), " "[#1467](https://github.com/adap/flower/pull/1467))" msgstr "" +"**更新文档** ([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github. 
com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" -#: ../../source/ref-changelog.md:394 +#: ../../source/ref-changelog.md:521 msgid "" "There have been so many documentation updates that it doesn't even make " "sense to list them individually." -msgstr "" +msgstr "文档更新的数量之多,甚至没有必要逐一列出。" -#: ../../source/ref-changelog.md:396 +#: ../../source/ref-changelog.md:523 msgid "" "**Restructured documentation** " "([#1387](https://github.com/adap/flower/pull/1387))" -msgstr "" +msgstr "**重构文档**([#1387](https://github.com/adap/flower/pull/1387))" -#: ../../source/ref-changelog.md:398 +#: ../../source/ref-changelog.md:525 msgid "" "The documentation has been restructured to make it easier to navigate. " "This is just the first step in a larger effort to make the Flower " "documentation the best documentation of any project ever. Stay tuned!" -msgstr "" +msgstr "我们对文档进行了重组,使其更易于浏览。这只是让 Flower 文档成为所有项目中最好文档的第一步。敬请期待!" -#: ../../source/ref-changelog.md:400 +#: ../../source/ref-changelog.md:527 msgid "" "**Open in Colab button** " "([#1389](https://github.com/adap/flower/pull/1389))" -msgstr "" +msgstr "**在 Colab 中打开按钮** ([#1389](https://github.com/adap/flower/pull/1389))" -#: ../../source/ref-changelog.md:402 +#: ../../source/ref-changelog.md:529 msgid "" "The four parts of the Flower Federated Learning Tutorial now come with a " "new `Open in Colab` button. No need to install anything on your local " "machine, you can now use and learn about Flower in your browser, it's " "only a single click away." 
msgstr "" +"Flower 联邦学习教程的四个部分现在都带有一个新的 \"在 Colab 中打开 " +"\"按钮。现在,您无需在本地计算机上安装任何软件,只需点击一下,就可以在浏览器中使用和学习 Flower。" -#: ../../source/ref-changelog.md:404 +#: ../../source/ref-changelog.md:531 msgid "" "**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," " [#1470](https://github.com/adap/flower/pull/1470), " @@ -8479,49 +14023,59 @@ msgid "" "[#1474](https://github.com/adap/flower/pull/1474), " "[#1475](https://github.com/adap/flower/pull/1475))" msgstr "" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:406 +#: ../../source/ref-changelog.md:533 msgid "" "The Flower Federated Learning Tutorial has two brand-new parts covering " "custom strategies (still WIP) and the distinction between `Client` and " "`NumPyClient`. The existing parts one and two have also been improved " "(many small changes and fixes)." 
msgstr "" +"Flower 联邦学习教程有两个全新的部分,涉及自定义策略(仍处于 WIP 阶段)和 `Client` 与 `NumPyClient` " +"之间的区别。现有的第一和第二部分也得到了改进(许多小改动和修正)。" -#: ../../source/ref-changelog.md:412 +#: ../../source/ref-changelog.md:539 msgid "v1.0.0 (2022-07-28)" -msgstr "" +msgstr "v1.0.0 (2022-07-28)" -#: ../../source/ref-changelog.md:414 +#: ../../source/ref-changelog.md:541 msgid "Highlights" -msgstr "" +msgstr "亮点" -#: ../../source/ref-changelog.md:416 +#: ../../source/ref-changelog.md:543 msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" -msgstr "" +msgstr "稳定的**虚拟客户端引擎**(可通过`start_simulation`访问)" -#: ../../source/ref-changelog.md:417 +#: ../../source/ref-changelog.md:544 msgid "All `Client`/`NumPyClient` methods are now optional" -msgstr "" +msgstr "所有 `Client`/`NumPyClient` 方法现在都是可选的了" -#: ../../source/ref-changelog.md:418 +#: ../../source/ref-changelog.md:545 msgid "Configurable `get_parameters`" -msgstr "" +msgstr "可配置的`get_parameters`" -#: ../../source/ref-changelog.md:419 +#: ../../source/ref-changelog.md:546 msgid "" "Tons of small API cleanups resulting in a more coherent developer " "experience" -msgstr "" +msgstr "对大量小型应用程序接口进行了清理,使开发人员的体验更加一致" -#: ../../source/ref-changelog.md:423 +#: ../../source/ref-changelog.md:550 msgid "" "We would like to give our **special thanks** to all the contributors who " "made Flower 1.0 possible (in reverse [GitHub " "Contributors](https://github.com/adap/flower/graphs/contributors) order):" msgstr "" +"在此,我们谨向所有促成 Flower 1.0 的贡献者致以**特别的谢意(按[GitHub " +"贡献者](https://github.com/adap/flower/graphs/contributors) 倒序排列):" -#: ../../source/ref-changelog.md:425 +#: ../../source/ref-changelog.md:552 msgid "" "[@rtaiello](https://github.com/rtaiello), " "[@g-pichler](https://github.com/g-pichler), [@rob-" @@ -8560,14 +14114,50 @@ msgid "" "[@tanertopal](https://github.com/tanertopal), " "[@danieljanes](https://github.com/danieljanes)." 
msgstr "" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sandracl72](https://github.com/sandracl72), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." 
-#: ../../source/ref-changelog.md:429 +#: ../../source/ref-changelog.md:556 msgid "" "**All arguments must be passed as keyword arguments** " "([#1338](https://github.com/adap/flower/pull/1338))" -msgstr "" +msgstr "** 所有参数必须作为关键字参数传递** ([#1338](https://github.com/adap/flower/pull/1338))" -#: ../../source/ref-changelog.md:431 +#: ../../source/ref-changelog.md:558 msgid "" "Pass all arguments as keyword arguments, positional arguments are not " "longer supported. Code that uses positional arguments (e.g., " @@ -8576,15 +14166,20 @@ msgid "" "`start_client(server_address=\"127.0.0.1:8080\", " "client=FlowerClient())`)." msgstr "" +"以关键字参数传递所有参数,不再支持位置参数。使用位置参数的代码(例如,`start_client(\"127.0.0.1:8080\", " +"FlowerClient())`)必须为每个位置参数添加关键字(例如,`start_client(server_address=\"127.0.0.1:8080\"," +" client=FlowerClient())`)。" -#: ../../source/ref-changelog.md:433 +#: ../../source/ref-changelog.md:560 msgid "" "**Introduce configuration object** `ServerConfig` **in** `start_server` " "**and** `start_simulation` " "([#1317](https://github.com/adap/flower/pull/1317))" msgstr "" +"**在*** `start_server` ***和*** `start_simulation` 中引入配置对象*** " +"`ServerConfig` ([#1317](https://github.com/adap/flower/pull/1317))" -#: ../../source/ref-changelog.md:435 +#: ../../source/ref-changelog.md:562 msgid "" "Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " "600.0}`, `start_server` and `start_simulation` now expect a configuration" @@ -8592,38 +14187,42 @@ msgid "" " arguments that as the previous config dict, but it makes writing type-" "safe code easier and the default parameters values more transparent." 
msgstr "" +"并非配置字典`{\"num_rounds\": 3, \"round_timeout\": 600.0}`, `start_server`和 " +"`start_simulation`现在用一个类型为 " +"`flwr.server.ServerConfig`的配置对象。`ServerConfig`接收的参数与之前的 config dict " +"相同,但它使编写类型安全代码变得更容易,默认参数值也更加透明。" -#: ../../source/ref-changelog.md:437 +#: ../../source/ref-changelog.md:564 msgid "" "**Rename built-in strategy parameters for clarity** " "([#1334](https://github.com/adap/flower/pull/1334))" -msgstr "" +msgstr "**重新命名内置策略参数,使其更加清晰** ([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/ref-changelog.md:439 +#: ../../source/ref-changelog.md:566 msgid "" "The following built-in strategy parameters were renamed to improve " "readability and consistency with other API's:" -msgstr "" +msgstr "以下内置策略参数已重新命名,以提高可读性并与其他 API 保持一致:" -#: ../../source/ref-changelog.md:441 +#: ../../source/ref-changelog.md:568 msgid "`fraction_eval` --> `fraction_evaluate`" -msgstr "" +msgstr "`fraction_eval` --> `fraction_evaluate`" -#: ../../source/ref-changelog.md:442 +#: ../../source/ref-changelog.md:569 msgid "`min_eval_clients` --> `min_evaluate_clients`" -msgstr "" +msgstr "`min_eval_clients` --> `min_evaluate_clients`" -#: ../../source/ref-changelog.md:443 +#: ../../source/ref-changelog.md:570 msgid "`eval_fn` --> `evaluate_fn`" -msgstr "" +msgstr "`eval_fn` --> `evaluate_fn`" -#: ../../source/ref-changelog.md:445 +#: ../../source/ref-changelog.md:572 msgid "" "**Update default arguments of built-in strategies** " "([#1278](https://github.com/adap/flower/pull/1278))" -msgstr "" +msgstr "**更新内置策略的默认参数** ([#1278](https://github.com/adap/flower/pull/1278))" -#: ../../source/ref-changelog.md:447 +#: ../../source/ref-changelog.md:574 msgid "" "All built-in strategies now use `fraction_fit=1.0` and " "`fraction_evaluate=1.0`, which means they select *all* currently " @@ -8631,44 +14230,54 @@ msgid "" "the previous default values can get the previous behaviour by " "initializing the strategy in the following way:" msgstr "" +"所有内置策略现在都使用 \"fraction_fit=1.0 
\"和 " +"\"fraction_evaluate=1.0\",这意味着它们会选择*所有*当前可用的客户端进行训练和评估。依赖以前默认值的项目可以通过以下方式初始化策略,获得以前的行为:" -#: ../../source/ref-changelog.md:449 +#: ../../source/ref-changelog.md:576 msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" -msgstr "" +msgstr "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" -#: ../../source/ref-changelog.md:451 +#: ../../source/ref-changelog.md:578 msgid "" "**Add** `server_round` **to** `Strategy.evaluate` " "([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" +"**添加*** `server_round` ***到*** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/ref-changelog.md:453 +#: ../../source/ref-changelog.md:580 msgid "" "The `Strategy` method `evaluate` now receives the current round of " "federated learning/evaluation as the first parameter." -msgstr "" +msgstr "`Strategy`的`evaluate` 方法现在会接收当前一轮联邦学习/评估作为第一个参数。" -#: ../../source/ref-changelog.md:455 +#: ../../source/ref-changelog.md:582 msgid "" "**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " "([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" +"**将*** `server_round` **和*** `config` **参数添加到*** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/ref-changelog.md:457 +#: ../../source/ref-changelog.md:584 msgid "" "The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " "three parameters: (1) The current round of federated learning/evaluation " "(`server_round`), (2) the model parameters to evaluate (`parameters`), " "and (3) a config dictionary (`config`)." 
msgstr "" +"传递给内置策略(如 `FedAvg`)的 `evaluate_fn` 现在需要三个参数:(1) 当前一轮联邦学习/评估 " +"(`server_round`),(2) 要评估的模型参数 (`parameters`),(3) 配置字典 (`config`)。" -#: ../../source/ref-changelog.md:459 +#: ../../source/ref-changelog.md:586 msgid "" "**Rename** `rnd` **to** `server_round` " "([#1321](https://github.com/adap/flower/pull/1321))" msgstr "" +"**重新命名** `rnd` ** to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-changelog.md:461 +#: ../../source/ref-changelog.md:588 msgid "" "Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " "`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " @@ -8676,174 +14285,210 @@ msgid "" "To improve reaability and avoid confusion with *random*, this parameter " "has been renamed from `rnd` to `server_round`." msgstr "" +"几个 Flower " +"方法和函数(`evaluate_fn`、`configure_fit`、`aggregate_fit`、`configure_evaluate`、`aggregate_evaluate`)的第一个参数是当前一轮的联邦学习/评估。为提高可重复性并避免与" +" *random* 混淆,该参数已从 `rnd` 更名为 `server_round`。" -#: ../../source/ref-changelog.md:463 +#: ../../source/ref-changelog.md:590 msgid "" "**Move** `flwr.dataset` **to** `flwr_baselines` " "([#1273](https://github.com/adap/flower/pull/1273))" msgstr "" +"**移动*** `flwr.dataset` **到*** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" -#: ../../source/ref-changelog.md:465 +#: ../../source/ref-changelog.md:592 msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." -msgstr "" +msgstr "实验软件包 `flwr.dataset` 已迁移至 Flower Baselines。" -#: ../../source/ref-changelog.md:467 +#: ../../source/ref-changelog.md:594 msgid "" "**Remove experimental strategies** " "([#1280](https://github.com/adap/flower/pull/1280))" -msgstr "" +msgstr "**删除实验策略** ([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/ref-changelog.md:469 +#: ../../source/ref-changelog.md:596 msgid "" "Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " "`FedFSv1`)." 
-msgstr "" +msgstr "移除未维护的试验性策略(`FastAndSlow`、`FedFSv0`、`FedFSv1`)。" -#: ../../source/ref-changelog.md:471 +#: ../../source/ref-changelog.md:598 msgid "" "**Rename** `Weights` **to** `NDArrays` " "([#1258](https://github.com/adap/flower/pull/1258), " "[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" +"**重新命名** `Weights` **到** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:473 +#: ../../source/ref-changelog.md:600 msgid "" "`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " "capture what this type is all about." -msgstr "" +msgstr "flwr.common.Weights \"更名为 \"flwr.common.NDArrays\",以更好地反映该类型的含义。" -#: ../../source/ref-changelog.md:475 +#: ../../source/ref-changelog.md:602 msgid "" "**Remove antiquated** `force_final_distributed_eval` **from** " "`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " "[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" +"**从** `start_server` 中移除过时的** `force_final_distributed_eval` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:477 +#: ../../source/ref-changelog.md:604 msgid "" "The `start_server` parameter `force_final_distributed_eval` has long been" " a historic artefact, in this release it is finally gone for good." 
msgstr "" +"start_server \"参数 \"force_final_distributed_eval " +"\"长期以来一直是个历史遗留问题,在此版本中终于永远消失了。" -#: ../../source/ref-changelog.md:479 +#: ../../source/ref-changelog.md:606 msgid "" "**Make** `get_parameters` **configurable** " "([#1242](https://github.com/adap/flower/pull/1242))" msgstr "" +"**使** `get_parameters` **可配置** " +"([#1242](https://github.com/adap/flower/pull/1242))" -#: ../../source/ref-changelog.md:481 +#: ../../source/ref-changelog.md:608 msgid "" "The `get_parameters` method now accepts a configuration dictionary, just " "like `get_properties`, `fit`, and `evaluate`." msgstr "" +"现在,\"get_parameters \"方法与 \"get_properties\"、\"fit \"和 \"evaluate " +"\"一样,都接受配置字典。" -#: ../../source/ref-changelog.md:483 +#: ../../source/ref-changelog.md:610 msgid "" "**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " "**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" +"**用新的** `config` 参数** 替换** `num_rounds` ** in** `start_simulation` ** " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:485 +#: ../../source/ref-changelog.md:612 msgid "" "The `start_simulation` function now accepts a configuration dictionary " "`config` instead of the `num_rounds` integer. This improves the " "consistency between `start_simulation` and `start_server` and makes " "transitioning between the two easier." 
msgstr "" +"现在,`start_simulation`(开始模拟)` 函数接受配置字典 `config` 而不是 `num_rounds` 整数。这改进了 " +"`start_simulation` 和 `start_server` 之间的一致性,并使两者之间的转换更容易。" -#: ../../source/ref-changelog.md:489 +#: ../../source/ref-changelog.md:616 msgid "" "**Support Python 3.10** " "([#1320](https://github.com/adap/flower/pull/1320))" -msgstr "" +msgstr "** 支持 Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/ref-changelog.md:491 +#: ../../source/ref-changelog.md:618 msgid "" "The previous Flower release introduced experimental support for Python " "3.10, this release declares Python 3.10 support as stable." -msgstr "" +msgstr "上一个 Flower 版本引入了对 Python 3.10 的实验支持,而本版本则宣布对 Python 3.10 的支持为稳定支持。" -#: ../../source/ref-changelog.md:493 +#: ../../source/ref-changelog.md:620 msgid "" "**Make all** `Client` **and** `NumPyClient` **methods optional** " "([#1260](https://github.com/adap/flower/pull/1260), " "[#1277](https://github.com/adap/flower/pull/1277))" msgstr "" +"**使所有** `Client` **和** `NumPyClient` **方法成为可选** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" -#: ../../source/ref-changelog.md:495 +#: ../../source/ref-changelog.md:622 msgid "" "The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " "`fit`, and `evaluate` are all optional. This enables writing clients that" " implement, for example, only `fit`, but no other method. No need to " "implement `evaluate` when using centralized evaluation!" msgstr "" +"`Client`/`NumPyClient`的 \"get_properties\"、\"get_parameters\"、\"fit \"和 " +"\"evaluate \"方法都是可选的。这样就可以编写只实现 `fit` 而不实现其他方法的客户端。使用集中评估时,无需实现 " +"`evaluate`!" 
-#: ../../source/ref-changelog.md:497 +#: ../../source/ref-changelog.md:624 msgid "" "**Enable passing a** `Server` **instance to** `start_simulation` " "([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" +"**启用向** `start_simulation` 传递** `Server` 实例 " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:499 +#: ../../source/ref-changelog.md:626 msgid "" "Similar to `start_server`, `start_simulation` now accepts a full `Server`" " instance. This enables users to heavily customize the execution of " "eperiments and opens the door to running, for example, async FL using the" " Virtual Client Engine." msgstr "" +"与 `start_server` 类似,`start_simulation` 现在也接受一个完整的 `Server` " +"实例。这使得用户可以对实验的执行进行大量自定义,并为使用虚拟客户端引擎运行异步 FL 等打开了大门。" -#: ../../source/ref-changelog.md:501 +#: ../../source/ref-changelog.md:628 msgid "" "**Update code examples** " "([#1291](https://github.com/adap/flower/pull/1291), " "[#1286](https://github.com/adap/flower/pull/1286), " "[#1282](https://github.com/adap/flower/pull/1282))" msgstr "" +"**更新代码示例** ([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:503 +#: ../../source/ref-changelog.md:630 msgid "" "Many code examples received small or even large maintenance updates, " "among them are" -msgstr "" +msgstr "许多代码示例都进行了小规模甚至大规模的维护更新,其中包括" -#: ../../source/ref-changelog.md:505 +#: ../../source/ref-changelog.md:632 msgid "`scikit-learn`" -msgstr "" +msgstr "`scikit-learn`" -#: ../../source/ref-changelog.md:506 +#: ../../source/ref-changelog.md:633 msgid "`simulation_pytorch`" -msgstr "" +msgstr "`simulation_pytorch`" -#: ../../source/ref-changelog.md:507 +#: ../../source/ref-changelog.md:634 msgid "`quickstart_pytorch`" -msgstr "" +msgstr "`quickstart_pytorch`" -#: ../../source/ref-changelog.md:508 +#: ../../source/ref-changelog.md:635 msgid 
"`quickstart_simulation`" -msgstr "" +msgstr "`quickstart_simulation`" -#: ../../source/ref-changelog.md:509 +#: ../../source/ref-changelog.md:636 msgid "`quickstart_tensorflow`" -msgstr "" +msgstr "`quickstart_tensorflow`" -#: ../../source/ref-changelog.md:510 +#: ../../source/ref-changelog.md:637 msgid "`advanced_tensorflow`" -msgstr "" +msgstr "`advanced_tensorflow`" -#: ../../source/ref-changelog.md:512 +#: ../../source/ref-changelog.md:639 msgid "" "**Remove the obsolete simulation example** " "([#1328](https://github.com/adap/flower/pull/1328))" -msgstr "" +msgstr "**删除过时的模拟示例** ([#1328](https://github.com/adap/flower/pull/1328))" -#: ../../source/ref-changelog.md:514 +#: ../../source/ref-changelog.md:641 msgid "" "Removes the obsolete `simulation` example and renames " "`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " "naming of `simulation_pytorch`" msgstr "" +"删除过时的 \"simulation \"示例,并将 \"quickstart_simulation \"重命名为 " +"\"simulation_tensorflow\",使其与 \"simulation_pytorch \"的命名一致" -#: ../../source/ref-changelog.md:516 +#: ../../source/ref-changelog.md:643 msgid "" "**Update documentation** " "([#1223](https://github.com/adap/flower/pull/1223), " @@ -8857,8 +14502,18 @@ msgid "" "[#1305](https://github.com/adap/flower/pull/1305), " "[#1307](https://github.com/adap/flower/pull/1307))" msgstr "" +"**更新文档** ([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/ref-changelog.md:518 +#: ../../source/ref-changelog.md:645 msgid "" "One 
substantial documentation update fixes multiple smaller rendering " "issues, makes titles more succinct to improve navigation, removes a " @@ -8867,25 +14522,30 @@ msgid "" "based documentation, migrates the changelog from `.rst` to `.md`, and " "fixes a number of smaller details!" msgstr "" +"其中一个实质性的文档更新修复了多个较小的渲染问题,使标题更加简洁以改善导航,删除了一个已废弃的库,更新了文档依赖关系,在 API 参考中包含了 " +"`flwr.common` 模块,包含了对基于 markdown 的文档的支持,将更新日志从 `.rst` 移植到了 " +"`.md`,并修复了一些较小的细节!" -#: ../../source/ref-changelog.md:520 ../../source/ref-changelog.md:575 -#: ../../source/ref-changelog.md:644 ../../source/ref-changelog.md:683 +#: ../../source/ref-changelog.md:647 ../../source/ref-changelog.md:702 +#: ../../source/ref-changelog.md:771 ../../source/ref-changelog.md:810 msgid "**Minor updates**" -msgstr "" +msgstr "**小规模更新**" -#: ../../source/ref-changelog.md:522 +#: ../../source/ref-changelog.md:649 msgid "" "Add round number to fit and evaluate log messages " "([#1266](https://github.com/adap/flower/pull/1266))" -msgstr "" +msgstr "添加四舍五入数字,以适应和评估日志信息([#1266](https://github.com/adap/flower/pull/1266))" -#: ../../source/ref-changelog.md:523 +#: ../../source/ref-changelog.md:650 msgid "" "Add secure gRPC connection to the `advanced_tensorflow` code example " "([#847](https://github.com/adap/flower/pull/847))" msgstr "" +"为 `advanced_tensorflow` 代码示例添加安全 gRPC 连接 " +"([#847](https://github.com/adap/flower/pull/847))" -#: ../../source/ref-changelog.md:524 +#: ../../source/ref-changelog.md:651 msgid "" "Update developer tooling " "([#1231](https://github.com/adap/flower/pull/1231), " @@ -8893,85 +14553,109 @@ msgid "" "[#1301](https://github.com/adap/flower/pull/1301), " "[#1310](https://github.com/adap/flower/pull/1310))" msgstr "" +"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310)" -#: ../../source/ref-changelog.md:525 +#: 
../../source/ref-changelog.md:652 msgid "" "Rename ProtoBuf messages to improve consistency " "([#1214](https://github.com/adap/flower/pull/1214), " "[#1258](https://github.com/adap/flower/pull/1258), " "[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" +"重命名 ProtoBuf 消息以提高一致性([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259)" -#: ../../source/ref-changelog.md:527 +#: ../../source/ref-changelog.md:654 msgid "v0.19.0 (2022-05-18)" -msgstr "" +msgstr "v0.19.0 (2022-05-18)" -#: ../../source/ref-changelog.md:531 +#: ../../source/ref-changelog.md:658 msgid "" "**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " "([#919](https://github.com/adap/flower/pull/919), " "[#1127](https://github.com/adap/flower/pull/1127), " "[#914](https://github.com/adap/flower/pull/914))" msgstr "" +"**Flower Baselines(预览): FedOpt、FedBN、FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" -#: ../../source/ref-changelog.md:533 +#: ../../source/ref-changelog.md:660 msgid "" "The first preview release of Flower Baselines has arrived! We're " "kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " "FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " -"to use [Flower Baselines](https://flower.dev/docs/using-baselines.html). " +"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " "With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.dev/docs/contributing-" +"[contribute their own baselines](https://flower.ai/docs/contributing-" "baselines.html)." 
msgstr "" +"Flower Baselines 的第一个预览版已经发布!我们通过实现 " +"FedOpt(FedYogi、FedAdam、FedAdagrad)、FedBN 和 FedAvgM 来启动 Flower " +"Baselines。请查阅文档了解如何使用 [Flower Baselines](https://flower.ai/docs/using-" +"baselines.html)。在首次发布预览版时,我们还邀请社区成员[贡献自己的Baselines](https://flower.ai/docs" +"/contributing-baselines.html)。" -#: ../../source/ref-changelog.md:535 +#: ../../source/ref-changelog.md:662 msgid "" "**C++ client SDK (preview) and code example** " "([#1111](https://github.com/adap/flower/pull/1111))" -msgstr "" +msgstr "**C++客户端SDK(预览版)和代码示例**([#1111](https://github.com/adap/flower/pull/1111))" -#: ../../source/ref-changelog.md:537 +#: ../../source/ref-changelog.md:664 msgid "" "Preview support for Flower clients written in C++. The C++ preview " "includes a Flower client SDK and a quickstart code example that " "demonstrates a simple C++ client using the SDK." msgstr "" +"预览版支持用 C++ 编写的 Flower 客户端。C++ 预览版包括一个 Flower 客户端 SDK 和一个快速入门代码示例,使用 SDK " +"演示了一个简单的 C++ 客户端。" -#: ../../source/ref-changelog.md:539 +#: ../../source/ref-changelog.md:666 msgid "" "**Add experimental support for Python 3.10 and Python 3.11** " "([#1135](https://github.com/adap/flower/pull/1135))" msgstr "" +"** 增加对 Python 3.10 和 Python 3.11 的实验支持** " +"([#1135](https://github.com/adap/flower/pull/1135))" -#: ../../source/ref-changelog.md:541 +#: ../../source/ref-changelog.md:668 msgid "" "Python 3.10 is the latest stable release of Python and Python 3.11 is due" " to be released in October. This Flower release adds experimental support" " for both Python versions." 
msgstr "" +"Python 3.10 是 Python 的最新稳定版本,Python 3.11 将于 10 月份发布。Flower 版本增加了对这两个 " +"Python 版本的实验支持。" -#: ../../source/ref-changelog.md:543 +#: ../../source/ref-changelog.md:670 msgid "" "**Aggregate custom metrics through user-provided functions** " "([#1144](https://github.com/adap/flower/pull/1144))" -msgstr "" +msgstr "**通过用户提供的函数聚合自定义指标**([#1144](https://github.com/adap/flower/pull/1144))" -#: ../../source/ref-changelog.md:545 +#: ../../source/ref-changelog.md:672 msgid "" "Custom metrics (e.g., `accuracy`) can now be aggregated without having to" " customize the strategy. Built-in strategies support two new arguments, " "`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " "allow passing custom metric aggregation functions." msgstr "" +"现在无需定制策略即可聚合自定义度量(如`准确度`)。内置策略支持两个新参数:`fit_metrics_aggregation_fn` " +"和`evaluate_metrics_aggregation_fn`,允许传递自定义度量聚合函数。" -#: ../../source/ref-changelog.md:547 +#: ../../source/ref-changelog.md:674 msgid "" "**User-configurable round timeout** " "([#1162](https://github.com/adap/flower/pull/1162))" -msgstr "" +msgstr "**用户可配置的回合超时**([#1162](https://github.com/adap/flower/pull/1162))" -#: ../../source/ref-changelog.md:549 +#: ../../source/ref-changelog.md:676 msgid "" "A new configuration value allows the round timeout to be set for " "`start_server` and `start_simulation`. If the `config` dictionary " @@ -8979,171 +14663,208 @@ msgid "" "server will wait *at least* `round_timeout` seconds before it closes the " "connection." 
msgstr "" +"新的配置值允许为 `start_server` 和 `start_simulation` 设置回合超时。如果 `config` 字典中包含一个 " +"`round_timeout` 键(以秒为单位的 `float`值),服务器将至少等待 ** `round_timeout` 秒后才关闭连接。" -#: ../../source/ref-changelog.md:551 +#: ../../source/ref-changelog.md:678 msgid "" "**Enable both federated evaluation and centralized evaluation to be used " "at the same time in all built-in strategies** " "([#1091](https://github.com/adap/flower/pull/1091))" msgstr "" +"**允许在所有内置策略中同时使用联邦评价和集中评估** " +"([#1091](https://github.com/adap/flower/pull/1091))" -#: ../../source/ref-changelog.md:553 +#: ../../source/ref-changelog.md:680 msgid "" "Built-in strategies can now perform both federated evaluation (i.e., " "client-side) and centralized evaluation (i.e., server-side) in the same " "round. Federated evaluation can be disabled by setting `fraction_eval` to" " `0.0`." msgstr "" +"内置策略现在可以在同一轮中同时执行联邦评估(即客户端)和集中评估(即服务器端)。可以通过将 `fraction_eval` 设置为 " +"`0.0`来禁用联邦评估。" -#: ../../source/ref-changelog.md:555 +#: ../../source/ref-changelog.md:682 msgid "" "**Two new Jupyter Notebook tutorials** " "([#1141](https://github.com/adap/flower/pull/1141))" msgstr "" +"**两本新的 Jupyter Notebook 教程** " +"([#1141](https://github.com/adap/flower/pull/1141))" -#: ../../source/ref-changelog.md:557 +#: ../../source/ref-changelog.md:684 msgid "" "Two Jupyter Notebook tutorials (compatible with Google Colab) explain " "basic and intermediate Flower features:" -msgstr "" +msgstr "两本 Jupyter Notebook 教程(与 Google Colab 兼容)介绍了 Flower 的基本和中级功能:" -#: ../../source/ref-changelog.md:559 +#: ../../source/ref-changelog.md:686 msgid "" "*An Introduction to Federated Learning*: [Open in " "Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" "-Intro-to-FL-PyTorch.ipynb)" msgstr "" +"*联邦学习简介*: [在 Colab " +"中打开](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" -#: ../../source/ref-changelog.md:561 +#: ../../source/ref-changelog.md:688 msgid "" 
"*Using Strategies in Federated Learning*: [Open in " "Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" "-Strategies-in-FL-PyTorch.ipynb)" msgstr "" +"*在联邦学习中使用策略*: [在 Colab " +"中打开](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" -#: ../../source/ref-changelog.md:563 +#: ../../source/ref-changelog.md:690 msgid "" "**New FedAvgM strategy (Federated Averaging with Server Momentum)** " "([#1076](https://github.com/adap/flower/pull/1076))" msgstr "" +"**新的 FedAvgM 策略(带服务器动量的联邦平均)** " +"([#1076](https://github.com/adap/flower/pull/1076))" -#: ../../source/ref-changelog.md:565 +#: ../../source/ref-changelog.md:692 msgid "" "The new `FedAvgM` strategy implements Federated Averaging with Server " "Momentum \\[Hsu et al., 2019\\]." -msgstr "" +msgstr "新的 \"FedAvgM \"策略实现了带服务器动量的联邦平均[Hsu et al., 2019\\]." -#: ../../source/ref-changelog.md:567 +#: ../../source/ref-changelog.md:694 msgid "" "**New advanced PyTorch code example** " "([#1007](https://github.com/adap/flower/pull/1007))" -msgstr "" +msgstr "**新的 PyTorch 高级代码示例** ([#1007](https://github.com/adap/flower/pull/1007))" -#: ../../source/ref-changelog.md:569 +#: ../../source/ref-changelog.md:696 msgid "" "A new code example (`advanced_pytorch`) demonstrates advanced Flower " "concepts with PyTorch." -msgstr "" +msgstr "新代码示例 (`advanced_pytorch`) 演示了 PyTorch 的高级 Flower 概念。" -#: ../../source/ref-changelog.md:571 +#: ../../source/ref-changelog.md:698 msgid "" "**New JAX code example** " "([#906](https://github.com/adap/flower/pull/906), " "[#1143](https://github.com/adap/flower/pull/1143))" msgstr "" +"**新的 JAX 代码示例**([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143)" -#: ../../source/ref-changelog.md:573 +#: ../../source/ref-changelog.md:700 msgid "" "A new code example (`jax_from_centralized_to_federated`) shows federated " "learning with JAX and Flower." 
-msgstr "" +msgstr "新代码示例(`jax_from_centralized_to_federated`)展示了使用 JAX 和 Flower 的联邦学习。" -#: ../../source/ref-changelog.md:577 +#: ../../source/ref-changelog.md:704 msgid "" "New option to keep Ray running if Ray was already initialized in " "`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" msgstr "" +"新增选项,用于在 \"start_simulation\"(开始模拟)中已初始化 Ray 的情况下保持 Ray " +"运行([#1177](https://github.com/adap/flower/pull/1177))" -#: ../../source/ref-changelog.md:578 +#: ../../source/ref-changelog.md:705 msgid "" "Add support for custom `ClientManager` as a `start_simulation` parameter " "([#1171](https://github.com/adap/flower/pull/1171))" msgstr "" +"添加对自定义 \"客户端管理器 \"作为 \"start_simulation " +"\"参数的支持([#1171](https://github.com/adap/flower/pull/1171))" -#: ../../source/ref-changelog.md:579 +#: ../../source/ref-changelog.md:706 msgid "" "New documentation for [implementing " -"strategies](https://flower.dev/docs/framework/how-to-implement-" +"strategies](https://flower.ai/docs/framework/how-to-implement-" "strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " "[#1175](https://github.com/adap/flower/pull/1175))" msgstr "" +"[实施战略](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) 的新文件([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175)" -#: ../../source/ref-changelog.md:580 +#: ../../source/ref-changelog.md:707 msgid "" "New mobile-friendly documentation theme " "([#1174](https://github.com/adap/flower/pull/1174))" -msgstr "" +msgstr "新的移动友好型文档主题 ([#1174](https://github.com/adap/flower/pull/1174))" -#: ../../source/ref-changelog.md:581 +#: ../../source/ref-changelog.md:708 msgid "" "Limit version range for (optional) `ray` dependency to include only " "compatible releases (`>=1.9.2,<1.12.0`) " "([#1205](https://github.com/adap/flower/pull/1205))" msgstr "" +"限制(可选)`ray`依赖的版本范围,使其仅包含兼容版本(`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" -#: 
../../source/ref-changelog.md:585 +#: ../../source/ref-changelog.md:712 msgid "" "**Remove deprecated support for Python 3.6** " "([#871](https://github.com/adap/flower/pull/871))" -msgstr "" +msgstr "**删除对 Python 3.6 的过时支持** ([#871](https://github.com/adap/flower/pull/871))" -#: ../../source/ref-changelog.md:586 +#: ../../source/ref-changelog.md:713 msgid "" "**Remove deprecated KerasClient** " "([#857](https://github.com/adap/flower/pull/857))" -msgstr "" +msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:587 +#: ../../source/ref-changelog.md:714 msgid "" "**Remove deprecated no-op extra installs** " "([#973](https://github.com/adap/flower/pull/973))" -msgstr "" +msgstr "**移除过时的不操作额外安装** ([#973](https://github.com/adap/flower/pull/973))" -#: ../../source/ref-changelog.md:588 +#: ../../source/ref-changelog.md:715 msgid "" "**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " "([#869](https://github.com/adap/flower/pull/869))" msgstr "" +"**从** `FitRes` **和** `EvaluateRes` 中移除已废弃的 proto 字段 " +"([#869](https://github.com/adap/flower/pull/869))" -#: ../../source/ref-changelog.md:589 +#: ../../source/ref-changelog.md:716 msgid "" "**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " "([#1107](https://github.com/adap/flower/pull/1107))" msgstr "" +"**移除过时的 QffedAvg 策略(由 QFedAvg 取代)** " +"([#1107](https://github.com/adap/flower/pull/1107))" -#: ../../source/ref-changelog.md:590 +#: ../../source/ref-changelog.md:717 msgid "" "**Remove deprecated DefaultStrategy strategy** " "([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" +"**删除过时的 DefaultStrategy 策略** " +"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/ref-changelog.md:591 +#: ../../source/ref-changelog.md:718 msgid "" "**Remove deprecated support for eval_fn accuracy return value** " "([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" +"**删除已过时的对 eval_fn 返回值准确性的支持** " 
+"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/ref-changelog.md:592 +#: ../../source/ref-changelog.md:719 msgid "" "**Remove deprecated support for passing initial parameters as NumPy " "ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" +"**移除对以 NumPy ndarrays 传递初始参数的过时支持** " +"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/ref-changelog.md:594 +#: ../../source/ref-changelog.md:721 msgid "v0.18.0 (2022-02-28)" -msgstr "" +msgstr "v0.18.0 (2022-02-28)" -#: ../../source/ref-changelog.md:598 +#: ../../source/ref-changelog.md:725 msgid "" "**Improved Virtual Client Engine compatibility with Jupyter Notebook / " "Google Colab** ([#866](https://github.com/adap/flower/pull/866), " @@ -9151,55 +14872,69 @@ msgid "" "[#833](https://github.com/adap/flower/pull/833), " "[#1036](https://github.com/adap/flower/pull/1036))" msgstr "" +"**改进了虚拟客户端引擎与 Jupyter Notebook / Google Colab 的兼容性** " +"([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" -#: ../../source/ref-changelog.md:600 +#: ../../source/ref-changelog.md:727 msgid "" "Simulations (using the Virtual Client Engine through `start_simulation`) " "now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " "installing Flower with the `simulation` extra (`pip install " "flwr[simulation]`)." 
msgstr "" +"通过 `start_simulation` 在 Jupyter 笔记本(包括 Google Colab)上安装 Flower 并附加 " +"`simulation` (`pip install flwr[simulation]`)后,模拟(通过 `start_simulation` " +"使用虚拟客户端引擎)现在可以更流畅地运行。" -#: ../../source/ref-changelog.md:602 +#: ../../source/ref-changelog.md:729 msgid "" "**New Jupyter Notebook code example** " "([#833](https://github.com/adap/flower/pull/833))" msgstr "" +"**新的 Jupyter Notebook 代码示例** " +"([#833](https://github.com/adap/flower/pull/833))" -#: ../../source/ref-changelog.md:604 +#: ../../source/ref-changelog.md:731 msgid "" "A new code example (`quickstart_simulation`) demonstrates Flower " "simulations using the Virtual Client Engine through Jupyter Notebook " "(incl. Google Colab)." msgstr "" +"新代码示例(`quickstart_simulation`)通过 Jupyter Notebook(包括 Google " +"Colab)演示了使用虚拟客户端引擎进行 Flower 模拟。" -#: ../../source/ref-changelog.md:606 +#: ../../source/ref-changelog.md:733 msgid "" "**Client properties (feature preview)** " "([#795](https://github.com/adap/flower/pull/795))" -msgstr "" +msgstr "**客户端属性(功能预览)** ([#795](https://github.com/adap/flower/pull/795))" -#: ../../source/ref-changelog.md:608 +#: ../../source/ref-changelog.md:735 msgid "" "Clients can implement a new method `get_properties` to enable server-side" " strategies to query client properties." -msgstr "" +msgstr "客户端可以实现一个新方法 `get_properties`,以启用服务器端策略来查询客户端属性。" -#: ../../source/ref-changelog.md:610 +#: ../../source/ref-changelog.md:737 msgid "" "**Experimental Android support with TFLite** " "([#865](https://github.com/adap/flower/pull/865))" -msgstr "" +msgstr "** 使用 TFLite 实验性支持安卓系统** ([#865](https://github.com/adap/flower/pull/865))" -#: ../../source/ref-changelog.md:612 +#: ../../source/ref-changelog.md:739 msgid "" "Android support has finally arrived in `main`! Flower is both client-" "agnostic and framework-agnostic by design. One can integrate arbitrary " "client platforms and with this release, using Flower on Android has " "become a lot easier." 
msgstr "" +"`main`终于支持 Android 了!Flower 的设计与客户端和框架无关。我们可以集成任意客户端平台,有了这个版本,在安卓系统上使用 " +"Flower 就变得更容易了。" -#: ../../source/ref-changelog.md:614 +#: ../../source/ref-changelog.md:741 msgid "" "The example uses TFLite on the client side, along with a new " "`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " @@ -9207,46 +14942,57 @@ msgid "" "Android SDK and a unified `FedAvg` implementation that integrated the new" " functionality from `FedAvgAndroid`." msgstr "" +"该示例在客户端使用了 TFLite 以及新的 `FedAvgAndroid`策略。Android 客户端和 " +"`FedAvgAndroid`仍处于试验阶段,但这是向成熟的 Android SDK 和集成了 `FedAvgAndroid`新功能的统一 " +"`FedAvg`实现迈出的第一步。" -#: ../../source/ref-changelog.md:616 +#: ../../source/ref-changelog.md:743 msgid "" "**Make gRPC keepalive time user-configurable and decrease default " "keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" msgstr "" +"**使 gRPC 保持连接时间可由用户配置,并缩短默认保持连接时间** " +"([#1069](https://github.com/adap/flower/pull/1069))" -#: ../../source/ref-changelog.md:618 +#: ../../source/ref-changelog.md:745 msgid "" "The default gRPC keepalive time has been reduced to increase the " "compatibility of Flower with more cloud environments (for example, " "Microsoft Azure). Users can configure the keepalive time to customize the" " gRPC stack based on specific requirements." msgstr "" +"为提高 Flower 与更多云环境(如 Microsoft Azure)的兼容性,缩短了默认 gRPC 保持时间。用户可以根据具体要求配置 " +"keepalive 时间,自定义 gRPC 堆栈。" -#: ../../source/ref-changelog.md:620 +#: ../../source/ref-changelog.md:747 msgid "" "**New differential privacy example using Opacus and PyTorch** " "([#805](https://github.com/adap/flower/pull/805))" msgstr "" +"**使用 Opacus 和 PyTorch 的新差分隐私示例** " +"([#805](https://github.com/adap/flower/pull/805))" -#: ../../source/ref-changelog.md:622 +#: ../../source/ref-changelog.md:749 msgid "" "A new code example (`opacus`) demonstrates differentially-private " "federated learning with Opacus, PyTorch, and Flower." 
-msgstr "" +msgstr "一个新的代码示例(\"opacus\")演示了使用 Opacus、PyTorch 和 Flower 进行差分隐私的联邦学习。" -#: ../../source/ref-changelog.md:624 +#: ../../source/ref-changelog.md:751 msgid "" "**New Hugging Face Transformers code example** " "([#863](https://github.com/adap/flower/pull/863))" msgstr "" +"**新的Hugging Face Transformers代码示例** " +"([#863](https://github.com/adap/flower/pull/863))" -#: ../../source/ref-changelog.md:626 +#: ../../source/ref-changelog.md:753 msgid "" "A new code example (`quickstart_huggingface`) demonstrates usage of " "Hugging Face Transformers with Flower." -msgstr "" +msgstr "新的代码示例(`quickstart_huggingface`)证明了结合Flower和Hugging Face Transformers的实用性。" -#: ../../source/ref-changelog.md:628 +#: ../../source/ref-changelog.md:755 msgid "" "**New MLCube code example** " "([#779](https://github.com/adap/flower/pull/779), " @@ -9254,14 +15000,18 @@ msgid "" "[#1065](https://github.com/adap/flower/pull/1065), " "[#1090](https://github.com/adap/flower/pull/1090))" msgstr "" +"**新的 MLCube 代码示例** ([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:630 +#: ../../source/ref-changelog.md:757 msgid "" "A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " "with Flower." 
-msgstr "" +msgstr "新代码示例(\"quickstart_mlcube\")演示了 MLCube 与 Flower 的用法。" -#: ../../source/ref-changelog.md:632 +#: ../../source/ref-changelog.md:759 msgid "" "**SSL-enabled server and client** " "([#842](https://github.com/adap/flower/pull/842), " @@ -9271,34 +15021,45 @@ msgid "" "[#993](https://github.com/adap/flower/pull/993), " "[#994](https://github.com/adap/flower/pull/994))" msgstr "" +"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:634 +#: ../../source/ref-changelog.md:761 msgid "" "SSL enables secure encrypted connections between clients and servers. " "This release open-sources the Flower secure gRPC implementation to make " "encrypted communication channels accessible to all Flower users." -msgstr "" +msgstr "SSL 可实现客户端与服务器之间的安全加密连接。该版本开源了 Flower 安全 gRPC 实现,使所有 Flower 用户都能访问加密通信通道。" -#: ../../source/ref-changelog.md:636 +#: ../../source/ref-changelog.md:763 msgid "" "**Updated** `FedAdam` **and** `FedYogi` **strategies** " "([#885](https://github.com/adap/flower/pull/885), " "[#895](https://github.com/adap/flower/pull/895))" msgstr "" +"**更新**`FedAdam`**和**`FedYogi`**战略** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" -#: ../../source/ref-changelog.md:638 +#: ../../source/ref-changelog.md:765 msgid "" "`FedAdam` and `FedAdam` match the latest version of the Adaptive " "Federated Optimization paper." 
-msgstr "" +msgstr "FedAdam \"和 \"FedAdam \"与最新版本的 \"自适应联邦优化 \"论文相匹配。" -#: ../../source/ref-changelog.md:640 +#: ../../source/ref-changelog.md:767 msgid "" "**Initialize** `start_simulation` **with a list of client IDs** " "([#860](https://github.com/adap/flower/pull/860))" msgstr "" +"**初始化** `start_simulation` **使用客户端 ID 列表** " +"([#860](https://github.com/adap/flower/pull/860))" -#: ../../source/ref-changelog.md:642 +#: ../../source/ref-changelog.md:769 msgid "" "`start_simulation` can now be called with a list of client IDs " "(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " @@ -9306,56 +15067,69 @@ msgid "" "easier to load data partitions that are not accessible through `int` " "identifiers." msgstr "" +"现在可以使用客户端 ID 列表(`clients_ids`,类型:`List[str]`)调用 " +"`start_simulation`。每当需要初始化客户端时,这些 ID 就会被传递到 `client_fn` 中,这样就能更轻松地加载无法通过 " +"`int` 标识符访问的数据分区。" -#: ../../source/ref-changelog.md:646 +#: ../../source/ref-changelog.md:773 msgid "" "Update `num_examples` calculation in PyTorch code examples in " "([#909](https://github.com/adap/flower/pull/909))" msgstr "" +"更新 PyTorch 代码示例中的 \"num_examples \"计算 " +"([#909](https://github.com/adap/flower/pull/909))" -#: ../../source/ref-changelog.md:647 +#: ../../source/ref-changelog.md:774 msgid "" "Expose Flower version through `flwr.__version__` " "([#952](https://github.com/adap/flower/pull/952))" msgstr "" +"通过 `flwr.__version__` 公开 Flower 版本 " +"([#952](https://github.com/adap/flower/pull/952))" -#: ../../source/ref-changelog.md:648 +#: ../../source/ref-changelog.md:775 msgid "" "`start_server` in `app.py` now returns a `History` object containing " "metrics from training ([#974](https://github.com/adap/flower/pull/974))" msgstr "" +"`app.py`中的 `start_server`现在会返回一个 `History` " +"对象,其中包含训练中的指标([#974](https://github.com/adap/flower/pull/974))" -#: ../../source/ref-changelog.md:649 +#: ../../source/ref-changelog.md:776 msgid "" "Make `max_workers` (used by `ThreadPoolExecutor`) configurable " 
"([#978](https://github.com/adap/flower/pull/978))" msgstr "" +"使 `max_workers`(由 " +"`ThreadPoolExecutor`使用)可配置([#978](https://github.com/adap/flower/pull/978))" -#: ../../source/ref-changelog.md:650 +#: ../../source/ref-changelog.md:777 msgid "" "Increase sleep time after server start to three seconds in all code " "examples ([#1086](https://github.com/adap/flower/pull/1086))" -msgstr "" +msgstr "在所有代码示例中,将服务器启动后的休眠时间延长至三秒([#1086](https://github.com/adap/flower/pull/1086))" -#: ../../source/ref-changelog.md:651 +#: ../../source/ref-changelog.md:778 msgid "" "Added a new FAQ section to the documentation " "([#948](https://github.com/adap/flower/pull/948))" -msgstr "" +msgstr "在文档中添加了新的常见问题部分 ([#948](https://github.com/adap/flower/pull/948))" -#: ../../source/ref-changelog.md:652 +#: ../../source/ref-changelog.md:779 msgid "" "And many more under-the-hood changes, library updates, documentation " "changes, and tooling improvements!" -msgstr "" +msgstr "还有更多底层更改、库更新、文档更改和工具改进!" -#: ../../source/ref-changelog.md:656 +#: ../../source/ref-changelog.md:783 msgid "" "**Removed** `flwr_example` **and** `flwr_experimental` **from release " "build** ([#869](https://github.com/adap/flower/pull/869))" msgstr "" +"**从发布版中删除**`flwr_example`**和**`flwr_experimental`** " +"([#869](https://github.com/adap/flower/pull/869))" -#: ../../source/ref-changelog.md:658 +#: ../../source/ref-changelog.md:785 msgid "" "The packages `flwr_example` and `flwr_experimental` have been deprecated " "since Flower 0.12.0 and they are not longer included in Flower release " @@ -9363,20 +15137,26 @@ msgid "" "tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " "an upcoming release." 
msgstr "" +"自 Flower 0.12.0 起,软件包 `flwr_example` 和 `flwr_experimental` 已被弃用,它们不再包含在 " +"Flower 的发布版本中。相关的额外包(`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`)现在已不再使用,并将在即将发布的版本中移除。" -#: ../../source/ref-changelog.md:660 +#: ../../source/ref-changelog.md:787 msgid "v0.17.0 (2021-09-24)" -msgstr "" +msgstr "v0.17.0 (2021-09-24)" -#: ../../source/ref-changelog.md:664 +#: ../../source/ref-changelog.md:791 msgid "" "**Experimental virtual client engine** " "([#781](https://github.com/adap/flower/pull/781) " "[#790](https://github.com/adap/flower/pull/790) " "[#791](https://github.com/adap/flower/pull/791))" msgstr "" +"**实验性虚拟客户端引擎** ([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" -#: ../../source/ref-changelog.md:666 +#: ../../source/ref-changelog.md:793 msgid "" "One of Flower's goals is to enable research at scale. This release " "enables a first (experimental) peek at a major new feature, codenamed the" @@ -9385,109 +15165,122 @@ msgid "" "The easiest way to test the new functionality is to look at the two new " "code examples called `quickstart_simulation` and `simulation_pytorch`." msgstr "" +"Flower 的目标之一是实现大规模研究。这一版本首次(试验性地)展示了代号为 \"虚拟客户端引擎 " +"\"的重要新功能。虚拟客户端可以在单台机器或计算集群上对大量客户端进行模拟。测试新功能的最简单方法是查看名为 " +"\"quickstart_simulation \"和 \"simulation_pytorch \"的两个新代码示例。" -#: ../../source/ref-changelog.md:668 +#: ../../source/ref-changelog.md:795 msgid "" "The feature is still experimental, so there's no stability guarantee for " "the API. It's also not quite ready for prime time and comes with a few " "known caveats. However, those who are curious are encouraged to try it " "out and share their thoughts." 
msgstr "" +"该功能仍处于试验阶段,因此无法保证 API " +"的稳定性。此外,它还没有完全准备好进入黄金时间,并有一些已知的注意事项。不过,我们鼓励好奇的用户尝试使用并分享他们的想法。" -#: ../../source/ref-changelog.md:670 +#: ../../source/ref-changelog.md:797 msgid "" "**New built-in strategies** " "([#828](https://github.com/adap/flower/pull/828) " "[#822](https://github.com/adap/flower/pull/822))" msgstr "" +"**新的内置策略**([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822)" -#: ../../source/ref-changelog.md:672 +#: ../../source/ref-changelog.md:799 msgid "" "FedYogi - Federated learning strategy using Yogi on server-side. " "Implementation based on https://arxiv.org/abs/2003.00295" -msgstr "" +msgstr "FedYogi - 在服务器端使用 Yogi 的联邦学习策略。基于 https://arxiv.org/abs/2003.00295 实现" -#: ../../source/ref-changelog.md:673 +#: ../../source/ref-changelog.md:800 msgid "" "FedAdam - Federated learning strategy using Adam on server-side. " "Implementation based on https://arxiv.org/abs/2003.00295" -msgstr "" +msgstr "FedAdam - 在服务器端使用 Adam 的联邦学习策略。基于 https://arxiv.org/abs/2003.00295 实现" -#: ../../source/ref-changelog.md:675 +#: ../../source/ref-changelog.md:802 msgid "" "**New PyTorch Lightning code example** " "([#617](https://github.com/adap/flower/pull/617))" msgstr "" +"**新的 PyTorch Lightning 代码示例** " +"([#617](https://github.com/adap/flower/pull/617))" -#: ../../source/ref-changelog.md:677 +#: ../../source/ref-changelog.md:804 msgid "" "**New Variational Auto-Encoder code example** " "([#752](https://github.com/adap/flower/pull/752))" -msgstr "" +msgstr "**新的变分自动编码器代码示例** ([#752](https://github.com/adap/flower/pull/752))" -#: ../../source/ref-changelog.md:679 +#: ../../source/ref-changelog.md:806 msgid "" "**New scikit-learn code example** " "([#748](https://github.com/adap/flower/pull/748))" -msgstr "" +msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/ref-changelog.md:681 +#: ../../source/ref-changelog.md:808 msgid "" "**New experimental TensorBoard strategy** 
" "([#789](https://github.com/adap/flower/pull/789))" -msgstr "" +msgstr "**新的实验性 TensorBoard 策略**([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/ref-changelog.md:685 +#: ../../source/ref-changelog.md:812 msgid "" "Improved advanced TensorFlow code example " "([#769](https://github.com/adap/flower/pull/769))" -msgstr "" +msgstr "改进的高级 TensorFlow 代码示例([#769](https://github.com/adap/flower/pull/769)" -#: ../../source/ref-changelog.md:686 +#: ../../source/ref-changelog.md:813 msgid "" "Warning when `min_available_clients` is misconfigured " "([#830](https://github.com/adap/flower/pull/830))" msgstr "" +"当 `min_available_clients` 配置错误时发出警告 " +"([#830](https://github.com/adap/flower/pull/830))" -#: ../../source/ref-changelog.md:687 +#: ../../source/ref-changelog.md:814 msgid "" "Improved gRPC server docs " "([#841](https://github.com/adap/flower/pull/841))" -msgstr "" +msgstr "改进了 gRPC 服务器文档([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/ref-changelog.md:688 +#: ../../source/ref-changelog.md:815 msgid "" "Improved error message in `NumPyClient` " "([#851](https://github.com/adap/flower/pull/851))" -msgstr "" +msgstr "改进了 `NumPyClient` 中的错误信息 ([#851](https://github.com/adap/flower/pull/851))" -#: ../../source/ref-changelog.md:689 +#: ../../source/ref-changelog.md:816 msgid "" "Improved PyTorch quickstart code example " "([#852](https://github.com/adap/flower/pull/852))" -msgstr "" +msgstr "改进的 PyTorch 快速启动代码示例 ([#852](https://github.com/adap/flower/pull/852))" -#: ../../source/ref-changelog.md:693 +#: ../../source/ref-changelog.md:820 msgid "" "**Disabled final distributed evaluation** " "([#800](https://github.com/adap/flower/pull/800))" -msgstr "" +msgstr "**禁用最终分布式评价** ([#800](https://github.com/adap/flower/pull/800))" -#: ../../source/ref-changelog.md:695 +#: ../../source/ref-changelog.md:822 msgid "" "Prior behaviour was to perform a final round of distributed evaluation on" " all connected clients, which is often not required 
(e.g., when using " "server-side evaluation). The prior behaviour can be enabled by passing " "`force_final_distributed_eval=True` to `start_server`." msgstr "" +"之前的行为是在所有连接的客户端上执行最后一轮分布式评估,而这通常是不需要的(例如,在使用服务器端评估时)。可以通过向 `start_server`" +" 传递 `force_final_distributed_eval=True` 来启用之前的行为。" -#: ../../source/ref-changelog.md:697 +#: ../../source/ref-changelog.md:824 msgid "" "**Renamed q-FedAvg strategy** " "([#802](https://github.com/adap/flower/pull/802))" -msgstr "" +msgstr "**更名为 q-FedAvg 策略** ([#802](https://github.com/adap/flower/pull/802))" -#: ../../source/ref-changelog.md:699 +#: ../../source/ref-changelog.md:826 msgid "" "The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " "the notation given in the original paper (q-FFL is the optimization " @@ -9495,15 +15288,19 @@ msgid "" "deprecated) `QffedAvg` class is still available for compatibility reasons" " (it will be removed in a future release)." msgstr "" +"名为 `QffedAvg` 的策略已更名为 `QFedAvg`,以更好地反映原始论文中给出的符号(q-FFL 是优化目标,q-FedAvg " +"是建议的求解器)。请注意,出于兼容性原因,原始(现已废弃)的 `QffedAvg` 类仍然可用(它将在未来的版本中移除)。" -#: ../../source/ref-changelog.md:701 +#: ../../source/ref-changelog.md:828 msgid "" "**Deprecated and renamed code example** `simulation_pytorch` **to** " "`simulation_pytorch_legacy` " "([#791](https://github.com/adap/flower/pull/791))" msgstr "" +"**删除并重命名代码示例**`simulation_pytorch`**为**`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" -#: ../../source/ref-changelog.md:703 +#: ../../source/ref-changelog.md:830 msgid "" "This example has been replaced by a new example. The new example is based" " on the experimental virtual client engine, which will become the new " @@ -9511,32 +15308,30 @@ msgid "" " existing example was kept for reference purposes, but it might be " "removed in the future." 
msgstr "" +"该示例已被新示例取代。新示例基于试验性虚拟客户端引擎,它将成为在 Flower " +"中进行大多数类型大规模模拟的新的默认方式。现有示例将作为参考保留,但将来可能会删除。" -#: ../../source/ref-changelog.md:705 +#: ../../source/ref-changelog.md:832 msgid "v0.16.0 (2021-05-11)" -msgstr "" +msgstr "v0.16.0 (2021-05-11)" -#: ../../source/ref-changelog.md:709 +#: ../../source/ref-changelog.md:836 msgid "" "**New built-in strategies** " "([#549](https://github.com/adap/flower/pull/549))" -msgstr "" +msgstr "**新的内置策略** ([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/ref-changelog.md:711 +#: ../../source/ref-changelog.md:838 msgid "(abstract) FedOpt" -msgstr "" - -#: ../../source/ref-changelog.md:712 -msgid "FedAdagrad" -msgstr "" +msgstr "(摘要) FedOpt" -#: ../../source/ref-changelog.md:714 +#: ../../source/ref-changelog.md:841 msgid "" "**Custom metrics for server and strategies** " "([#717](https://github.com/adap/flower/pull/717))" -msgstr "" +msgstr "**服务器和策略的自定义指标** ([#717](https://github.com/adap/flower/pull/717))" -#: ../../source/ref-changelog.md:716 +#: ../../source/ref-changelog.md:843 msgid "" "The Flower server is now fully task-agnostic, all remaining instances of " "task-specific metrics (such as `accuracy`) have been replaced by custom " @@ -9544,8 +15339,10 @@ msgid "" "dictionary containing custom metrics from client to server. As of this " "release, custom metrics replace task-specific metrics on the server." msgstr "" +"Flower 服务器现在完全与任务无关,所有剩余的任务特定度量(如 \"准确度\")都已被自定义度量字典取代。Flower 0.15 " +"引入了从客户端向服务器传递包含自定义指标的字典的功能。从本版本开始,自定义指标将取代服务器上的特定任务指标。" -#: ../../source/ref-changelog.md:718 +#: ../../source/ref-changelog.md:845 msgid "" "Custom metric dictionaries are now used in two user-facing APIs: they are" " returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " @@ -9554,28 +15351,34 @@ msgid "" "even return *aggregated* metrics dictionaries for the server to keep " "track of." 
msgstr "" +"自定义度量字典现在可在两个面向用户的 API 中使用:它们可从策略方法 `aggregate_fit`/`aggregate_evaluate` " +"返回,还可使传递给内置策略(通过 `eval_fn`)的评估函数返回两个以上的评估度量。策略甚至可以返回 *aggregated* " +"指标字典,以便服务器跟踪。" -#: ../../source/ref-changelog.md:720 +#: ../../source/ref-changelog.md:847 msgid "" "Stratey implementations should migrate their `aggregate_fit` and " "`aggregate_evaluate` methods to the new return type (e.g., by simply " "returning an empty `{}`), server-side evaluation functions should migrate" " from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." msgstr "" +"Stratey 实现应将其 `aggregate_fit` 和 `aggregate_evaluate` " +"方法迁移到新的返回类型(例如,只需返回空的 `{}`),服务器端评估函数应从 `return loss, accuracy` 迁移到 " +"`return loss, {\"accuracy\": accuracy}`。" -#: ../../source/ref-changelog.md:722 +#: ../../source/ref-changelog.md:849 msgid "" "Flower 0.15-style return types are deprecated (but still supported), " "compatibility will be removed in a future release." -msgstr "" +msgstr "Flower 0.15 风格的返回类型已被弃用(但仍受支持),兼容性将在未来的版本中移除。" -#: ../../source/ref-changelog.md:724 +#: ../../source/ref-changelog.md:851 msgid "" "**Migration warnings for deprecated functionality** " "([#690](https://github.com/adap/flower/pull/690))" -msgstr "" +msgstr "** 过时功能的迁移警告** ([#690](https://github.com/adap/flower/pull/690))" -#: ../../source/ref-changelog.md:726 +#: ../../source/ref-changelog.md:853 msgid "" "Earlier versions of Flower were often migrated to new APIs, while " "maintaining compatibility with legacy APIs. This release introduces " @@ -9583,34 +15386,43 @@ msgid "" "new warning messages often provide details on how to migrate to more " "recent APIs, thus easing the transition from one release to another." 
msgstr "" +"Flower 早期版本通常会迁移到新的应用程序接口,同时保持与旧版应用程序接口的兼容。如果检测到使用了过时的 " +"API,本版本将引入详细的警告信息。新的警告信息通常会详细说明如何迁移到更新的 API,从而简化从一个版本到另一个版本的过渡。" -#: ../../source/ref-changelog.md:728 +#: ../../source/ref-changelog.md:855 msgid "" "Improved docs and docstrings " "([#691](https://github.com/adap/flower/pull/691) " "[#692](https://github.com/adap/flower/pull/692) " "[#713](https://github.com/adap/flower/pull/713))" msgstr "" +"改进了文档和文档说明 ([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" -#: ../../source/ref-changelog.md:730 +#: ../../source/ref-changelog.md:857 msgid "MXNet example and documentation" -msgstr "" +msgstr "MXNet 示例和文档" -#: ../../source/ref-changelog.md:732 +#: ../../source/ref-changelog.md:859 msgid "" "FedBN implementation in example PyTorch: From Centralized To Federated " "([#696](https://github.com/adap/flower/pull/696) " "[#702](https://github.com/adap/flower/pull/702) " "[#705](https://github.com/adap/flower/pull/705))" msgstr "" +"PyTorch 示例中的 FedBN 实现: 从集中到联邦 " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" -#: ../../source/ref-changelog.md:736 +#: ../../source/ref-changelog.md:863 msgid "" "**Serialization-agnostic server** " "([#721](https://github.com/adap/flower/pull/721))" -msgstr "" +msgstr "**序列化无关服务器** ([#721](https://github.com/adap/flower/pull/721))" -#: ../../source/ref-changelog.md:738 +#: ../../source/ref-changelog.md:865 msgid "" "The Flower server is now fully serialization-agnostic. Prior usage of " "class `Weights` (which represents parameters as deserialized NumPy " @@ -9620,8 +15432,11 @@ msgid "" "these byte arrays should be interpreted (e.g., for " "serialization/deserialization)." 
msgstr "" +"Flower 服务器现在完全不依赖序列化。之前使用的 `Weights` 类(以反序列化的 NumPy ndarrays 表示参数)已被 " +"`Parameters` 类取代(例如在 `Strategy`中)。参数 " +"\"对象与序列化完全无关,它以字节数组的形式表示参数,\"tensor_type \"属性表示如何解释这些字节数组(例如,用于序列化/反序列化)。" -#: ../../source/ref-changelog.md:740 +#: ../../source/ref-changelog.md:867 msgid "" "Built-in strategies implement this approach by handling serialization and" " deserialization to/from `Weights` internally. Custom/3rd-party Strategy " @@ -9630,86 +15445,105 @@ msgid "" "[#721](https://github.com/adap/flower/pull/721) to see how strategies can" " easily migrate to the new format." msgstr "" +"内置策略通过在内部处理序列化和反序列化到/从`Weights`来实现这种方法。自定义/第三方策略实现应更新为稍有改动的策略方法定义。策略作者可查阅" +" PR [#721](https://github.com/adap/flower/pull/721) 以了解如何将策略轻松迁移到新格式。" -#: ../../source/ref-changelog.md:742 +#: ../../source/ref-changelog.md:869 msgid "" "Deprecated `flwr.server.Server.evaluate`, use " "`flwr.server.Server.evaluate_round` instead " "([#717](https://github.com/adap/flower/pull/717))" msgstr "" +"已弃用 `flwr.server.Server.evaluate`,改用 " +"`flwr.server.Server.evaluate_round`([#717](https://github.com/adap/flower/pull/717)" -#: ../../source/ref-changelog.md:744 +#: ../../source/ref-changelog.md:871 msgid "v0.15.0 (2021-03-12)" -msgstr "" +msgstr "v0.15.0 (2021-03-12)" -#: ../../source/ref-changelog.md:748 +#: ../../source/ref-changelog.md:875 msgid "" "**Server-side parameter initialization** " "([#658](https://github.com/adap/flower/pull/658))" -msgstr "" +msgstr "**服务器端参数初始化** ([#658](https://github.com/adap/flower/pull/658))" -#: ../../source/ref-changelog.md:750 +#: ../../source/ref-changelog.md:877 msgid "" "Model parameters can now be initialized on the server-side. Server-side " "parameter initialization works via a new `Strategy` method called " "`initialize_parameters`." 
msgstr "" +"现在可以在服务器端初始化模型参数。服务器端参数初始化通过名为 \"initialize_parameters \"的新 \"Strategy " +"\"方法进行。" -#: ../../source/ref-changelog.md:752 +#: ../../source/ref-changelog.md:879 msgid "" "Built-in strategies support a new constructor argument called " "`initial_parameters` to set the initial parameters. Built-in strategies " "will provide these initial parameters to the server on startup and then " "delete them to free the memory afterwards." msgstr "" +"内置策略支持名为 \"initial_parameters " +"\"的新构造函数参数,用于设置初始参数。内置策略会在启动时向服务器提供这些初始参数,然后删除它们以释放内存。" -#: ../../source/ref-changelog.md:771 +#: ../../source/ref-changelog.md:898 msgid "" "If no initial parameters are provided to the strategy, the server will " "continue to use the current behaviour (namely, it will ask one of the " "connected clients for its parameters and use these as the initial global " "parameters)." -msgstr "" +msgstr "如果没有向策略提供初始参数,服务器将继续使用当前行为(即向其中一个已连接的客户端询问参数,并将这些参数用作初始全局参数)。" -#: ../../source/ref-changelog.md:773 +#: ../../source/ref-changelog.md:900 msgid "Deprecations" -msgstr "" +msgstr "停用" -#: ../../source/ref-changelog.md:775 +#: ../../source/ref-changelog.md:902 msgid "" "Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " "`flwr.server.strategy.FedAvg`, which is equivalent)" msgstr "" +"停用 `flwr.server.strategy.DefaultStrategy`(迁移到等价的 " +"`flwr.server.strategy.FedAvg`)" -#: ../../source/ref-changelog.md:777 +#: ../../source/ref-changelog.md:904 msgid "v0.14.0 (2021-02-18)" -msgstr "" +msgstr "v0.14.0 (2021-02-18)" -#: ../../source/ref-changelog.md:781 +#: ../../source/ref-changelog.md:908 msgid "" "**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " "([#610](https://github.com/adap/flower/pull/610) " "[#572](https://github.com/adap/flower/pull/572) " "[#633](https://github.com/adap/flower/pull/633))" msgstr "" +"**通用** `Client.fit` **和** `Client.evaluate` **返回值** " +"([#610](https://github.com/adap/flower/pull/610) " 
+"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" -#: ../../source/ref-changelog.md:783 +#: ../../source/ref-changelog.md:910 msgid "" "Clients can now return an additional dictionary mapping `str` keys to " "values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " "This means one can return almost arbitrary values from `fit`/`evaluate` " "and make use of them on the server side!" msgstr "" +"客户端现在可以返回一个额外的字典,将 `str` 键映射为以下类型的值: " +"bool`、`bytes`、`float`、`int`、`str`。这意味着我们可以从 `fit`/`evaluate` " +"返回几乎任意的值,并在服务器端使用它们!" -#: ../../source/ref-changelog.md:785 +#: ../../source/ref-changelog.md:912 msgid "" "This improvement also allowed for more consistent return types between " "`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " "dict)` representing the loss, number of examples, and a dictionary " "holding arbitrary problem-specific values like accuracy." msgstr "" +"这一改进还使 `fit` 和 `evaluate` 之间的返回类型更加一致:`evaluate` 现在应返回一个元组`(float, int, " +"dict)`,代表损失、示例数和一个包含特定问题任意值(如准确度)的字典。" -#: ../../source/ref-changelog.md:787 +#: ../../source/ref-changelog.md:914 msgid "" "In case you wondered: this feature is compatible with existing projects, " "the additional dictionary return value is optional. New code should " @@ -9718,124 +15552,142 @@ msgid "" "`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " "details." 
msgstr "" +"如果你想知道:此功能与现有项目兼容,额外的字典返回值是可选的。不过,新代码应迁移到新的返回类型,以便与即将发布的 Flower " +"版本兼容(`fit`: `List[np.ndarray], int, Dict[str, Scalar]`,`evaluate`: " +"`float, int, Dict[str, Scalar]`)。详见下面的示例。" -#: ../../source/ref-changelog.md:789 +#: ../../source/ref-changelog.md:916 msgid "" "*Code example:* note the additional dictionary return values in both " "`FlwrClient.fit` and `FlwrClient.evaluate`:" -msgstr "" +msgstr "*代码示例:* 注意 `FlwrClient.fit` 和 `FlwrClient.evaluate` 中的附加字典返回值:" -#: ../../source/ref-changelog.md:804 +#: ../../source/ref-changelog.md:931 msgid "" "**Generalized** `config` **argument in** `Client.fit` **and** " "`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" msgstr "" +"**在**`Client.fit` " +"**和**`Client.evaluate`中泛化**`config`参数([#595](https://github.com/adap/flower/pull/595))" -#: ../../source/ref-changelog.md:806 +#: ../../source/ref-changelog.md:933 msgid "" "The `config` argument used to be of type `Dict[str, str]`, which means " "that dictionary values were expected to be strings. The new release " "generalizes this to enable values of the following types: `bool`, " "`bytes`, `float`, `int`, `str`." msgstr "" +"`config`参数曾是 \"字典[str, str]\"类型,这意味着字典值应是字符串。新版本将其扩展为以下类型的值: " +"bool`、`bytes`、`float`、`int`、`str`。" -#: ../../source/ref-changelog.md:808 +#: ../../source/ref-changelog.md:935 msgid "" "This means one can now pass almost arbitrary values to `fit`/`evaluate` " "using the `config` dictionary. Yay, no more `str(epochs)` on the server-" "side and `int(config[\"epochs\"])` on the client side!" msgstr "" +"这意味着现在可以使用 `config` 字典向 `fit`/`evaluate` 传递几乎任意的值。耶,服务器端不再需要 " +"`str(epochs)`,客户端不再需要 `int(config[\"epochs\"])`!" 
-#: ../../source/ref-changelog.md:810 +#: ../../source/ref-changelog.md:937 msgid "" "*Code example:* note that the `config` dictionary now contains non-`str` " "values in both `Client.fit` and `Client.evaluate`:" -msgstr "" +msgstr "*代码示例:* 注意 `config` 字典现在在 `Client.fit` 和 `Client.evaluate` 中都包含非 `str` 值:" -#: ../../source/ref-changelog.md:827 +#: ../../source/ref-changelog.md:954 msgid "v0.13.0 (2021-01-08)" -msgstr "" +msgstr "v0.13.0 (2021-01-08)" -#: ../../source/ref-changelog.md:831 +#: ../../source/ref-changelog.md:958 msgid "" "New example: PyTorch From Centralized To Federated " "([#549](https://github.com/adap/flower/pull/549))" -msgstr "" +msgstr "新示例: PyTorch 从集中到联邦 ([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/ref-changelog.md:832 +#: ../../source/ref-changelog.md:959 msgid "Improved documentation" -msgstr "" +msgstr "改进文档" -#: ../../source/ref-changelog.md:833 +#: ../../source/ref-changelog.md:960 msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" -msgstr "" +msgstr "新文档主题 ([#551](https://github.com/adap/flower/pull/551))" -#: ../../source/ref-changelog.md:834 +#: ../../source/ref-changelog.md:961 msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" -msgstr "" +msgstr "新的 API 参考 ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/ref-changelog.md:835 +#: ../../source/ref-changelog.md:962 msgid "" "Updated examples documentation " "([#549](https://github.com/adap/flower/pull/549))" -msgstr "" +msgstr "更新了示例文档 ([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/ref-changelog.md:836 +#: ../../source/ref-changelog.md:963 msgid "" "Removed obsolete documentation " "([#548](https://github.com/adap/flower/pull/548))" -msgstr "" +msgstr "删除了过时的文档 ([#548](https://github.com/adap/flower/pull/548))" -#: ../../source/ref-changelog.md:838 +#: ../../source/ref-changelog.md:965 msgid "Bugfix:" -msgstr "" +msgstr "错误修正:" -#: ../../source/ref-changelog.md:840 
+#: ../../source/ref-changelog.md:967 msgid "" "`Server.fit` does not disconnect clients when finished, disconnecting the" " clients is now handled in `flwr.server.start_server` " "([#553](https://github.com/adap/flower/pull/553) " "[#540](https://github.com/adap/flower/issues/540))." msgstr "" +"Server.fit \"完成后不会断开客户端连接,现在断开客户端连接是在 \"flwr.server.start_server " +"\"中处理的([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))。" -#: ../../source/ref-changelog.md:842 +#: ../../source/ref-changelog.md:969 msgid "v0.12.0 (2020-12-07)" -msgstr "" +msgstr "v0.12.0 (2020-12-07)" -#: ../../source/ref-changelog.md:844 ../../source/ref-changelog.md:860 +#: ../../source/ref-changelog.md:971 ../../source/ref-changelog.md:987 msgid "Important changes:" -msgstr "" +msgstr "重要变更:" -#: ../../source/ref-changelog.md:846 +#: ../../source/ref-changelog.md:973 msgid "" "Added an example for embedded devices " "([#507](https://github.com/adap/flower/pull/507))" -msgstr "" +msgstr "添加了嵌入式设备示例 ([#507](https://github.com/adap/flower/pull/507))" -#: ../../source/ref-changelog.md:847 +#: ../../source/ref-changelog.md:974 msgid "" "Added a new NumPyClient (in addition to the existing KerasClient) " "([#504](https://github.com/adap/flower/pull/504) " "[#508](https://github.com/adap/flower/pull/508))" msgstr "" +"添加了一个新的 NumPyClient(除现有的 KerasClient " +"之外)([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508)" -#: ../../source/ref-changelog.md:848 +#: ../../source/ref-changelog.md:975 msgid "" "Deprecated `flwr_example` package and started to migrate examples into " "the top-level `examples` directory " "([#494](https://github.com/adap/flower/pull/494) " "[#512](https://github.com/adap/flower/pull/512))" msgstr "" +"弃用 `flwr_example` 软件包,并开始将示例迁移到顶层的 `examples` 目录 " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" -#: 
../../source/ref-changelog.md:850 +#: ../../source/ref-changelog.md:977 msgid "v0.11.0 (2020-11-30)" -msgstr "" +msgstr "v0.11.0 (2020-11-30)" -#: ../../source/ref-changelog.md:852 +#: ../../source/ref-changelog.md:979 msgid "Incompatible changes:" -msgstr "" +msgstr "不兼容的更改:" -#: ../../source/ref-changelog.md:854 +#: ../../source/ref-changelog.md:981 msgid "" "Renamed strategy methods " "([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " @@ -9844,57 +15696,68 @@ msgid "" "which is why we're removing it from the four methods in Strategy. To " "migrate rename the following `Strategy` methods accordingly:" msgstr "" +"重命名了策略方法([#486](https://github.com/adap/flower/pull/486)),以统一 Flower公共 " +"API 的命名。其他公共方法/函数(例如 `Client` 中的每个方法,以及 `Strategy.evaluate`)不使用 `on_` " +"前缀,这就是我们从 Strategy 中的四个方法中移除它的原因。迁移时,请相应地重命名以下 `Strategy` 方法:" -#: ../../source/ref-changelog.md:855 +#: ../../source/ref-changelog.md:982 msgid "`on_configure_evaluate` => `configure_evaluate`" -msgstr "" +msgstr "`on_configure_evaluate` => `configure_evaluate`" -#: ../../source/ref-changelog.md:856 +#: ../../source/ref-changelog.md:983 msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" -msgstr "" +msgstr "`on_aggregate_evaluate` => `aggregate_evaluate`" -#: ../../source/ref-changelog.md:857 +#: ../../source/ref-changelog.md:984 msgid "`on_configure_fit` => `configure_fit`" -msgstr "" +msgstr "`on_configure_fit` => `configure_fit`" -#: ../../source/ref-changelog.md:858 +#: ../../source/ref-changelog.md:985 msgid "`on_aggregate_fit` => `aggregate_fit`" -msgstr "" +msgstr "`on_aggregate_fit` => `aggregate_fit`" -#: ../../source/ref-changelog.md:862 +#: ../../source/ref-changelog.md:989 msgid "" "Deprecated `DefaultStrategy` " "([#479](https://github.com/adap/flower/pull/479)). To migrate use " "`FedAvg` instead." 
msgstr "" +"已废弃的 `DefaultStrategy` ([#479](https://github.com/adap/flower/pull/479)) " +"。迁移时请使用 `FedAvg`。" -#: ../../source/ref-changelog.md:863 +#: ../../source/ref-changelog.md:990 msgid "" "Simplified examples and baselines " "([#484](https://github.com/adap/flower/pull/484))." -msgstr "" +msgstr "简化示例和baselines([#484](https://github.com/adap/flower/pull/484))。" -#: ../../source/ref-changelog.md:864 +#: ../../source/ref-changelog.md:991 msgid "" "Removed presently unused `on_conclude_round` from strategy interface " "([#483](https://github.com/adap/flower/pull/483))." msgstr "" +"删除了策略界面中目前未使用的 " +"\"on_conclude_round\"([#483](https://github.com/adap/flower/pull/483))。" -#: ../../source/ref-changelog.md:865 +#: ../../source/ref-changelog.md:992 msgid "" "Set minimal Python version to 3.6.1 instead of 3.6.9 " "([#471](https://github.com/adap/flower/pull/471))." msgstr "" +"将最小 Python 版本设为 3.6.1,而不是 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." -#: ../../source/ref-changelog.md:866 +#: ../../source/ref-changelog.md:993 msgid "" "Improved `Strategy` docstrings " "([#470](https://github.com/adap/flower/pull/470))." msgstr "" +"改进了 `Strategy` " +"docstrings([#470](https://github.com/adap/flower/pull/470))。" #: ../../source/ref-example-projects.rst:2 msgid "Example projects" -msgstr "" +msgstr "项目实例" #: ../../source/ref-example-projects.rst:4 msgid "" @@ -9904,6 +15767,9 @@ msgid "" "frameworks such as `PyTorch `_ or `TensorFlow " "`_." msgstr "" +"Flower 附带了许多使用示例。这些示例演示了如何使用 Flower 联邦不同类型的现有机器学习形式,通常是利用流行的机器学习框架,如 " +"`PyTorch `_ 或 `TensorFlow " +"`_。" #: ../../source/ref-example-projects.rst:11 msgid "" @@ -9912,20 +15778,23 @@ msgid "" "to make them easier to use. All new examples are based in the directory " "`examples `_." 
msgstr "" +"Flower 的使用示例曾与 Flower 捆绑在一个名为 ``flwr_example`` " +"的软件包中。我们正在将这些示例迁移到独立项目中,以使它们更易于使用。所有新示例都位于目录 `examples " +"`_。" #: ../../source/ref-example-projects.rst:16 msgid "The following examples are available as standalone projects." -msgstr "" +msgstr "以下示例可作为独立项目使用。" #: ../../source/ref-example-projects.rst:20 msgid "Quickstart TensorFlow/Keras" -msgstr "" +msgstr "快速入门 TensorFlow/Keras" #: ../../source/ref-example-projects.rst:22 msgid "" "The TensorFlow/Keras quickstart example shows CIFAR-10 image " "classification with MobileNetV2:" -msgstr "" +msgstr "TensorFlow/Keras 快速入门示例展示了使用 MobileNetV2 进行的 CIFAR-10 图像分类:" #: ../../source/ref-example-projects.rst:25 msgid "" @@ -9933,51 +15802,61 @@ msgid "" "`_" msgstr "" +"`TensorFlow快速入门 (代码) `_" #: ../../source/ref-example-projects.rst:26 msgid "" -"`Quickstart TensorFlow (Tutorial) `_" msgstr "" +"`TensorFlow快速入门 (教程) `_" #: ../../source/ref-example-projects.rst:27 msgid "" -"`Quickstart TensorFlow (Blog Post) `_" msgstr "" +"`TensorFlow快速入门 (博客) `_" #: ../../source/ref-example-projects.rst:31 #: ../../source/tutorial-quickstart-pytorch.rst:5 msgid "Quickstart PyTorch" -msgstr "" +msgstr "PyTorch快速入门" #: ../../source/ref-example-projects.rst:33 msgid "" "The PyTorch quickstart example shows CIFAR-10 image classification with a" " simple Convolutional Neural Network:" -msgstr "" +msgstr "PyTorch 快速入门范例展示了使用简单卷积神经网络进行 CIFAR-10 图像分类的情况:" #: ../../source/ref-example-projects.rst:36 msgid "" "`Quickstart PyTorch (Code) " "`_" msgstr "" +"`PyTorch快速入门 (代码) `_" #: ../../source/ref-example-projects.rst:37 msgid "" -"`Quickstart PyTorch (Tutorial) `_" msgstr "" +"`PyTorch快速入门 (教程) `_" #: ../../source/ref-example-projects.rst:41 msgid "PyTorch: From Centralized To Federated" -msgstr "" +msgstr "PyTorch: 从集中式到联邦式" #: ../../source/ref-example-projects.rst:43 msgid "" "This example shows how a regular PyTorch project can be federated using " "Flower:" -msgstr "" +msgstr "本例展示了如何使用 Flower 联邦化一个普通的 PyTorch 项目:" #: 
../../source/ref-example-projects.rst:45 msgid "" @@ -9985,39 +15864,47 @@ msgid "" "`_" msgstr "" +"PyTorch: 从集中式到联邦式(代码) `_" #: ../../source/ref-example-projects.rst:46 msgid "" "`PyTorch: From Centralized To Federated (Tutorial) " -"`_" msgstr "" +"PyTorch: 从集中式到联邦式(教程) `_" #: ../../source/ref-example-projects.rst:50 msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" -msgstr "" +msgstr "树莓派和 Nvidia Jetson 上的联邦学习" #: ../../source/ref-example-projects.rst:52 msgid "" "This example shows how Flower can be used to build a federated learning " "system that run across Raspberry Pi and Nvidia Jetson:" -msgstr "" +msgstr "本示例展示了如何利用 Flower 建立一个跨 Raspberry Pi 和 Nvidia Jetson 运行的联邦学习系统:" #: ../../source/ref-example-projects.rst:54 msgid "" "`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " "`_" msgstr "" +"Raspberry Pi 和 Nvidia Jetson 上的联邦学习(代码) " +"`_" #: ../../source/ref-example-projects.rst:55 msgid "" "`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " -"`_" +"`_" msgstr "" +"Raspberry Pi和 Nvidia Jetson 上的联邦学习(博客) " +"`_" #: ../../source/ref-example-projects.rst:60 msgid "Legacy Examples (`flwr_example`)" -msgstr "" +msgstr "传统示例 (`flwr_example`)" #: ../../source/ref-example-projects.rst:63 msgid "" @@ -10025,10 +15912,12 @@ msgid "" "in the future. New examples are provided as standalone projects in " "`examples `_." msgstr "" +"在 `flwr_example` 中的使用示例已被弃用,今后将被移除。新示例将作为独立项目在 `examples " +"`_ 中提供。" #: ../../source/ref-example-projects.rst:69 msgid "Extra Dependencies" -msgstr "" +msgstr "额外依赖" #: ../../source/ref-example-projects.rst:71 msgid "" @@ -10036,39 +15925,41 @@ msgid "" "examples demonstrate Flower in the context of different machine learning " "frameworks, so additional dependencies need to be installed before an " "example can be run." 
-msgstr "" +msgstr "Flower 核心框架只保留了最低限度的依赖项。这些示例在不同机器学习框架的背景下演示了 Flower,因此在运行示例之前需要安装额外的依赖项。" #: ../../source/ref-example-projects.rst:75 msgid "For PyTorch examples::" -msgstr "" +msgstr "PyTorch 示例::" #: ../../source/ref-example-projects.rst:79 msgid "For TensorFlow examples::" -msgstr "" +msgstr "TensorFlow 示例::" #: ../../source/ref-example-projects.rst:83 msgid "For both PyTorch and TensorFlow examples::" -msgstr "" +msgstr "PyTorch 和 TensorFlow 示例::" #: ../../source/ref-example-projects.rst:87 msgid "" "Please consult :code:`pyproject.toml` for a full list of possible extras " "(section :code:`[tool.poetry.extras]`)." msgstr "" +"请参阅 :code:`pyproject.toml`,了解可能的 extras 的完整列表(章节 " +":code:`[tool.poems.extras]`)。" #: ../../source/ref-example-projects.rst:92 msgid "PyTorch Examples" -msgstr "" +msgstr "PyTorch 示例" #: ../../source/ref-example-projects.rst:94 msgid "" "Our PyTorch examples are based on PyTorch 1.7. They should work with " "other releases as well. So far, we provide the following examples." -msgstr "" +msgstr "我们的 PyTorch 示例基于 PyTorch 1.7。它们应该也能在其他版本中使用。到目前为止,我们提供了以下示例。" #: ../../source/ref-example-projects.rst:98 msgid "CIFAR-10 Image Classification" -msgstr "" +msgstr "CIFAR-10 图像分类" #: ../../source/ref-example-projects.rst:100 msgid "" @@ -10077,34 +15968,37 @@ msgid "" "to train a simple CNN classifier in a federated learning setup with two " "clients." 
msgstr "" +"CIFAR-10 和 CIFAR-100 ``_ " +"是流行的 RGB 图像数据集。Flower CIFAR-10 示例使用 PyTorch 在有两个客户端的联邦学习设置中训练一个简单的 CNN " +"分类器。" #: ../../source/ref-example-projects.rst:104 #: ../../source/ref-example-projects.rst:121 #: ../../source/ref-example-projects.rst:146 msgid "First, start a Flower server:" -msgstr "" +msgstr "首先,启动 Flower 服务器:" #: ../../source/ref-example-projects.rst:106 msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" -msgstr "" +msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" #: ../../source/ref-example-projects.rst:108 #: ../../source/ref-example-projects.rst:125 #: ../../source/ref-example-projects.rst:150 msgid "Then, start the two clients in a new terminal window:" -msgstr "" +msgstr "然后,在新的终端窗口中启动两个客户端:" #: ../../source/ref-example-projects.rst:110 msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" -msgstr "" +msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" #: ../../source/ref-example-projects.rst:112 msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." -msgstr "" +msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_cifar`。" #: ../../source/ref-example-projects.rst:115 msgid "ImageNet-2012 Image Classification" -msgstr "" +msgstr "ImageNet-2012 图像分类" #: ../../source/ref-example-projects.rst:117 msgid "" @@ -10112,32 +16006,34 @@ msgid "" " vision datasets. The Flower ImageNet example uses PyTorch to train a " "ResNet-18 classifier in a federated learning setup with ten clients." 
msgstr "" +"ImageNet-2012 `_ 是主要的计算机视觉数据集之一。Flower " +"ImageNet 示例使用 PyTorch 在有十个客户端的联邦学习设置中训练 ResNet-18 分类器。" #: ../../source/ref-example-projects.rst:123 msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" -msgstr "" +msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" #: ../../source/ref-example-projects.rst:127 msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" -msgstr "" +msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" #: ../../source/ref-example-projects.rst:129 msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." -msgstr "" +msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_imagenet`。" #: ../../source/ref-example-projects.rst:133 msgid "TensorFlow Examples" -msgstr "" +msgstr "TensorFlow 示例" #: ../../source/ref-example-projects.rst:135 msgid "" "Our TensorFlow examples are based on TensorFlow 2.0 or newer. So far, we " "provide the following examples." -msgstr "" +msgstr "我们的 TensorFlow 示例基于 TensorFlow 2.0 或更新版本。到目前为止,我们提供了以下示例。" #: ../../source/ref-example-projects.rst:139 msgid "Fashion-MNIST Image Classification" -msgstr "" +msgstr "Fashion-MNIST 图像分类" #: ../../source/ref-example-projects.rst:141 msgid "" @@ -10147,36 +16043,39 @@ msgid "" " Fashion-MNIST and trains a simple image classification model over those " "partitions." msgstr "" +"`Fashion-MNIST `_ " +"经常被用作机器学习的 \"你好,世界!\"。我们遵循这一传统,提供了一个从Fashion-MNIST " +"中随机抽样本地数据集的示例,并在这些分区上训练一个简单的图像分类模型。" #: ../../source/ref-example-projects.rst:148 msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" -msgstr "" +msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" #: ../../source/ref-example-projects.rst:152 msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" -msgstr "" +msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" #: ../../source/ref-example-projects.rst:154 msgid "" "For more details, see " ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." 
-msgstr "" +msgstr "更多详情,请参阅 :code:`src/py/flwr_example/tensorflow_fashion_mnist`。" #: ../../source/ref-faq.rst:4 msgid "" "This page collects answers to commonly asked questions about Federated " "Learning with Flower." -msgstr "" +msgstr "本页收集了有关 \"Flower 联邦学习 \"常见问题的答案。" #: ../../source/ref-faq.rst msgid ":fa:`eye,mr-1` Can Flower run on Juptyter Notebooks / Google Colab?" -msgstr "" +msgstr ":fa:`eye,mr-1` Flower 可以在 Juptyter Notebooks / Google Colab 上运行吗?" #: ../../source/ref-faq.rst:8 msgid "" "Yes, it can! Flower even comes with a few under-the-hood optimizations to" " make it work even better on Colab. Here's a quickstart example:" -msgstr "" +msgstr "是的,它可以!Flower 甚至还进行了一些底层优化,使其在 Colab 上运行得更好。下面是一个快速启动示例:" #: ../../source/ref-faq.rst:10 msgid "" @@ -10184,6 +16083,9 @@ msgid "" "`_" msgstr "" +"`Flower 模拟 PyTorch " +"`_" #: ../../source/ref-faq.rst:11 msgid "" @@ -10191,67 +16093,84 @@ msgid "" "`_" msgstr "" +"`Flower模拟TensorFlow/Keras " +"`_" #: ../../source/ref-faq.rst msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" -msgstr "" +msgstr ":fa:`eye,mr-1` 如何在 Raspberry Pi 上运行联邦学习?" #: ../../source/ref-faq.rst:15 msgid "" "Find the `blog post about federated learning on embedded device here " -"`_" +"`_" " and the corresponding `GitHub code example " "`_." msgstr "" +"请点击此处查看有关嵌入式设备联邦学习的 " +"\"博文\"`_和相应的" +" \"GitHub 代码示例\"`_。" #: ../../source/ref-faq.rst msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" -msgstr "" +msgstr ":fa:`eye,mr-1` Flower 是否支持安卓设备上的联邦学习?" #: ../../source/ref-faq.rst:19 msgid "" "Yes, it does. 
Please take a look at our `blog post " -"`_ or check out the code examples:" msgstr "" +"是的,确实如此。请查看我们的 \"博客文章 `_\" 或查看代码示例:" #: ../../source/ref-faq.rst:21 msgid "" -"`Android Kotlin example `_" msgstr "" +"`Android Kotlin 示例 `_" #: ../../source/ref-faq.rst:22 -msgid "`Android Java example `_" -msgstr "" +msgid "`Android Java example `_" +msgstr "Android Java 示例 `_" #: ../../source/ref-faq.rst msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" -msgstr "" +msgstr ":fa:`eye,mr-1` 我可以将联邦学习与区块链结合起来吗?" #: ../../source/ref-faq.rst:26 msgid "" "Yes, of course. A list of available examples using Flower within a " "blockchain environment is available here:" -msgstr "" +msgstr "当然可以。有关在区块链环境中使用 Flower 的可用示例列表,请点击此处:" #: ../../source/ref-faq.rst:28 msgid "" "`Flower meets Nevermined GitHub Repository `_." msgstr "" +"`Flower meets Nevermined GitHub Repository `_." #: ../../source/ref-faq.rst:29 msgid "" "`Flower meets Nevermined YouTube video " "`_." msgstr "" +"`Flower meets Nevermined YouTube 视频 " +"`_." #: ../../source/ref-faq.rst:30 msgid "" "`Flower meets KOSMoS `_." msgstr "" +"`Flower meets KOSMoS `_." #: ../../source/ref-faq.rst:31 msgid "" @@ -10259,16 +16178,21 @@ msgid "" "learning-same-mask-different-faces-imen-" "ayari/?trackingId=971oIlxLQ9%2BA9RB0IQ73XQ%3D%3D>`_ ." msgstr "" +"`Flower meets Talan博文 `_ 。" #: ../../source/ref-faq.rst:32 msgid "" "`Flower meets Talan GitHub Repository " "`_ ." msgstr "" +"`Flower meets Talan GitHub Repository " +"`_ ." #: ../../source/ref-telemetry.md:1 msgid "Telemetry" -msgstr "" +msgstr "遥测功能" #: ../../source/ref-telemetry.md:3 msgid "" @@ -10277,27 +16201,29 @@ msgid "" "Flower team to understand how Flower is used and what challenges users " "might face." 
msgstr "" +"Flower 开源项目收集**匿名**使用指标,以便在充分知情的情况下做出改进 Flower 的决定。这样做能让 Flower 团队了解 " +"Flower 的使用情况以及用户可能面临的挑战。" #: ../../source/ref-telemetry.md:5 msgid "" "**Flower is a friendly framework for collaborative AI and data science.**" " Staying true to this statement, Flower makes it easy to disable " "telemetry for users that do not want to share anonymous usage metrics." -msgstr "" +msgstr "**Flower 是一个用于协作式人工智能和数据科学的友好框架。** Flower 遵循这一声明,让不想分享匿名使用指标的用户可以轻松禁用遥测技术。" #: ../../source/ref-telemetry.md:7 msgid "Principles" -msgstr "" +msgstr "原则" #: ../../source/ref-telemetry.md:9 msgid "We follow strong principles guarding anonymous usage metrics collection:" -msgstr "" +msgstr "我们遵循严格的匿名使用指标收集原则:" #: ../../source/ref-telemetry.md:11 msgid "" "**Optional:** You will always be able to disable telemetry; read on to " "learn “[How to opt-out](#how-to-opt-out)”." -msgstr "" +msgstr "**可选:** 您始终可以禁用遥测功能;请继续阅读\"[如何退出](#how-to-opt-out)\"。" #: ../../source/ref-telemetry.md:12 msgid "" @@ -10306,6 +16232,8 @@ msgid "" "metrics](#collected-metrics)” to understand what metrics are being " "reported." msgstr "" +"**匿名:** 报告的使用指标是匿名的,不包含任何个人身份信息 (PII)。请参阅\"[收集的指标](#collected-metrics) " +"\"了解报告的指标。" #: ../../source/ref-telemetry.md:13 msgid "" @@ -10313,17 +16241,19 @@ msgid "" "reported; see the section “[How to inspect what is being reported](#how-" "to-inspect-what-is-being-reported)”" msgstr "" +"**透明:** 您可以轻松查看正在报告的匿名指标;请参阅\"[如何查看正在报告的指标](#how-to-inspect-what-is-" +"being-reported)\"部分" #: ../../source/ref-telemetry.md:14 msgid "" "**Open for feedback:** You can always reach out to us if you have " "feedback; see the section “[How to contact us](#how-to-contact-us)” for " "details." -msgstr "" +msgstr "**欢迎反馈:** 如果您有反馈意见,可以随时联系我们;详情请参见\"[如何联系我们](#how-to-contact-us) \"部分。" #: ../../source/ref-telemetry.md:16 msgid "How to opt-out" -msgstr "" +msgstr "如何退出" #: ../../source/ref-telemetry.md:18 msgid "" @@ -10332,6 +16262,9 @@ msgid "" "`FLWR_TELEMETRY_ENABLED=0`. 
Assuming you are starting a Flower server or " "client, simply do so by prepending your command as in:" msgstr "" +"Flower 启动时,会检查环境变量 `FLWR_TELEMETRY_ENABLED` 是否存在。通过设置 " +"`FLWR_TELEMETRY_ENABLED=0` 可以轻松禁用遥测功能。假设你启动的是 Flower " +"服务器或客户端,只需在命令前添加以下内容即可:" #: ../../source/ref-telemetry.md:24 msgid "" @@ -10339,14 +16272,16 @@ msgid "" " `.bashrc` (or whatever configuration file applies to your environment) " "to disable Flower telemetry permanently." msgstr "" +"或者,你也可以在 `.bashrc`(或任何适用于你的环境的配置文件)中导出 `FLWR_TELEMETRY_ENABLED=0` 来永久禁用 " +"Flower telemetry。" #: ../../source/ref-telemetry.md:26 msgid "Collected metrics" -msgstr "" +msgstr "收集的指标" #: ../../source/ref-telemetry.md:28 msgid "Flower telemetry collects the following metrics:" -msgstr "" +msgstr "Flower 遥测技术收集以下指标:" #: ../../source/ref-telemetry.md:30 msgid "" @@ -10354,13 +16289,13 @@ msgid "" "being used. This helps us to decide whether we should invest effort into " "releasing a patch version for an older version of Flower or instead use " "the bandwidth to build new features." -msgstr "" +msgstr "**Flower 版本。** 了解目前使用的 Flower 版本。这有助于我们决定是否应该投入精力为旧版本的 Flower 发布补丁版本,还是利用带宽来构建新功能。" #: ../../source/ref-telemetry.md:32 msgid "" "**Operating system.** Enables us to answer questions such as: *Should we " "create more guides for Linux, macOS, or Windows?*" -msgstr "" +msgstr "**操作系统**使我们能够回答以下问题: *我们应该为 Linux、macOS 还是 Windows 创建更多指南?*" #: ../../source/ref-telemetry.md:34 msgid "" @@ -10368,21 +16303,21 @@ msgid "" "decide whether we should invest effort into supporting old versions of " "Python or stop supporting them and start taking advantage of new Python " "features." -msgstr "" +msgstr "**Python 版本。** 了解 Python 版本有助于我们决定是否应该投入精力支持旧版本的 Python,还是停止支持这些版本并开始利用新的 Python 功能。" #: ../../source/ref-telemetry.md:36 msgid "" "**Hardware properties.** Understanding the hardware environment that " "Flower is being used in helps to decide whether we should, for example, " "put more effort into supporting low-resource environments."
-msgstr "" +msgstr "**硬件属性** 了解 Flower 的硬件使用环境,有助于决定我们是否应在支持低资源环境等方面投入更多精力。" #: ../../source/ref-telemetry.md:38 msgid "" "**Execution mode.** Knowing what execution mode Flower starts in enables " "us to understand how heavily certain features are being used and better " "prioritize based on that." -msgstr "" +msgstr "**执行模式** 了解 Flower 的启动执行模式,能让我们了解某些功能的使用率,并据此更好地确定优先级。" #: ../../source/ref-telemetry.md:40 msgid "" @@ -10391,6 +16326,8 @@ msgid "" "types not only start Flower workloads but also successfully complete " "them." msgstr "" +"**集群。** 每次 Flower 工作负载启动时,Flower 遥测都会随机分配一个内存集群 ID。这样,我们就能了解哪些设备类型不仅启动了 Flower " +"工作负载,而且还成功完成了它们。" #: ../../source/ref-telemetry.md:42 msgid "" @@ -10403,14 +16340,18 @@ msgid "" "in order to reproduce the issue, multiple workloads must be started at " "the same time." msgstr "" +"**Source.** Flower 遥测会在第一次生成遥测事件时,尝试在 `~/.flwr/source` 中存储一个随机源 ID。源 ID " +"对于识别问题是否反复出现或问题是否由多个集群同时运行触发(这在模拟中经常发生)非常重要。例如,如果设备同时运行多个工作负载并导致问题,那么为了重现问题,必须同时启动多个工作负载。" #: ../../source/ref-telemetry.md:44 msgid "" "You may delete the source ID at any time. If you wish for all events " "logged under a specific source ID to be deleted, you can send a deletion " -"request mentioning the source ID to `telemetry@flower.dev`. All events " +"request mentioning the source ID to `telemetry@flower.ai`. All events " "related to that source ID will then be permanently deleted." msgstr "" +"您可以随时删除源 ID。如果您希望删除特定源 ID 下记录的所有事件,可以向 `telemetry@flower.ai` " +"发送删除请求,并提及该源 ID。届时,与该源 ID 相关的所有事件都将被永久删除。" #: ../../source/ref-telemetry.md:46 msgid "" @@ -10420,17 +16361,19 @@ msgid "" "any changes to the metrics collected and publish changes in the " "changelog." msgstr "" +"我们不会收集任何个人身份信息。如果您认为所收集的任何指标可能以任何方式被滥用,请[与我们联系](#how-to-contact-" +"us)。我们将更新本页面,以反映对所收集指标的任何更改,并在更新日志中公布更改内容。" #: ../../source/ref-telemetry.md:48 msgid "" "If you think other metrics would be helpful for us to better guide our " "decisions, please let us know!
We will carefully review them; if we are " "confident that they do not compromise user privacy, we may add them." -msgstr "" +msgstr "如果您认为其他指标有助于我们更好地指导决策,请告诉我们!我们将仔细审查这些指标;如果我们确信它们不会损害用户隐私,我们可能会添加这些指标。" #: ../../source/ref-telemetry.md:50 msgid "How to inspect what is being reported" -msgstr "" +msgstr "如何检查报告中的内容" #: ../../source/ref-telemetry.md:52 msgid "" @@ -10441,40 +16384,45 @@ msgid "" "`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " "without sending any metrics." msgstr "" +"我们希望能让您轻松查看所报告的匿名使用指标。通过设置环境变量 `FLWR_TELEMETRY_LOGGING=1` " +"可以查看所有报告的遥测信息。日志记录默认为禁用。您可以不使用 `FLWR_TELEMETRY_ENABLED` " +"而单独使用日志记录,这样就可以在不发送任何指标的情况下检查遥测功能。" #: ../../source/ref-telemetry.md:58 msgid "" "The inspect Flower telemetry without sending any anonymous usage metrics," " use both environment variables:" -msgstr "" +msgstr "在不发送任何匿名使用指标的情况下检查 Flower 遥测,可使用这两个环境变量:" #: ../../source/ref-telemetry.md:64 msgid "How to contact us" -msgstr "" +msgstr "如何联系我们" #: ../../source/ref-telemetry.md:66 msgid "" "We want to hear from you. If you have any feedback or ideas on how to " "improve the way we handle anonymous usage metrics, reach out to us via " -"[Slack](https://flower.dev/join-slack/) (channel `#telemetry`) or email " -"(`telemetry@flower.dev`)." +"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " +"(`telemetry@flower.ai`)." msgstr "" +"我们希望听到您的意见。如果您对如何改进我们处理匿名使用指标的方式有任何反馈或想法,请通过 [Slack](https://flower.ai" +"/join-slack/) (频道 `#telemetry`)或电子邮件 (`telemetry@flower.ai`)与我们联系。" #: ../../source/tutorial-quickstart-android.rst:-1 msgid "" "Read this Federated Learning quickstart tutorial for creating an Android " "app using Flower." 
-msgstr "" +msgstr "阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 Android 应用程序。" #: ../../source/tutorial-quickstart-android.rst:5 msgid "Quickstart Android" -msgstr "" +msgstr "快速入门 Android" #: ../../source/tutorial-quickstart-android.rst:10 msgid "" "Let's build a federated learning system using TFLite and Flower on " "Android!" -msgstr "" +msgstr "让我们在 Android 上使用 TFLite 和 Flower 构建一个联邦学习系统!" #: ../../source/tutorial-quickstart-android.rst:12 msgid "" @@ -10482,20 +16430,22 @@ msgid "" "`_ to learn " "more." msgstr "" +"请参阅`完整代码示例 " +"`_了解更多信息。" #: ../../source/tutorial-quickstart-fastai.rst:-1 msgid "" "Check out this Federated Learning quickstart tutorial for using Flower " "with FastAI to train a vision model on CIFAR-10." -msgstr "" +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 FastAI 在 CIFAR-10 上训练视觉模型。" #: ../../source/tutorial-quickstart-fastai.rst:5 msgid "Quickstart fastai" -msgstr "" +msgstr "快速入门 fastai" #: ../../source/tutorial-quickstart-fastai.rst:10 msgid "Let's build a federated learning system using fastai and Flower!" -msgstr "" +msgstr "让我们用 fastai 和 Flower 建立一个联邦学习系统!" #: ../../source/tutorial-quickstart-fastai.rst:12 msgid "" @@ -10503,22 +16453,24 @@ msgid "" "`_ " "to learn more." msgstr "" +"请参阅 `完整代码示例 `_了解更多信息。" #: ../../source/tutorial-quickstart-huggingface.rst:-1 msgid "" "Check out this Federating Learning quickstart tutorial for using Flower " "with HuggingFace Transformers in order to fine-tune an LLM." -msgstr "" +msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 HuggingFace Transformers 来微调 LLM。" #: ../../source/tutorial-quickstart-huggingface.rst:5 msgid "Quickstart 🤗 Transformers" -msgstr "" +msgstr "🤗 Transformers快速入门" #: ../../source/tutorial-quickstart-huggingface.rst:10 msgid "" "Let's build a federated learning system using Hugging Face Transformers " "and Flower!" -msgstr "" +msgstr "让我们用Hugging Face Transformers和Flower来构建一个联邦学习系统!" 
#: ../../source/tutorial-quickstart-huggingface.rst:12 msgid "" @@ -10528,10 +16480,12 @@ msgid "" " over a dataset of IMDB ratings. The end goal is to detect if a movie " "rating is positive or negative." msgstr "" +"我们将利用Hugging Face技术,使用 Flower 在多个客户端上联邦训练语言模型。更具体地说,我们将对预先训练好的 " +"Transformer 模型(distilBERT)进行微调,以便在 IMDB 评分数据集上进行序列分类。最终目标是检测电影评分是正面还是负面。" #: ../../source/tutorial-quickstart-huggingface.rst:18 msgid "Dependencies" -msgstr "" +msgstr "依赖关系" #: ../../source/tutorial-quickstart-huggingface.rst:20 msgid "" @@ -10540,14 +16494,16 @@ msgid "" ":code:`torch`, and :code:`transformers`. This can be done using " ":code:`pip`:" msgstr "" +"要学习本教程,您需要安装以下软件包: :code:`datasets`、 :code:`evaluate`、 :code:`flwr`、 " +":code:`torch`和 :code:`transformers`。这可以通过 :code:`pip` 来完成:" #: ../../source/tutorial-quickstart-huggingface.rst:30 msgid "Standard Hugging Face workflow" -msgstr "" +msgstr "标准Hugging Face工作流程" #: ../../source/tutorial-quickstart-huggingface.rst:33 msgid "Handling the data" -msgstr "" +msgstr "处理数据" #: ../../source/tutorial-quickstart-huggingface.rst:35 msgid "" @@ -10555,10 +16511,12 @@ msgid "" "library. We then need to tokenize the data and create :code:`PyTorch` " "dataloaders, this is all done in the :code:`load_data` function:" msgstr "" +"为了获取 IMDB 数据集,我们将使用 Hugging Face 的 :code:`datasets` 库。然后,我们需要对数据进行标记化,并创建" +" :code:`PyTorch` 数据加载器,这些都将在 :code:`load_data` 函数中完成:" #: ../../source/tutorial-quickstart-huggingface.rst:81 msgid "Training and testing the model" -msgstr "" +msgstr "训练和测试模型" #: ../../source/tutorial-quickstart-huggingface.rst:83 msgid "" @@ -10566,24 +16524,28 @@ msgid "" "take care of the training and testing. 
This is very similar to any " ":code:`PyTorch` training or testing loop:" msgstr "" +"有了创建 trainloader 和 testloader 的方法后,我们就可以进行训练和测试了。这与任何 :code:`PyTorch` " +"训练或测试循环都非常相似:" #: ../../source/tutorial-quickstart-huggingface.rst:121 msgid "Creating the model itself" -msgstr "" +msgstr "创建模型本身" #: ../../source/tutorial-quickstart-huggingface.rst:123 msgid "" "To create the model itself, we will just load the pre-trained distillBERT" " model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" msgstr "" +"要创建模型本身,我们只需使用 Hugging Face 的 :code:`AutoModelForSequenceClassification` " +"加载预训练的 distillBERT 模型:" #: ../../source/tutorial-quickstart-huggingface.rst:136 msgid "Federating the example" -msgstr "" +msgstr "将示例联邦化" #: ../../source/tutorial-quickstart-huggingface.rst:139 msgid "Creating the IMDBClient" -msgstr "" +msgstr "创建 IMDBClient" #: ../../source/tutorial-quickstart-huggingface.rst:141 msgid "" @@ -10591,6 +16553,8 @@ msgid "" "Flower client class (inheriting from :code:`flwr.client.NumPyClient`). " "This is very easy, as our model is a standard :code:`PyTorch` model:" msgstr "" +"要将我们的示例联邦到多个客户端,我们首先需要编写 Flower 客户端类(继承自 " +":code:`flwr.client.NumPyClient`)。这很容易,因为我们的模型是一个标准的 :code:`PyTorch` 模型:" #: ../../source/tutorial-quickstart-huggingface.rst:169 msgid "" @@ -10601,10 +16565,12 @@ msgid "" ":code:`evaluate` function tests the model locally and returns the " "relevant metrics." 
msgstr "" +":code:`get_parameters` " +"函数允许服务器获取客户端的参数。相反,:code:`set_parameters`函数允许服务器将其参数发送给客户端。最后,:code:`fit`函数在本地为客户端训练模型,:code:`evaluate`函数在本地测试模型并返回相关指标。" #: ../../source/tutorial-quickstart-huggingface.rst:175 msgid "Starting the server" -msgstr "" +msgstr "启动服务器" #: ../../source/tutorial-quickstart-huggingface.rst:177 msgid "" @@ -10615,6 +16581,9 @@ msgid "" "all the clients' weights at each round) and then using the " ":code:`flwr.server.start_server` function:" msgstr "" +"现在我们有了实例化客户端的方法,我们需要创建服务器,以便汇总结果。使用 Flower,首先选择一个策略(这里我们使用 " +":code:`FedAvg`,它将把全局模型参数定义为每轮所有客户端模型参数的平均值),然后使用 " +":code:`flwr.server.start_server`函数,就可以非常轻松地完成这项工作:" #: ../../source/tutorial-quickstart-huggingface.rst:205 msgid "" @@ -10622,20 +16591,22 @@ msgid "" "aggregate the metrics distributed amongst the clients (basically this " "allows us to display a nice average accuracy and loss for every round)." msgstr "" +"使用 :code:`weighted_average` " +"函数是为了提供一种方法来汇总分布在客户端的指标(基本上,这可以让我们显示每一轮的平均精度和损失值)。" #: ../../source/tutorial-quickstart-huggingface.rst:209 msgid "Putting everything together" -msgstr "" +msgstr "把所有东西放在一起" #: ../../source/tutorial-quickstart-huggingface.rst:211 msgid "We can now start client instances using:" -msgstr "" +msgstr "现在我们可以使用:" #: ../../source/tutorial-quickstart-huggingface.rst:221 msgid "" "And they will be able to connect to the server and start the federated " "training." -msgstr "" +msgstr "他们就能连接到服务器,开始联邦训练。" #: ../../source/tutorial-quickstart-huggingface.rst:223 msgid "" @@ -10645,49 +16616,56 @@ msgid "" "huggingface](https://github.com/adap/flower/tree/main/examples" "/quickstart-huggingface)." msgstr "" +"如果您想查看所有内容,请查看完整的代码示例: [https://github.com/adap/flower/tree/main/examples" +"/quickstart-" +"huggingface](https://github.com/adap/flower/tree/main/examples" +"/quickstart-huggingface)." 
#: ../../source/tutorial-quickstart-huggingface.rst:227 msgid "" "Of course, this is a very basic example, and a lot can be added or " "modified, it was just to showcase how simply we could federate a Hugging " "Face workflow using Flower." -msgstr "" +msgstr "当然,这只是一个非常基本的示例,还可以添加或修改很多内容,只是为了展示我们可以如何简单地使用 Flower 联合Hugging Face的工作流程。" #: ../../source/tutorial-quickstart-huggingface.rst:230 msgid "" "Note that in this example we used :code:`PyTorch`, but we could have very" " well used :code:`TensorFlow`." -msgstr "" +msgstr "请注意,在本例中我们使用了 :code:`PyTorch`,但也完全可以使用 :code:`TensorFlow`。" #: ../../source/tutorial-quickstart-ios.rst:-1 msgid "" "Read this Federated Learning quickstart tutorial for creating an iOS app " "using Flower to train a neural network on MNIST." -msgstr "" +msgstr "阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 iOS 应用程序,并在 MNIST 上训练神经网络。" #: ../../source/tutorial-quickstart-ios.rst:5 msgid "Quickstart iOS" -msgstr "" +msgstr "快速入门 iOS" #: ../../source/tutorial-quickstart-ios.rst:10 msgid "" "In this tutorial we will learn how to train a Neural Network on MNIST " "using Flower and CoreML on iOS devices." -msgstr "" +msgstr "在本教程中,我们将学习如何在 iOS 设备上使用 Flower 和 CoreML 在 MNIST 上训练神经网络。" #: ../../source/tutorial-quickstart-ios.rst:12 msgid "" "First of all, for running the Flower Python server, it is recommended to " "create a virtual environment and run everything within a `virtualenv " -"`_. For the Flower " +"`_. For the Flower " "client implementation in iOS, it is recommended to use Xcode as our IDE." msgstr "" +"首先,为了运行 Flower Python 服务器,建议创建一个虚拟环境,并在 `virtualenv " +"`_ 中运行一切。对于在 iOS 中实现 " +"Flower 客户端,建议使用 Xcode 作为我们的集成开发环境。" #: ../../source/tutorial-quickstart-ios.rst:15 msgid "" "Our example consists of one Python *server* and two iPhone *clients* that" " all have the same model." 
-msgstr "" +msgstr "我们的示例包括一个 Python *服务器*和两个 iPhone *客户端*,它们都具有相同的模型。" #: ../../source/tutorial-quickstart-ios.rst:17 msgid "" @@ -10696,18 +16674,18 @@ msgid "" "the *server* which will aggregate them to produce a better model. " "Finally, the *server* sends this improved version of the model back to " "each *client*. A complete cycle of weight updates is called a *round*." -msgstr "" +msgstr "*客户端*负责根据其本地数据集为模型生成独立的模型参数。然后,这些参数更新会被发送到*服务器*,由*服务器*汇总后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的参数更新周期称为一*轮*。" #: ../../source/tutorial-quickstart-ios.rst:21 msgid "" "Now that we have a rough idea of what is going on, let's get started to " "setup our Flower server environment. We first need to install Flower. You" " can do this by using pip:" -msgstr "" +msgstr "现在我们已经有了一个大致的概念,让我们开始设置 Flower 服务器环境吧。首先,我们需要安装 Flower。你可以使用 pip 来安装:" #: ../../source/tutorial-quickstart-ios.rst:27 msgid "Or Poetry:" -msgstr "" +msgstr "或者Poetry:" #: ../../source/tutorial-quickstart-ios.rst:36 msgid "" @@ -10717,6 +16695,9 @@ msgid "" "Flower client with CoreML, that has been implemented and stored inside " "the Swift SDK. The client implementation can be seen below:" msgstr "" +"现在我们已经安装了所有依赖项,让我们使用 CoreML 作为本地训练框架和 MNIST " +"作为数据集,运行一个简单的分布式训练。为了简单起见,我们将使用 CoreML 的完整 Flower 客户端,该客户端已在 Swift SDK " +"中实现并存储。客户端实现如下:" #: ../../source/tutorial-quickstart-ios.rst:72 msgid "" @@ -10728,10 +16709,14 @@ msgid "" "`_ to learn more " "about the app." 
msgstr "" +"让我们在 Xcode 中创建一个新的应用程序项目,并在项目中添加 :code:`flwr` 作为依赖关系。对于我们的应用程序,我们将在 " +":code:`FLiOSModel.swift` 中存储应用程序的逻辑,在 :code:`ContentView.swift` 中存储 UI " +"元素。在本快速入门中,我们将更多地关注 :code:`FLiOSModel.swift`。请参阅 `完整代码示例 " +"`_ 以了解更多有关应用程序的信息。" #: ../../source/tutorial-quickstart-ios.rst:75 msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" -msgstr "" +msgstr "在 :code:`FLiOSModel.swift` 中导入 Flower 和 CoreML 相关软件包:" #: ../../source/tutorial-quickstart-ios.rst:83 msgid "" @@ -10743,6 +16728,10 @@ msgid "" "into :code:`MLBatchProvider` object. The preprocessing is done inside " ":code:`DataLoader.swift`." msgstr "" +"然后通过拖放将 mlmodel 添加到项目中,在部署到 iOS 设备时,mlmodel 将被捆绑到应用程序中。我们需要传递 url 以访问 " +"mlmodel 并运行 CoreML 机器学习进程,可通过调用函数 :code:`Bundle.main.url` 获取。对于 MNIST " +"数据集,我们需要将其预处理为 :code:`MLBatchProvider` 对象。预处理在 :code:`DataLoader.swift` " +"中完成。" #: ../../source/tutorial-quickstart-ios.rst:99 msgid "" @@ -10753,18 +16742,20 @@ msgid "" "which are written as proto files. The implementation can be seen in " ":code:`MLModelInspect`." msgstr "" +"由于 CoreML 不允许在训练前查看模型参数,而在训练过程中或训练后访问模型参数只能通过指定层名来完成,因此我们需要事先通过查看模型规范(写成 " +"proto 文件)来了解这些信息。具体实现可参见 :code:`MLModelInspect`。" #: ../../source/tutorial-quickstart-ios.rst:102 msgid "" "After we have all of the necessary informations, let's create our Flower " "client." -msgstr "" +msgstr "获得所有必要信息后,让我们创建 Flower 客户端。" #: ../../source/tutorial-quickstart-ios.rst:117 msgid "" "Then start the Flower gRPC client and start communicating to the server " "by passing our Flower client to the function :code:`startFlwrGRPC`." -msgstr "" +msgstr "然后启动 Flower gRPC 客户端,并通过将 Flower 客户端传递给函数 :code:`startFlwrGRPC` 来开始与服务器通信。" #: ../../source/tutorial-quickstart-ios.rst:124 msgid "" @@ -10775,9 +16766,12 @@ msgid "" "in the application before clicking the start button to start the " "federated learning process." 
msgstr "" +"这就是客户端。我们只需实现 :code:`Client` 或调用提供的 :code:`MLFlwrClient` 并调用 " +":code:`startFlwrGRPC()`。属性 :code:`hostname` 和 :code:`port` " +"会告诉客户端要连接到哪个服务器。这可以通过在应用程序中输入主机名和端口来实现,然后再点击开始按钮启动联邦学习进程。" #: ../../source/tutorial-quickstart-ios.rst:131 -#: ../../source/tutorial-quickstart-mxnet.rst:226 +#: ../../source/tutorial-quickstart-mxnet.rst:228 #: ../../source/tutorial-quickstart-pytorch.rst:205 #: ../../source/tutorial-quickstart-tensorflow.rst:100 msgid "" @@ -10785,24 +16779,26 @@ msgid "" "configuration possibilities at their default values. In a file named " ":code:`server.py`, import Flower and start the server:" msgstr "" +"对于简单的工作负载,我们可以启动 Flower 服务器,并将所有配置选项保留为默认值。在名为 :code:`server.py` 的文件中,导入 " +"Flower 并启动服务器:" #: ../../source/tutorial-quickstart-ios.rst:142 -#: ../../source/tutorial-quickstart-mxnet.rst:237 +#: ../../source/tutorial-quickstart-mxnet.rst:239 #: ../../source/tutorial-quickstart-pytorch.rst:216 #: ../../source/tutorial-quickstart-scikitlearn.rst:215 #: ../../source/tutorial-quickstart-tensorflow.rst:112 msgid "Train the model, federated!" -msgstr "" +msgstr "联邦训练模型!" #: ../../source/tutorial-quickstart-ios.rst:144 #: ../../source/tutorial-quickstart-pytorch.rst:218 #: ../../source/tutorial-quickstart-tensorflow.rst:114 -#: ../../source/tutorial-quickstart-xgboost.rst:522 +#: ../../source/tutorial-quickstart-xgboost.rst:525 msgid "" "With both client and server ready, we can now run everything and see " "federated learning in action. FL systems usually have a server and " "multiple clients. We therefore have to start the server first:" -msgstr "" +msgstr "客户端和服务器都已准备就绪,我们现在可以运行一切,看看联邦学习的实际效果。FL 系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" #: ../../source/tutorial-quickstart-ios.rst:152 msgid "" @@ -10813,6 +16809,10 @@ msgid "" "`_." 
msgstr "" +"服务器运行后,我们就可以在不同的终端启动客户端。通过 Xcode 构建并运行客户端,一个通过 Xcode 模拟器,另一个通过部署到 " +"iPhone。要了解更多有关如何将应用程序部署到 iPhone 或模拟器的信息,请访问 `此处 " +"`_。" #: ../../source/tutorial-quickstart-ios.rst:156 msgid "" @@ -10821,42 +16821,55 @@ msgid "" "`_ for this " "example can be found in :code:`examples/ios`." msgstr "" +"恭喜您! 您已经成功地在 ios 设备中构建并运行了第一个联邦学习系统。本示例的`完整源代码 " +"`_ 可在 " +":code:`examples/ios` 中找到。" #: ../../source/tutorial-quickstart-jax.rst:-1 msgid "" "Check out this Federated Learning quickstart tutorial for using Flower " "with Jax to train a linear regression model on a scikit-learn dataset." -msgstr "" +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 Jax 在 scikit-learn 数据集上训练线性回归模型。" #: ../../source/tutorial-quickstart-jax.rst:5 msgid "Quickstart JAX" -msgstr "" +msgstr "快速入门 JAX" #: ../../source/tutorial-quickstart-mxnet.rst:-1 msgid "" "Check out this Federated Learning quickstart tutorial for using Flower " "with MXNet to train a Sequential model on MNIST." -msgstr "" +msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 MXNet 在 MNIST 上训练序列模型。" #: ../../source/tutorial-quickstart-mxnet.rst:5 msgid "Quickstart MXNet" +msgstr "快速入门 MXNet" + +#: ../../source/tutorial-quickstart-mxnet.rst:7 +msgid "" +"MXNet is no longer maintained and has been moved into `Attic " +"`_. As a result, we would " +"encourage you to use other ML frameworks alongise Flower, for example, " +"PyTorch. This tutorial might be removed in future versions of Flower." msgstr "" -#: ../../source/tutorial-quickstart-mxnet.rst:10 +#: ../../source/tutorial-quickstart-mxnet.rst:12 msgid "" "In this tutorial, we will learn how to train a :code:`Sequential` model " "on MNIST using Flower and MXNet." 
-msgstr "" +msgstr "在本教程中,我们将学习如何使用 Flower 和 MXNet 在 MNIST 上训练 :code:`Sequential` 模型。" -#: ../../source/tutorial-quickstart-mxnet.rst:12 +#: ../../source/tutorial-quickstart-mxnet.rst:14 #: ../../source/tutorial-quickstart-scikitlearn.rst:12 msgid "" "It is recommended to create a virtual environment and run everything " -"within this `virtualenv `_." msgstr "" +"建议创建一个虚拟环境,并在此 `virtualenv `_ 中运行所有内容。" -#: ../../source/tutorial-quickstart-mxnet.rst:16 +#: ../../source/tutorial-quickstart-mxnet.rst:18 #: ../../source/tutorial-quickstart-scikitlearn.rst:16 msgid "" "*Clients* are responsible for generating individual model parameter " @@ -10865,20 +16878,20 @@ msgid "" " global model. Finally, the *server* sends this improved version of the " "model back to each *client*. A complete cycle of parameters updates is " "called a *round*." -msgstr "" +msgstr "*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。然后,这些参数更新将被发送到*服务器*,由*服务器*汇总后生成一个更新的全局模型。最后,*服务器*将这一改进版模型发回给每个*客户端*。一个完整的参数更新周期称为一*轮*。" -#: ../../source/tutorial-quickstart-mxnet.rst:20 +#: ../../source/tutorial-quickstart-mxnet.rst:22 #: ../../source/tutorial-quickstart-scikitlearn.rst:20 msgid "" "Now that we have a rough idea of what is going on, let's get started. We " "first need to install Flower. You can do this by running:" -msgstr "" +msgstr "现在,我们已经有了一个大致的概念,让我们开始吧。首先,我们需要安装 Flower。运行:" -#: ../../source/tutorial-quickstart-mxnet.rst:26 +#: ../../source/tutorial-quickstart-mxnet.rst:28 msgid "Since we want to use MXNet, let's go ahead and install it:" -msgstr "" +msgstr "既然我们要使用 MXNet,那就继续安装吧:" -#: ../../source/tutorial-quickstart-mxnet.rst:36 +#: ../../source/tutorial-quickstart-mxnet.rst:38 msgid "" "Now that we have all our dependencies installed, let's run a simple " "distributed training with two clients and one server. Our training " @@ -10886,54 +16899,61 @@ msgid "" "Digit Recognition tutorial " "`_." 
msgstr "" +"现在,我们已经安装了所有依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练程序和网络架构基于 MXNet 的 " +"`手写数字识别教程 " +"`_\"。" -#: ../../source/tutorial-quickstart-mxnet.rst:38 +#: ../../source/tutorial-quickstart-mxnet.rst:40 msgid "" "In a file called :code:`client.py`, import Flower and MXNet related " "packages:" -msgstr "" +msgstr "在名为 :code:`client.py` 的文件中,导入 Flower 和 MXNet 相关软件包:" -#: ../../source/tutorial-quickstart-mxnet.rst:53 +#: ../../source/tutorial-quickstart-mxnet.rst:55 msgid "In addition, define the device allocation in MXNet with:" -msgstr "" +msgstr "此外,还可以在 MXNet 中定义设备分配:" -#: ../../source/tutorial-quickstart-mxnet.rst:59 +#: ../../source/tutorial-quickstart-mxnet.rst:61 msgid "" "We use MXNet to load MNIST, a popular image classification dataset of " "handwritten digits for machine learning. The MXNet utility " ":code:`mx.test_utils.get_mnist()` downloads the training and test data." msgstr "" +"我们使用 MXNet 加载 MNIST,这是一个用于机器学习的流行手写数字图像分类数据集。MXNet 工具 " +":code:`mx.test_utils.get_mnist()` 会下载训练和测试数据。" -#: ../../source/tutorial-quickstart-mxnet.rst:73 +#: ../../source/tutorial-quickstart-mxnet.rst:75 msgid "" "Define the training and loss with MXNet. We train the model by looping " "over the dataset, measure the corresponding loss, and optimize it." -msgstr "" +msgstr "用 MXNet 定义训练和损失值。我们在数据集上循环训练模型,测量相应的损失值,并对其进行优化。" -#: ../../source/tutorial-quickstart-mxnet.rst:111 +#: ../../source/tutorial-quickstart-mxnet.rst:113 msgid "" "Next, we define the validation of our machine learning model. We loop " "over the test set and measure both loss and accuracy on the test set." -msgstr "" +msgstr "接下来,我们定义机器学习模型的验证。我们在测试集上循环,测量测试集上的损失值和准确率。" -#: ../../source/tutorial-quickstart-mxnet.rst:135 +#: ../../source/tutorial-quickstart-mxnet.rst:137 msgid "" "After defining the training and testing of a MXNet machine learning " "model, we use these functions to implement a Flower client." 
-msgstr "" +msgstr "在定义了 MXNet 机器学习模型的训练和测试后,我们使用这些函数实现了 Flower 客户端。" -#: ../../source/tutorial-quickstart-mxnet.rst:137 +#: ../../source/tutorial-quickstart-mxnet.rst:139 msgid "Our Flower clients will use a simple :code:`Sequential` model:" -msgstr "" +msgstr "我们的 Flower 客户端将使用简单的 :code:`Sequential` 模型:" -#: ../../source/tutorial-quickstart-mxnet.rst:156 +#: ../../source/tutorial-quickstart-mxnet.rst:158 msgid "" "After loading the dataset with :code:`load_data()` we perform one forward" " propagation to initialize the model and model parameters with " ":code:`model(init)`. Next, we implement a Flower client." msgstr "" +"使用 :code:`load_data()` 加载数据集后,我们会执行一次前向传播,使用 :code:`model(init)` " +"初始化模型和模型参数。接下来,我们实现一个 Flower 客户端。" -#: ../../source/tutorial-quickstart-mxnet.rst:158 +#: ../../source/tutorial-quickstart-mxnet.rst:160 #: ../../source/tutorial-quickstart-pytorch.rst:144 #: ../../source/tutorial-quickstart-tensorflow.rst:54 msgid "" @@ -10943,8 +16963,11 @@ msgid "" "those instructions and calls one of the :code:`Client` methods to run " "your code (i.e., to train the neural network we defined earlier)." 
msgstr "" +"Flower 服务器通过一个名为 :code:`Client` " +"的接口与客户端交互。当服务器选择一个特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会调用 :code:`Client`" +" 方法之一来运行您的代码(即训练我们之前定义的神经网络)。" -#: ../../source/tutorial-quickstart-mxnet.rst:164 +#: ../../source/tutorial-quickstart-mxnet.rst:166 msgid "" "Flower provides a convenience class called :code:`NumPyClient` which " "makes it easier to implement the :code:`Client` interface when your " @@ -10952,63 +16975,65 @@ msgid "" "defining the following methods (:code:`set_parameters` is optional " "though):" msgstr "" +"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用 MXNet 时,它可以让您更轻松地实现 " +":code:`Client` 接口。实现 :code:`NumPyClient` " +"通常意味着定义以下方法(:code:`set_parameters` 是可选的):" -#: ../../source/tutorial-quickstart-mxnet.rst:170 +#: ../../source/tutorial-quickstart-mxnet.rst:172 #: ../../source/tutorial-quickstart-pytorch.rst:156 #: ../../source/tutorial-quickstart-scikitlearn.rst:109 msgid "return the model weight as a list of NumPy ndarrays" -msgstr "" +msgstr "以 NumPy ndarrays 列表形式返回模型参数" -#: ../../source/tutorial-quickstart-mxnet.rst:171 +#: ../../source/tutorial-quickstart-mxnet.rst:173 #: ../../source/tutorial-quickstart-pytorch.rst:157 #: ../../source/tutorial-quickstart-scikitlearn.rst:111 msgid ":code:`set_parameters` (optional)" -msgstr "" +msgstr ":code:`set_parameters` (可选)" -#: ../../source/tutorial-quickstart-mxnet.rst:172 +#: ../../source/tutorial-quickstart-mxnet.rst:174 #: ../../source/tutorial-quickstart-pytorch.rst:158 #: ../../source/tutorial-quickstart-scikitlearn.rst:111 msgid "" "update the local model weights with the parameters received from the " "server" -msgstr "" +msgstr "用从服务器接收到的参数更新本地模型参数" -#: ../../source/tutorial-quickstart-mxnet.rst:174 +#: ../../source/tutorial-quickstart-mxnet.rst:176 #: ../../source/tutorial-quickstart-pytorch.rst:160 #: ../../source/tutorial-quickstart-scikitlearn.rst:114 msgid "set the local model weights" -msgstr "" +msgstr "设置本地模型参数" -#: ../../source/tutorial-quickstart-mxnet.rst:175 +#: 
../../source/tutorial-quickstart-mxnet.rst:177 #: ../../source/tutorial-quickstart-pytorch.rst:161 #: ../../source/tutorial-quickstart-scikitlearn.rst:115 msgid "train the local model" -msgstr "" +msgstr "训练本地模型" -#: ../../source/tutorial-quickstart-mxnet.rst:176 +#: ../../source/tutorial-quickstart-mxnet.rst:178 #: ../../source/tutorial-quickstart-pytorch.rst:162 #: ../../source/tutorial-quickstart-scikitlearn.rst:116 msgid "receive the updated local model weights" -msgstr "" +msgstr "接收更新的本地模型参数" -#: ../../source/tutorial-quickstart-mxnet.rst:178 +#: ../../source/tutorial-quickstart-mxnet.rst:180 #: ../../source/tutorial-quickstart-pytorch.rst:164 #: ../../source/tutorial-quickstart-scikitlearn.rst:118 msgid "test the local model" -msgstr "" +msgstr "测试本地模型" -#: ../../source/tutorial-quickstart-mxnet.rst:180 +#: ../../source/tutorial-quickstart-mxnet.rst:182 msgid "They can be implemented in the following way:" -msgstr "" +msgstr "它们可以通过以下方式实现:" -#: ../../source/tutorial-quickstart-mxnet.rst:210 +#: ../../source/tutorial-quickstart-mxnet.rst:212 msgid "" "We can now create an instance of our class :code:`MNISTClient` and add " "one line to actually run this client:" -msgstr "" +msgstr "现在我们可以创建一个 :code:`MNISTClient` 类的实例,并添加一行来实际运行该客户端:" -#: ../../source/tutorial-quickstart-mxnet.rst:217 -#: ../../source/tutorial-quickstart-scikitlearn.rst:150 +#: ../../source/tutorial-quickstart-mxnet.rst:219 msgid "" "That's it for the client. We only have to implement :code:`Client` or " ":code:`NumPyClient` and call :code:`fl.client.start_client()` or " @@ -11019,43 +17044,49 @@ msgid "" "workload with the server and clients running on different machines, all " "that needs to change is the :code:`server_address` we pass to the client." 
msgstr "" +"这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient` 并调用 " +":code:`fl.client.start_client()` 或 " +":code:`fl.client.start_numpy_client()`。字符串 " +":code:`\"0.0.0.0:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " +":code:`\"0.0.0.0:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是传递给客户端的" +" :code:`server_address`。" -#: ../../source/tutorial-quickstart-mxnet.rst:239 +#: ../../source/tutorial-quickstart-mxnet.rst:241 msgid "" "With both client and server ready, we can now run everything and see " "federated learning in action. Federated learning systems usually have a " "server and multiple clients. We therefore have to start the server first:" -msgstr "" +msgstr "客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" -#: ../../source/tutorial-quickstart-mxnet.rst:247 +#: ../../source/tutorial-quickstart-mxnet.rst:249 #: ../../source/tutorial-quickstart-pytorch.rst:226 #: ../../source/tutorial-quickstart-scikitlearn.rst:224 #: ../../source/tutorial-quickstart-tensorflow.rst:122 -#: ../../source/tutorial-quickstart-xgboost.rst:530 +#: ../../source/tutorial-quickstart-xgboost.rst:533 msgid "" "Once the server is running we can start the clients in different " "terminals. 
Open a new terminal and start the first client:" -msgstr "" +msgstr "服务器运行后,我们就可以在不同终端启动客户端了。打开一个新终端,启动第一个客户端:" -#: ../../source/tutorial-quickstart-mxnet.rst:254 +#: ../../source/tutorial-quickstart-mxnet.rst:256 #: ../../source/tutorial-quickstart-pytorch.rst:233 #: ../../source/tutorial-quickstart-scikitlearn.rst:231 #: ../../source/tutorial-quickstart-tensorflow.rst:129 -#: ../../source/tutorial-quickstart-xgboost.rst:537 +#: ../../source/tutorial-quickstart-xgboost.rst:540 msgid "Open another terminal and start the second client:" -msgstr "" +msgstr "打开另一台终端,启动第二个客户端:" -#: ../../source/tutorial-quickstart-mxnet.rst:260 +#: ../../source/tutorial-quickstart-mxnet.rst:262 #: ../../source/tutorial-quickstart-pytorch.rst:239 #: ../../source/tutorial-quickstart-scikitlearn.rst:237 -#: ../../source/tutorial-quickstart-xgboost.rst:543 +#: ../../source/tutorial-quickstart-xgboost.rst:546 msgid "" "Each client will have its own dataset. You should now see how the " "training does in the very first terminal (the one that started the " "server):" -msgstr "" +msgstr "每个客户端都有自己的数据集。现在你应该看到第一个终端(启动服务器的终端)的训练效果了:" -#: ../../source/tutorial-quickstart-mxnet.rst:292 +#: ../../source/tutorial-quickstart-mxnet.rst:294 msgid "" "Congratulations! You've successfully built and run your first federated " "learning system. The full `source code " @@ -11063,20 +17094,23 @@ msgid "" "mxnet/client.py>`_ for this example can be found in :code:`examples" "/quickstart-mxnet`." msgstr "" +"恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " +"`_ 可在 :code:`examples/quickstart-mxnet` 中找到。" #: ../../source/tutorial-quickstart-pandas.rst:-1 msgid "" "Check out this Federated Learning quickstart tutorial for using Flower " "with Pandas to perform Federated Analytics." 
-msgstr "" +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 Pandas 执行联邦分析。" #: ../../source/tutorial-quickstart-pandas.rst:5 msgid "Quickstart Pandas" -msgstr "" +msgstr "快速入门Pandas" #: ../../source/tutorial-quickstart-pandas.rst:10 msgid "Let's build a federated analytics system using Pandas and Flower!" -msgstr "" +msgstr "让我们使用 Pandas 和 Flower 建立一个联邦分析系统!" #: ../../source/tutorial-quickstart-pandas.rst:12 msgid "" @@ -11084,32 +17118,36 @@ msgid "" "`_ " "to learn more." msgstr "" +"请参阅 `完整代码示例 `_\" 了解更多信息。" #: ../../source/tutorial-quickstart-pytorch.rst:-1 msgid "" "Check out this Federated Learning quickstart tutorial for using Flower " "with PyTorch to train a CNN model on MNIST." -msgstr "" +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch 在 MNIST 上训练 CNN 模型。" #: ../../source/tutorial-quickstart-pytorch.rst:13 msgid "" "In this tutorial we will learn how to train a Convolutional Neural " "Network on CIFAR10 using Flower and PyTorch." -msgstr "" +msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 CIFAR10 上训练卷积神经网络。" #: ../../source/tutorial-quickstart-pytorch.rst:15 -#: ../../source/tutorial-quickstart-xgboost.rst:36 +#: ../../source/tutorial-quickstart-xgboost.rst:39 msgid "" "First of all, it is recommended to create a virtual environment and run " -"everything within a `virtualenv `_." msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" #: ../../source/tutorial-quickstart-pytorch.rst:29 msgid "" "Since we want to use PyTorch to solve a computer vision task, let's go " "ahead and install PyTorch and the **torchvision** library:" -msgstr "" +msgstr "既然我们想用 PyTorch 解决计算机视觉任务,那就继续安装 PyTorch 和 **torchvision** 库吧:" #: ../../source/tutorial-quickstart-pytorch.rst:39 msgid "" @@ -11119,16 +17157,19 @@ msgid "" "with PyTorch " "`_." 
msgstr "" +"现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 PyTorch " +"的《Deep Learning with PyTorch " +"`_》。" #: ../../source/tutorial-quickstart-pytorch.rst:41 msgid "" "In a file called :code:`client.py`, import Flower and PyTorch related " "packages:" -msgstr "" +msgstr "在名为 :code:`client.py` 的文件中,导入 Flower 和 PyTorch 相关软件包:" #: ../../source/tutorial-quickstart-pytorch.rst:56 msgid "In addition, we define the device allocation in PyTorch with:" -msgstr "" +msgstr "此外,我们还在 PyTorch 中定义了设备分配:" #: ../../source/tutorial-quickstart-pytorch.rst:62 msgid "" @@ -11136,37 +17177,39 @@ msgid "" "dataset for machine learning. The PyTorch :code:`DataLoader()` downloads " "the training and test data that are then normalized." msgstr "" +"我们使用 PyTorch 来加载 CIFAR10,这是一个用于机器学习的流行彩色图像分类数据集。PyTorch " +":code:`DataLoader()`下载训练数据和测试数据,然后进行归一化处理。" #: ../../source/tutorial-quickstart-pytorch.rst:78 msgid "" "Define the loss and optimizer with PyTorch. The training of the dataset " "is done by looping over the dataset, measure the corresponding loss and " "optimize it." -msgstr "" +msgstr "使用 PyTorch 定义损失和优化器。数据集的训练是通过循环数据集、测量相应的损失值并对其进行优化来完成的。" #: ../../source/tutorial-quickstart-pytorch.rst:94 msgid "" "Define then the validation of the machine learning network. We loop over" " the test set and measure the loss and accuracy of the test set." -msgstr "" +msgstr "然后定义机器学习网络的验证。我们在测试集上循环,计算测试集的损失值和准确率。" #: ../../source/tutorial-quickstart-pytorch.rst:113 msgid "" "After defining the training and testing of a PyTorch machine learning " "model, we use the functions for the Flower clients." 
-msgstr "" +msgstr "在定义了 PyTorch 机器学习模型的训练和测试之后,我们将这些功能用于 Flower 客户端。" #: ../../source/tutorial-quickstart-pytorch.rst:115 msgid "" "The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 " "Minute Blitz':" -msgstr "" +msgstr "Flower 客户端将使用一个简单的从“PyTorch: 60 分钟突击\"改编的CNN:" #: ../../source/tutorial-quickstart-pytorch.rst:142 msgid "" "After loading the data set with :code:`load_data()` we define the Flower " "interface." -msgstr "" +msgstr "使用 :code:`load_data()` 加载数据集后,我们定义了 Flower 接口。" #: ../../source/tutorial-quickstart-pytorch.rst:150 msgid "" @@ -11176,30 +17219,41 @@ msgid "" "defining the following methods (:code:`set_parameters` is optional " "though):" msgstr "" +"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用 PyTorch 时,它使 " +":code:`Client` 接口的实现变得更容易。实现 :code:`NumPyClient` " +"通常意味着定义以下方法(:code:`set_parameters` 是可选的):" #: ../../source/tutorial-quickstart-pytorch.rst:166 msgid "which can be implemented in the following way:" -msgstr "" +msgstr "可以通过以下方式实现:" #: ../../source/tutorial-quickstart-pytorch.rst:189 #: ../../source/tutorial-quickstart-tensorflow.rst:82 msgid "" "We can now create an instance of our class :code:`CifarClient` and add " "one line to actually run this client:" -msgstr "" +msgstr "现在我们可以创建一个 :code:`CifarClient` 类的实例,并添加一行来实际运行该客户端:" #: ../../source/tutorial-quickstart-pytorch.rst:196 #: ../../source/tutorial-quickstart-tensorflow.rst:90 +#, fuzzy msgid "" "That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()` or " -":code:`fl.client.start_numpy_client()`. The string :code:`\"[::]:8080\"` " -"tells the client which server to connect to. In our case we can run the " -"server and the client on the same machine, therefore we use " +":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " +"implement a client of type :code:`NumPyClient` you'll need to first call " +"its :code:`to_client()` method. 
The string :code:`\"[::]:8080\"` tells " +"the client which server to connect to. In our case we can run the server " +"and the client on the same machine, therefore we use " ":code:`\"[::]:8080\"`. If we run a truly federated workload with the " "server and clients running on different machines, all that needs to " "change is the :code:`server_address` we point the client at." msgstr "" +"这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient` 并调用 " +":code:`fl.client.start_client()` 或 " +":code:`fl.client.start_numpy_client()`。字符串 " +":code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此使用 " +":code:`\"[::]:8080\"。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户端指向的 " +":code:`server_address`。" #: ../../source/tutorial-quickstart-pytorch.rst:271 msgid "" @@ -11209,22 +17263,25 @@ msgid "" "pytorch/client.py>`_ for this example can be found in :code:`examples" "/quickstart-pytorch`." msgstr "" +"恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " +"`_ 可以在 :code:`examples/quickstart-pytorch` 中找到。" #: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 msgid "" "Check out this Federated Learning quickstart tutorial for using Flower " "with PyTorch Lightning to train an Auto Encoder model on MNIST." -msgstr "" +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch Lightning 在 MNIST 上训练自动编码器模型。" #: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 msgid "Quickstart PyTorch Lightning" -msgstr "" +msgstr "快速入门 PyTorch Lightning" #: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 msgid "" "Let's build a horizontal federated learning system using PyTorch " "Lightning and Flower!" -msgstr "" +msgstr "让我们使用 PyTorch Lightning 和 Flower 构建一个水平联邦学习系统!" #: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 msgid "" @@ -11232,30 +17289,34 @@ msgid "" "`_ to learn more." 
msgstr "" +"请参阅 `完整代码示例 `_ 了解更多信息。" #: ../../source/tutorial-quickstart-scikitlearn.rst:-1 msgid "" "Check out this Federated Learning quickstart tutorial for using Flower " "with scikit-learn to train a linear regression model." -msgstr "" +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 scikit-learn 训练线性回归模型。" #: ../../source/tutorial-quickstart-scikitlearn.rst:5 msgid "Quickstart scikit-learn" -msgstr "" +msgstr "scikit-learn快速入门" #: ../../source/tutorial-quickstart-scikitlearn.rst:10 msgid "" "In this tutorial, we will learn how to train a :code:`Logistic " "Regression` model on MNIST using Flower and scikit-learn." msgstr "" +"在本教程中,我们将学习如何使用 Flower 和 scikit-learn 在 MNIST 上训练一个 :code:`Logistic " +"Regression` 模型。" #: ../../source/tutorial-quickstart-scikitlearn.rst:26 msgid "Since we want to use scikt-learn, let's go ahead and install it:" -msgstr "" +msgstr "既然我们要使用 scikt-learn,那就继续安装吧:" #: ../../source/tutorial-quickstart-scikitlearn.rst:32 msgid "Or simply install all dependencies using Poetry:" -msgstr "" +msgstr "或者直接使用 Poetry 安装所有依赖项:" #: ../../source/tutorial-quickstart-scikitlearn.rst:42 msgid "" @@ -11266,54 +17327,56 @@ msgid "" ":code:`utils.py` contains different functions defining all the machine " "learning basics:" msgstr "" +"现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。不过,在设置客户端和服务器之前,我们将在 " +":code:`utils.py` 中定义联邦学习设置所需的所有功能。:code:`utils.py`包含定义所有机器学习基础知识的不同函数:" #: ../../source/tutorial-quickstart-scikitlearn.rst:45 msgid ":code:`get_model_parameters()`" -msgstr "" +msgstr ":code:`get_model_parameters()`" #: ../../source/tutorial-quickstart-scikitlearn.rst:46 msgid "Returns the paramters of a :code:`sklearn` LogisticRegression model" -msgstr "" +msgstr "返回 :code:`sklearn` LogisticRegression 模型的参数" #: ../../source/tutorial-quickstart-scikitlearn.rst:47 msgid ":code:`set_model_params()`" -msgstr "" +msgstr ":code:`set_model_params()`" #: ../../source/tutorial-quickstart-scikitlearn.rst:48 msgid "Sets the parameters of a :code:`sklean` LogisticRegression 
model" -msgstr "" +msgstr "设置:code:`sklean`的LogisticRegression模型的参数" #: ../../source/tutorial-quickstart-scikitlearn.rst:49 msgid ":code:`set_initial_params()`" -msgstr "" +msgstr ":code:`set_initial_params()`" #: ../../source/tutorial-quickstart-scikitlearn.rst:50 msgid "Initializes the model parameters that the Flower server will ask for" -msgstr "" +msgstr "初始化 Flower 服务器将要求的模型参数" #: ../../source/tutorial-quickstart-scikitlearn.rst:51 msgid ":code:`load_mnist()`" -msgstr "" +msgstr ":code:`load_mnist()`" #: ../../source/tutorial-quickstart-scikitlearn.rst:52 msgid "Loads the MNIST dataset using OpenML" -msgstr "" +msgstr "使用 OpenML 加载 MNIST 数据集" #: ../../source/tutorial-quickstart-scikitlearn.rst:53 msgid ":code:`shuffle()`" -msgstr "" +msgstr ":code:`shuffle()`" #: ../../source/tutorial-quickstart-scikitlearn.rst:54 msgid "Shuffles data and its label" -msgstr "" +msgstr "对数据及其标签进行洗牌" #: ../../source/tutorial-quickstart-scikitlearn.rst:56 msgid ":code:`partition()`" -msgstr "" +msgstr ":code:`partition()`" #: ../../source/tutorial-quickstart-scikitlearn.rst:56 msgid "Splits datasets into a number of partitions" -msgstr "" +msgstr "将数据集分割成多个分区" #: ../../source/tutorial-quickstart-scikitlearn.rst:58 msgid "" @@ -11323,6 +17386,10 @@ msgid "" " the :code:`client.py` and imported. The :code:`client.py` also requires " "to import several packages such as Flower and scikit-learn:" msgstr "" +"更多详情请查看 :code:`utils.py`` 这里 " +"`_。在 :code:`client.py` 中使用并导入了预定义函数。:code:`client.py` " +"还需要导入几个软件包,如 Flower 和 scikit-learn:" #: ../../source/tutorial-quickstart-scikitlearn.rst:73 msgid "" @@ -11332,12 +17399,15 @@ msgid "" "and test data. The training set is split afterwards into 10 partitions " "with :code:`utils.partition()`." 
msgstr "" +"我们从 `OpenML `_ 中加载 MNIST " +"数据集,这是一个用于机器学习的流行手写数字图像分类数据集。实用程序 :code:`utils.load_mnist()` " +"下载训练和测试数据。然后使用 :code:`utils.partition()`将训练集分割成 10 个分区。" #: ../../source/tutorial-quickstart-scikitlearn.rst:85 msgid "" "Next, the logistic regression model is defined and initialized with " ":code:`utils.set_initial_params()`." -msgstr "" +msgstr "接下来,使用 :code:`utils.set_initial_params()` 对逻辑回归模型进行定义和初始化。" #: ../../source/tutorial-quickstart-scikitlearn.rst:97 msgid "" @@ -11347,6 +17417,9 @@ msgid "" "those instructions and calls one of the :code:`Client` methods to run " "your code (i.e., to fit the logistic regression we defined earlier)." msgstr "" +"Flower 服务器通过一个名为 :code:`Client` " +"的接口与客户端交互。当服务器选择一个特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会调用 :code:`Client`" +" 方法之一来运行您的代码(即拟合我们之前定义的逻辑回归)。" #: ../../source/tutorial-quickstart-scikitlearn.rst:103 msgid "" @@ -11356,20 +17429,43 @@ msgid "" "means defining the following methods (:code:`set_parameters` is optional " "though):" msgstr "" +"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当你的工作负载使用 scikit-learn " +"时,它可以让你更容易地实现 :code:`Client` 接口。实现 :code:`NumPyClient` " +"通常意味着定义以下方法(:code:`set_parameters` 是可选的):" #: ../../source/tutorial-quickstart-scikitlearn.rst:112 msgid "is directly imported with :code:`utils.set_model_params()`" -msgstr "" +msgstr "直接导入 :code:`utils.set_model_params()`" #: ../../source/tutorial-quickstart-scikitlearn.rst:120 msgid "The methods can be implemented in the following way:" -msgstr "" +msgstr "这些方法可以通过以下方式实现:" #: ../../source/tutorial-quickstart-scikitlearn.rst:143 msgid "" "We can now create an instance of our class :code:`MnistClient` and add " "one line to actually run this client:" +msgstr "现在我们可以创建一个 :code:`MnistClient` 类的实例,并添加一行来实际运行该客户端:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:150 +#, fuzzy +msgid "" +"That's it for the client. We only have to implement :code:`Client` or " +":code:`NumPyClient` and call :code:`fl.client.start_client()`. 
If you " +"implement a client of type :code:`NumPyClient` you'll need to first call " +"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" +" the client which server to connect to. In our case we can run the server" +" and the client on the same machine, therefore we use " +":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " +"server and clients running on different machines, all that needs to " +"change is the :code:`server_address` we pass to the client." msgstr "" +"这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient` 并调用 " +":code:`fl.client.start_client()` 或 " +":code:`fl.client.start_numpy_client()`。字符串 " +":code:`\"0.0.0.0:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " +":code:`\"0.0.0.0:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是传递给客户端的" +" :code:`server_address`。" #: ../../source/tutorial-quickstart-scikitlearn.rst:159 msgid "" @@ -11377,10 +17473,12 @@ msgid "" "evaluation function for the server-side evaluation. First, we import " "again all required libraries such as Flower and scikit-learn." msgstr "" +"下面的 Flower 服务器更先进一些,会返回一个用于服务器端评估的评估函数。首先,我们再次导入所有需要的库,如 Flower 和 scikit-" +"learn。" #: ../../source/tutorial-quickstart-scikitlearn.rst:162 msgid ":code:`server.py`, import Flower and start the server:" -msgstr "" +msgstr ":code:`server.py`, 导入 Flower 并启动服务器:" #: ../../source/tutorial-quickstart-scikitlearn.rst:173 msgid "" @@ -11389,6 +17487,8 @@ msgid "" "function is called after each federated learning round and gives you " "information about loss and accuracy." msgstr "" +"联邦学习轮数在 :code:`fit_round()` 中设置,评估在 :code:`get_evaluate_fn()` " +"中定义。每轮联邦学习后都会调用评估函数,并提供有关损失值和准确率的信息。" #: ../../source/tutorial-quickstart-scikitlearn.rst:198 msgid "" @@ -11400,6 +17500,11 @@ msgid "" " :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " "strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." 
msgstr "" +":code:`main`包含服务器端参数初始化:code:`utils.set_initial_params()`以及聚合策略 " +":code:`fl.server.strategy:FedAvg()`。该策略是默认的联邦平均(或 " +"FedAvg)策略,有两个客户端,在每轮联邦学习后进行评估。可以使用 " +":code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))` 命令启动服务器。" #: ../../source/tutorial-quickstart-scikitlearn.rst:217 msgid "" @@ -11407,7 +17512,7 @@ msgid "" "federated learning in action. Federated learning systems usually have a " "server and multiple clients. We, therefore, have to start the server " "first:" -msgstr "" +msgstr "客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" #: ../../source/tutorial-quickstart-scikitlearn.rst:271 msgid "" @@ -11417,34 +17522,37 @@ msgid "" "mnist>`_ for this example can be found in :code:`examples/sklearn-logreg-" "mnist`." msgstr "" +"恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " +"`_ 可以在 :code:`examples/sklearn-logreg-mnist` 中找到。" #: ../../source/tutorial-quickstart-tensorflow.rst:-1 msgid "" "Check out this Federated Learning quickstart tutorial for using Flower " "with TensorFlow to train a MobilNetV2 model on CIFAR-10." -msgstr "" +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 TensorFlow 在 CIFAR-10 上训练 MobilNetV2 模型。" #: ../../source/tutorial-quickstart-tensorflow.rst:5 msgid "Quickstart TensorFlow" -msgstr "" +msgstr "快速入门 TensorFlow" #: ../../source/tutorial-quickstart-tensorflow.rst:13 msgid "Let's build a federated learning system in less than 20 lines of code!" -msgstr "" +msgstr "让我们用不到 20 行代码构建一个联邦学习系统!" 
#: ../../source/tutorial-quickstart-tensorflow.rst:15 msgid "Before Flower can be imported we have to install it:" -msgstr "" +msgstr "在导入 Flower 之前,我们必须先安装它:" #: ../../source/tutorial-quickstart-tensorflow.rst:21 msgid "" "Since we want to use the Keras API of TensorFlow (TF), we have to install" " TF as well:" -msgstr "" +msgstr "由于我们要使用 TensorFlow (TF) 的 Keras API,因此还必须安装 TF:" #: ../../source/tutorial-quickstart-tensorflow.rst:31 msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" -msgstr "" +msgstr "接下来,在名为 :code:`client.py` 的文件中导入 Flower 和 TensorFlow:" #: ../../source/tutorial-quickstart-tensorflow.rst:38 msgid "" @@ -11454,12 +17562,15 @@ msgid "" "it locally, and then returns the entire training and test set as NumPy " "ndarrays." msgstr "" +"我们使用 TF 的 Keras 实用程序加载 CIFAR10,这是一个用于机器学习的流行彩色图像分类数据集。调用 " +":code:`tf.keras.datasets.cifar10.load_data()` 会下载 CIFAR10,将其缓存到本地,然后以 " +"NumPy ndarrays 的形式返回整个训练集和测试集。" #: ../../source/tutorial-quickstart-tensorflow.rst:47 msgid "" "Next, we need a model. For the purpose of this tutorial, we use " "MobilNetV2 with 10 output classes:" -msgstr "" +msgstr "接下来,我们需要一个模型。在本教程中,我们使用带有 10 个输出类的 MobilNetV2:" #: ../../source/tutorial-quickstart-tensorflow.rst:60 msgid "" @@ -11468,16 +17579,18 @@ msgid "" "workload uses Keras. The :code:`NumPyClient` interface defines three " "methods which can be implemented in the following way:" msgstr "" +"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用 Keras 时,该类可以更轻松地实现 " +":code:`Client` 接口。:code:`NumPyClient` 接口定义了三个方法,可以通过以下方式实现:" #: ../../source/tutorial-quickstart-tensorflow.rst:135 msgid "Each client will have its own dataset." 
-msgstr "" +msgstr "每个客户都有自己的数据集。" #: ../../source/tutorial-quickstart-tensorflow.rst:137 msgid "" "You should now see how the training does in the very first terminal (the " "one that started the server):" -msgstr "" +msgstr "现在你应该能在第一个终端(启动服务器的终端)看到训练的效果了:" #: ../../source/tutorial-quickstart-tensorflow.rst:169 msgid "" @@ -11487,22 +17600,26 @@ msgid "" "tensorflow/client.py>`_ for this can be found in :code:`examples" "/quickstart-tensorflow/client.py`." msgstr "" +"恭喜您!您已经成功构建并运行了第一个联邦学习系统。`完整的源代码 " +"`_ 可以在 :code:`examples/quickstart-" +"tensorflow/client.py` 中找到。" #: ../../source/tutorial-quickstart-xgboost.rst:-1 msgid "" "Check out this Federated Learning quickstart tutorial for using Flower " "with XGBoost to train classification models on trees." -msgstr "" +msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 XGBoost 上训练分类模型。" #: ../../source/tutorial-quickstart-xgboost.rst:5 msgid "Quickstart XGBoost" -msgstr "" +msgstr "XGBoost快速入门" -#: ../../source/tutorial-quickstart-xgboost.rst:11 +#: ../../source/tutorial-quickstart-xgboost.rst:14 msgid "Federated XGBoost" -msgstr "" +msgstr "联邦化 XGBoost" -#: ../../source/tutorial-quickstart-xgboost.rst:13 +#: ../../source/tutorial-quickstart-xgboost.rst:16 msgid "" "EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " "implementation of gradient-boosted decision tree (**GBDT**), that " @@ -11511,27 +17628,30 @@ msgid "" "speed of machine learning models. In XGBoost, trees are constructed " "concurrently, unlike the sequential approach taken by GBDT." msgstr "" +"EXtreme Gradient " +"Boosting(**XGBoost**)是梯度提升决策树(**GBDT**)的一种稳健而高效的实现方法,能最大限度地提高提升树方法的计算边界。它主要用于提高机器学习模型的性能和计算速度。在" +" XGBoost 中,决策树是并发构建的,与 GBDT 采用的顺序方法不同。" -#: ../../source/tutorial-quickstart-xgboost.rst:17 +#: ../../source/tutorial-quickstart-xgboost.rst:20 msgid "" "Often, for tabular data on medium-sized datasets with fewer than 10k " "training examples, XGBoost surpasses the results of deep learning " "techniques." 
-msgstr "" +msgstr "对于训练示例少于 10k 的中型数据集上的表格数据,XGBoost 的结果往往超过深度学习技术。" -#: ../../source/tutorial-quickstart-xgboost.rst:20 +#: ../../source/tutorial-quickstart-xgboost.rst:23 msgid "Why federated XGBoost?" -msgstr "" +msgstr "为什么选择联邦 XGBoost?" -#: ../../source/tutorial-quickstart-xgboost.rst:22 +#: ../../source/tutorial-quickstart-xgboost.rst:25 msgid "" "Indeed, as the demand for data privacy and decentralized learning grows, " "there's an increasing requirement to implement federated XGBoost systems " "for specialised applications, like survival analysis and financial fraud " "detection." -msgstr "" +msgstr "事实上,随着对数据隐私和分散学习的需求不断增长,越来越多的专业应用(如生存分析和金融欺诈检测)需要实施联邦 XGBoost 系统。" -#: ../../source/tutorial-quickstart-xgboost.rst:24 +#: ../../source/tutorial-quickstart-xgboost.rst:27 msgid "" "Federated learning ensures that raw data remains on the local device, " "making it an attractive approach for sensitive domains where data " @@ -11539,8 +17659,10 @@ msgid "" "of XGBoost, combining it with federated learning offers a promising " "solution for these specific challenges." msgstr "" +"联邦学习可确保原始数据保留在本地设备上,因此对于数据安全和隐私至关重要的敏感领域来说,这是一种极具吸引力的方法。鉴于 XGBoost " +"的稳健性和高效性,将其与联邦学习相结合为应对这些特定挑战提供了一种前景广阔的解决方案。" -#: ../../source/tutorial-quickstart-xgboost.rst:27 +#: ../../source/tutorial-quickstart-xgboost.rst:30 msgid "" "In this tutorial we will learn how to train a federated XGBoost model on " "HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " @@ -11551,104 +17673,120 @@ msgid "" "comprehensive `_) to run various experiments." 
msgstr "" +"在本教程中,我们将学习如何使用 Flower 和 :code:`xgboost` 软件包在 HIGGS 数据集上训练联邦 XGBoost " +"模型。我们将使用一个包含两个 * 客户端* 和一个 * 服务器* 的简单示例 (`完整代码 xgboost-quickstart " +"`_)来演示联邦 XGBoost 如何工作,然后我们将深入到一个更复杂的示例 (`完整代码 xgboost-" +"comprehensive `_),以运行各种实验。" -#: ../../source/tutorial-quickstart-xgboost.rst:34 +#: ../../source/tutorial-quickstart-xgboost.rst:37 msgid "Environment Setup" -msgstr "" +msgstr "环境设定" -#: ../../source/tutorial-quickstart-xgboost.rst:38 +#: ../../source/tutorial-quickstart-xgboost.rst:41 msgid "" "We first need to install Flower and Flower Datasets. You can do this by " "running :" -msgstr "" +msgstr "我们首先需要安装 Flower 和 Flower Datasets。您可以通过运行 :" -#: ../../source/tutorial-quickstart-xgboost.rst:44 +#: ../../source/tutorial-quickstart-xgboost.rst:47 msgid "" "Since we want to use :code:`xgboost` package to build up XGBoost trees, " "let's go ahead and install :code:`xgboost`:" -msgstr "" +msgstr "既然我们要使用 :code:`xgboost` 软件包来构建 XGBoost 树,那就继续安装 :code:`xgboost`:" -#: ../../source/tutorial-quickstart-xgboost.rst:54 +#: ../../source/tutorial-quickstart-xgboost.rst:57 msgid "" "*Clients* are responsible for generating individual weight-updates for " "the model based on their local datasets. Now that we have all our " "dependencies installed, let's run a simple distributed training with two " "clients and one server." 
-msgstr "" +msgstr "*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。" -#: ../../source/tutorial-quickstart-xgboost.rst:57 +#: ../../source/tutorial-quickstart-xgboost.rst:60 msgid "" "In a file called :code:`client.py`, import xgboost, Flower, Flower " "Datasets and other related functions:" -msgstr "" +msgstr "在名为 :code:`client.py` 的文件中,导入 xgboost、Flower、Flower Datasets 和其他相关函数:" -#: ../../source/tutorial-quickstart-xgboost.rst:84 +#: ../../source/tutorial-quickstart-xgboost.rst:87 msgid "Dataset partition and hyper-parameter selection" -msgstr "" +msgstr "数据集划分和超参数选择" -#: ../../source/tutorial-quickstart-xgboost.rst:86 +#: ../../source/tutorial-quickstart-xgboost.rst:89 msgid "" "Prior to local training, we require loading the HIGGS dataset from Flower" " Datasets and conduct data partitioning for FL:" -msgstr "" +msgstr "在本地训练之前,我们需要从 Flower Datasets 加载 HIGGS 数据集,并对 FL 进行数据分区:" -#: ../../source/tutorial-quickstart-xgboost.rst:99 +#: ../../source/tutorial-quickstart-xgboost.rst:102 msgid "" "In this example, we split the dataset into two partitions with uniform " "distribution (:code:`IidPartitioner(num_partitions=2)`). Then, we load " "the partition for the given client based on :code:`node_id`:" msgstr "" +"在此示例中,我们将数据集分割成两个均匀分布的分区(:code:`IidPartitioner(num_partitions=2)`)。然后,我们根据" +" :code:`node_id` 为给定客户端加载分区:" -#: ../../source/tutorial-quickstart-xgboost.rst:118 +#: ../../source/tutorial-quickstart-xgboost.rst:121 msgid "" "After that, we do train/test splitting on the given partition (client's " "local data), and transform data format for :code:`xgboost` package." 
-msgstr "" +msgstr "然后,我们在给定的分区(客户端的本地数据)上进行训练/测试分割,并为 :code:`xgboost` 软件包转换数据格式。" -#: ../../source/tutorial-quickstart-xgboost.rst:131 +#: ../../source/tutorial-quickstart-xgboost.rst:134 msgid "" "The functions of :code:`train_test_split` and " ":code:`transform_dataset_to_dmatrix` are defined as below:" -msgstr "" +msgstr ":code:`train_test_split` 和 :code:`transform_dataset_to_dmatrix` 的函数定义如下:" -#: ../../source/tutorial-quickstart-xgboost.rst:155 +#: ../../source/tutorial-quickstart-xgboost.rst:158 msgid "Finally, we define the hyper-parameters used for XGBoost training." -msgstr "" +msgstr "最后,我们定义了用于 XGBoost 训练的超参数。" -#: ../../source/tutorial-quickstart-xgboost.rst:171 +#: ../../source/tutorial-quickstart-xgboost.rst:174 msgid "" "The :code:`num_local_round` represents the number of iterations for local" " tree boost. We use CPU for the training in default. One can shift it to " "GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " "evaluation metric." msgstr "" +":code:`num_local_round` 表示本地树的迭代次数。我们默认使用 CPU 进行训练。可以通过将 :code:`tree_method` " +"设置为 :code:`gpu_hist`,将其转换为 GPU。我们使用 AUC 作为评估指标。" -#: ../../source/tutorial-quickstart-xgboost.rst:178 +#: ../../source/tutorial-quickstart-xgboost.rst:181 msgid "Flower client definition for XGBoost" -msgstr "" +msgstr "用于 XGBoost 的 Flower 客户端定义" -#: ../../source/tutorial-quickstart-xgboost.rst:180 +#: ../../source/tutorial-quickstart-xgboost.rst:183 msgid "" "After loading the dataset we define the Flower client. We follow the " "general rule to define :code:`XgbClient` class inherited from " ":code:`fl.client.Client`." 
msgstr "" +"加载数据集后,我们定义 Flower 客户端。我们按照一般规则定义从 :code:`fl.client.Client` 继承而来的 " +":code:`XgbClient` 类。" -#: ../../source/tutorial-quickstart-xgboost.rst:190 +#: ../../source/tutorial-quickstart-xgboost.rst:193 msgid "" "The :code:`self.bst` is used to keep the Booster objects that remain " "consistent across rounds, allowing them to store predictions from trees " "integrated in earlier rounds and maintain other essential data structures" " for training." msgstr "" +":code:`self.bst` 用于保存在各轮中保持一致的 Booster " +"对象,使其能够存储在前几轮中集成的树的预测结果,并维护其他用于训练的重要数据结构。" -#: ../../source/tutorial-quickstart-xgboost.rst:193 +#: ../../source/tutorial-quickstart-xgboost.rst:196 msgid "" "Then, we override :code:`get_parameters`, :code:`fit` and " ":code:`evaluate` methods insides :code:`XgbClient` class as follows." msgstr "" +"然后,我们在 :code:`XgbClient` 类中重写 :code:`get_parameters`、:code:`fit` 和 " +":code:`evaluate` 方法如下。" -#: ../../source/tutorial-quickstart-xgboost.rst:207 +#: ../../source/tutorial-quickstart-xgboost.rst:210 msgid "" "Unlike neural network training, XGBoost trees are not started from a " "specified random weights. In this case, we do not use " @@ -11657,8 +17795,11 @@ msgid "" ":code:`get_parameters` when it is called by the server at the first " "round." msgstr "" +"与神经网络训练不同,XGBoost 树不是从指定的随机参数开始的。在这种情况下,我们不使用 :code:`get_parameters` 和 " +":code:`set_parameters` 来初始化 XGBoost 的模型参数。因此,当服务器在第一轮调用 " +":code:`get_parameters` 时,让我们在 :code:`get_parameters` 中返回一个空张量。" -#: ../../source/tutorial-quickstart-xgboost.rst:248 +#: ../../source/tutorial-quickstart-xgboost.rst:251 msgid "" "In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " "up the first set of trees. 
the returned Booster object and config are " @@ -11667,27 +17808,33 @@ msgid "" ":code:`self.bst`, and then update model weights on local training data " "with function :code:`local_boost` as follows:" msgstr "" +"在 :code:`fit`中,第一轮我们调用 :code:`xgb.train()`来建立第一组树,返回的 Booster 对象和 config " +"分别存储在 :code:`self.bst` 和 :code:`self.config` 中。从第二轮开始,我们将服务器发送的全局模型加载到 " +":code:`self.bst`,然后使用函数 :code:`local_boost`更新本地训练数据的模型权重,如下所示:" -#: ../../source/tutorial-quickstart-xgboost.rst:266 +#: ../../source/tutorial-quickstart-xgboost.rst:269 msgid "" "Given :code:`num_local_round`, we update trees by calling " ":code:`self.bst.update` method. After training, the last " ":code:`N=num_local_round` trees will be extracted to send to the server." msgstr "" +"给定 :code:`num_local_round`,我们通过调用 " +":code:`self.bst.update`方法更新树。训练结束后,我们将提取最后一个 :code:`N=num_local_round` " +"树并发送给服务器。" -#: ../../source/tutorial-quickstart-xgboost.rst:288 +#: ../../source/tutorial-quickstart-xgboost.rst:291 msgid "" "In :code:`evaluate`, we call :code:`self.bst.eval_set` function to " "conduct evaluation on valid set. The AUC value will be returned." -msgstr "" +msgstr "在 :code:`evaluate`中,我们调用 :code:`self.bst.eval_set`函数对有效集合进行评估。将返回 AUC 值。" -#: ../../source/tutorial-quickstart-xgboost.rst:291 +#: ../../source/tutorial-quickstart-xgboost.rst:294 msgid "" "Now, we can create an instance of our class :code:`XgbClient` and add one" " line to actually run this client:" -msgstr "" +msgstr "现在,我们可以创建一个 :code:`XgbClient` 类的实例,并添加一行来实际运行该客户端:" -#: ../../source/tutorial-quickstart-xgboost.rst:297 +#: ../../source/tutorial-quickstart-xgboost.rst:300 msgid "" "That's it for the client. We only have to implement :code:`Client`and " "call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " @@ -11697,60 +17844,75 @@ msgid "" "server and clients running on different machines, all that needs to " "change is the :code:`server_address` we point the client at." 
msgstr "" +"这就是客户端。我们只需实现 :code:`Client` 并调用 :code:`fl.client.start_client()`。字符串 " +":code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " +":code:`\"[::]:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户端指向的 " +":code:`server_address`。" -#: ../../source/tutorial-quickstart-xgboost.rst:308 +#: ../../source/tutorial-quickstart-xgboost.rst:311 msgid "" "These updates are then sent to the *server* which will aggregate them to " "produce a better model. Finally, the *server* sends this improved version" " of the model back to each *client* to finish a complete FL round." msgstr "" +"然后,这些更新会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将这个改进版的模型发回给每个*客户端*,以完成一轮完整的" +" FL。" -#: ../../source/tutorial-quickstart-xgboost.rst:311 +#: ../../source/tutorial-quickstart-xgboost.rst:314 msgid "" "In a file named :code:`server.py`, import Flower and FedXgbBagging from " ":code:`flwr.server.strategy`." msgstr "" +"在名为 :code:`server.py` 的文件中,从 :code:`flwr.server.strategy` 导入 Flower 和 " +"FedXgbBagging。" -#: ../../source/tutorial-quickstart-xgboost.rst:313 +#: ../../source/tutorial-quickstart-xgboost.rst:316 msgid "We first define a strategy for XGBoost bagging aggregation." -msgstr "" +msgstr "我们首先定义了 XGBoost bagging聚合策略。" -#: ../../source/tutorial-quickstart-xgboost.rst:336 +#: ../../source/tutorial-quickstart-xgboost.rst:339 msgid "" "We use two clients for this example. An " ":code:`evaluate_metrics_aggregation` function is defined to collect and " "wighted average the AUC values from clients." 
msgstr "" +"本示例使用两个客户端。我们定义了一个 :code:`evaluate_metrics_aggregation` 函数,用于收集客户机的 AUC " +"值并求取平均值。" -#: ../../source/tutorial-quickstart-xgboost.rst:339 +#: ../../source/tutorial-quickstart-xgboost.rst:342 msgid "Then, we start the server:" -msgstr "" +msgstr "然后,我们启动服务器:" -#: ../../source/tutorial-quickstart-xgboost.rst:351 +#: ../../source/tutorial-quickstart-xgboost.rst:354 msgid "Tree-based bagging aggregation" -msgstr "" +msgstr "基于树的bagging聚合" -#: ../../source/tutorial-quickstart-xgboost.rst:353 +#: ../../source/tutorial-quickstart-xgboost.rst:356 msgid "" "You must be curious about how bagging aggregation works. Let's look into " "the details." -msgstr "" +msgstr "您一定很好奇bagging聚合是如何工作的。让我们来详细了解一下。" -#: ../../source/tutorial-quickstart-xgboost.rst:355 +#: ../../source/tutorial-quickstart-xgboost.rst:358 msgid "" "In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " ":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." " Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " "and :code:`evaluate` methods as follows:" msgstr "" +"在文件 :code:`flwr.server.strategy.fedxgb_bagging.py`中,我们定义了从 " +":code:`flwr.server.strategy.FedAvg`继承的 :code:`FedXgbBagging`。然后,我们覆盖 " +":code:`aggregate_fit`、:code:`aggregate_evaluate` 和 :code:`evaluate` 方法如下:" -#: ../../source/tutorial-quickstart-xgboost.rst:451 +#: ../../source/tutorial-quickstart-xgboost.rst:454 msgid "" "In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " "trees by calling :code:`aggregate()` function:" msgstr "" +"在 :code:`aggregate_fit` 中,我们通过调用 :code:`aggregate()` 函数,按顺序聚合客户端的 XGBoost" +" 树:" -#: ../../source/tutorial-quickstart-xgboost.rst:510 +#: ../../source/tutorial-quickstart-xgboost.rst:513 msgid "" "In this function, we first fetch the number of trees and the number of " "parallel trees for the current and previous model by calling " @@ -11758,52 +17920,110 @@ msgid "" " After that, the trees (containing model weights) are 
aggregated to " "generate a new tree model." msgstr "" +"在该函数中,我们首先通过调用 :code:`_get_tree_nums` " +"获取当前模型和上一个模型的树数和并行树数。然后,对获取的信息进行聚合。然后,聚合树(包含模型参数)生成新的树模型。" -#: ../../source/tutorial-quickstart-xgboost.rst:515 +#: ../../source/tutorial-quickstart-xgboost.rst:518 msgid "" "After traversal of all clients' models, a new global model is generated, " "followed by the serialisation, and sending back to each client." -msgstr "" +msgstr "在遍历所有客户端的模型后,会生成一个新的全局模型,然后进行序列化,并发回给每个客户端。" -#: ../../source/tutorial-quickstart-xgboost.rst:520 +#: ../../source/tutorial-quickstart-xgboost.rst:523 msgid "Launch Federated XGBoost!" -msgstr "" +msgstr "启动联邦 XGBoost!" -#: ../../source/tutorial-quickstart-xgboost.rst:582 +#: ../../source/tutorial-quickstart-xgboost.rst:585 msgid "" "Congratulations! You've successfully built and run your first federated " "XGBoost system. The AUC values can be checked in " ":code:`metrics_distributed`. One can see that the average AUC increases " "over FL rounds." msgstr "" +"恭喜您!您已成功构建并运行了第一个联邦 XGBoost 系统。可以在 :code:`metrics_distributed` 中查看 AUC " +"值。我们可以看到,平均 AUC 随 FL 轮数的增加而增加。" -#: ../../source/tutorial-quickstart-xgboost.rst:587 +#: ../../source/tutorial-quickstart-xgboost.rst:590 msgid "" "The full `source code `_ for this example can be found in :code:`examples" "/xgboost-quickstart`." msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" -#: ../../source/tutorial-quickstart-xgboost.rst:591 +#: ../../source/tutorial-quickstart-xgboost.rst:594 msgid "Comprehensive Federated XGBoost" -msgstr "" +msgstr "综合的联邦 XGBoost" -#: ../../source/tutorial-quickstart-xgboost.rst:593 +#: ../../source/tutorial-quickstart-xgboost.rst:596 +#, fuzzy msgid "" "Now that you have known how federated XGBoost work with Flower, it's time" " to run some more comprehensive experiments by customising the " "experimental settings. 
In the xgboost-comprehensive example (`full code " "`_), we provide more options to define various experimental" -" setups, including data partitioning and centralised/distributed " -"evaluation. Let's take a look!" +" setups, including aggregation strategies, data partitioning and " +"centralised/distributed evaluation. We also support `Flower simulation " +"`_ making " +"it easy to simulate large client cohorts in a resource-aware manner. " +"Let's take a look!" +msgstr "" +"既然您已经知道联合 XGBoost 如何与 Flower 协同工作,那么现在就该通过自定义实验设置来运行一些更综合的实验了。在 xgboost-" +"comprehensive 示例 (`完整代码 " +"`_)中,我们提供了更多选项来定义各种实验设置,包括数据分区和集中/分布式评估。让我们一起来看看!" + +#: ../../source/tutorial-quickstart-xgboost.rst:603 +#, fuzzy +msgid "Cyclic training" +msgstr "集中式训练" + +#: ../../source/tutorial-quickstart-xgboost.rst:605 +msgid "" +"In addition to bagging aggregation, we offer a cyclic training scheme, " +"which performs FL in a client-by-client fashion. Instead of aggregating " +"multiple clients, there is only one single client participating in the " +"training per round in the cyclic training scenario. The trained local " +"XGBoost trees will be passed to the next client as an initialised model " +"for next round's boosting." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:599 -msgid "Customised data partitioning" +#: ../../source/tutorial-quickstart-xgboost.rst:609 +msgid "" +"To do this, we first customise a :code:`ClientManager` in " +":code:`server_utils.py`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:649 +msgid "" +"The customised :code:`ClientManager` samples all available clients in " +"each FL round based on the order of connection to the server. Then, we " +"define a new strategy :code:`FedXgbCyclic` in " +":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " +"select only one client in given round and pass the received model to next" +" client." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:690 +msgid "" +"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " +"Instead, we just make a copy of the received client model as global model" +" by overriding :code:`aggregate_fit`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:693 +msgid "" +"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" +" methods ensure the clients to be sequentially selected given FL round:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:601 +#: ../../source/tutorial-quickstart-xgboost.rst:757 +msgid "Customised data partitioning" +msgstr "定制数据分区" + +#: ../../source/tutorial-quickstart-xgboost.rst:759 msgid "" "In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" " to instantiate the data partitioner based on the given " @@ -11811,106 +18031,187 @@ msgid "" "provide four supported partitioner type to simulate the uniformity/non-" "uniformity in data quantity (uniform, linear, square, exponential)." msgstr "" +"在 :code:`dataset.py` 中,我们有一个函数 :code:`instantiate_partitioner` 来根据给定的 " +":code:`num_partitions` 和 :code:`partitioner_type` " +"来实例化数据分区器。目前,我们提供四种支持的分区器类型(均匀、线性、正方形、指数)来模拟数据量的均匀性/非均匀性。" -#: ../../source/tutorial-quickstart-xgboost.rst:632 +#: ../../source/tutorial-quickstart-xgboost.rst:790 msgid "Customised centralised/distributed evaluation" -msgstr "" +msgstr "定制的集中/分布式评估" -#: ../../source/tutorial-quickstart-xgboost.rst:634 +#: ../../source/tutorial-quickstart-xgboost.rst:792 +#, fuzzy msgid "" "To facilitate centralised evaluation, we define a function in " -":code:`server.py`:" -msgstr "" +":code:`server_utils.py`:" +msgstr "为便于集中评估,我们在 :code:`server.py` 中定义了一个函数:" -#: ../../source/tutorial-quickstart-xgboost.rst:666 +#: ../../source/tutorial-quickstart-xgboost.rst:824 msgid "" "This function returns a evaluation function which instantiates a " ":code:`Booster` object and loads the global model weights to it. 
The " "evaluation is conducted by calling :code:`eval_set()` method, and the " "tested AUC value is reported." msgstr "" +"此函数返回一个评估函数,该函数实例化一个 :code:`Booster` 对象,并向其加载全局模型参数。评估通过调用 " +":code:`eval_set()` 方法进行,并报告测试的 AUC 值。" -#: ../../source/tutorial-quickstart-xgboost.rst:669 +#: ../../source/tutorial-quickstart-xgboost.rst:827 +#, fuzzy msgid "" "As for distributed evaluation on the clients, it's same as the quick-" "start example by overriding the :code:`evaluate()` method insides the " -":code:`XgbClient` class in :code:`client.py`." +":code:`XgbClient` class in :code:`client_utils.py`." msgstr "" +"至于客户端上的分布式评估,与快速启动示例相同,通过覆盖 :code:`client.py` 中 :code:`XgbClient` 类内部的 " +":code:`evaluate()` 方法。" -#: ../../source/tutorial-quickstart-xgboost.rst:673 -msgid "Arguments parser" +#: ../../source/tutorial-quickstart-xgboost.rst:831 +#, fuzzy +msgid "Flower simulation" +msgstr "运行模拟" + +#: ../../source/tutorial-quickstart-xgboost.rst:832 +msgid "" +"We also provide an example code (:code:`sim.py`) to use the simulation " +"capabilities of Flower to simulate federated XGBoost training on either a" +" single machine or a cluster of machines." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:675 +#: ../../source/tutorial-quickstart-xgboost.rst:866 msgid "" -"In :code:`utils.py`, we define the arguments parsers for clients and " -"server, allowing users to specify different experimental settings. Let's " -"first see the sever side:" +"After importing all required packages, we define a :code:`main()` " +"function to perform the simulation process:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:714 +#: ../../source/tutorial-quickstart-xgboost.rst:921 msgid "" -"This allows user to specify the number of total clients / FL rounds / " -"participating clients / clients for evaluation, and evaluation fashion. 
" -"Note that with :code:`--centralised-eval`, the sever will do centralised " -"evaluation and all functionalities for client evaluation will be " -"disabled." +"We first load the dataset and perform data partitioning, and the pre-" +"processed data is stored in a :code:`list`. After the simulation begins, " +"the clients won't need to pre-process their partitions again." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:718 -msgid "Then, the argument parser on client side:" +#: ../../source/tutorial-quickstart-xgboost.rst:924 +msgid "Then, we define the strategies and other hyper-parameters:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:760 +#: ../../source/tutorial-quickstart-xgboost.rst:975 msgid "" -"This defines various options for client data partitioning. Besides, " -"clients also have a option to conduct evaluation on centralised test set " -"by setting :code:`--centralised-eval`." +"After that, we start the simulation by calling " +":code:`fl.simulation.start_simulation`:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:764 -msgid "Example commands" +#: ../../source/tutorial-quickstart-xgboost.rst:995 +msgid "" +"One of key parameters for :code:`start_simulation` is :code:`client_fn` " +"which returns a function to construct a client. We define it as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1038 +msgid "Arguments parser" +msgstr "参数解析器" + +#: ../../source/tutorial-quickstart-xgboost.rst:1040 +#, fuzzy +msgid "" +"In :code:`utils.py`, we define the arguments parsers for clients, server " +"and simulation, allowing users to specify different experimental " +"settings. 
Let's first see the sever side:" +msgstr "在 :code:`utils.py` 中,我们定义了客户端和服务器端的参数解析器,允许用户指定不同的实验设置。让我们先看看服务器端:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1086 +#, fuzzy +msgid "" +"This allows user to specify training strategies / the number of total " +"clients / FL rounds / participating clients / clients for evaluation, and" +" evaluation fashion. Note that with :code:`--centralised-eval`, the sever" +" will do centralised evaluation and all functionalities for client " +"evaluation will be disabled." msgstr "" +"这允许用户指定总客户数/FL 轮数/参与客户数/评估客户数以及评估方式。请注意,如果使用 :code:`--centralised-" +"eval`,服务器将进行集中评估,客户端评估的所有功能将被禁用。" -#: ../../source/tutorial-quickstart-xgboost.rst:766 +#: ../../source/tutorial-quickstart-xgboost.rst:1090 +msgid "Then, the argument parser on client side:" +msgstr "然后是客户端的参数解析器:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1144 +#, fuzzy msgid "" -"To run a centralised evaluated experiment on 5 clients with exponential " -"distribution for 50 rounds, we first start the server as below:" +"This defines various options for client data partitioning. Besides, " +"clients also have an option to conduct evaluation on centralised test set" +" by setting :code:`--centralised-eval`, as well as an option to perform " +"scaled learning rate based on the number of clients by setting :code" +":`--scaled-lr`." +msgstr "这定义了客户端数据分区的各种选项。此外,通过设置 :code:`-centralised-eval`,客户端还可以选择在集中测试集上进行评估。" + +#: ../../source/tutorial-quickstart-xgboost.rst:1148 +msgid "We also have an argument parser for simulation:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:773 +#: ../../source/tutorial-quickstart-xgboost.rst:1226 +msgid "This integrates all arguments for both client and server sides." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1229 +msgid "Example commands" +msgstr "命令示例" + +#: ../../source/tutorial-quickstart-xgboost.rst:1231 +#, fuzzy +msgid "" +"To run a centralised evaluated experiment with bagging strategy on 5 " +"clients with exponential distribution for 50 rounds, we first start the " +"server as below:" +msgstr "为了在 5 个客户端上进行 50 轮指数分布的集中评估实验,我们首先启动服务器,如下所示:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1238 msgid "Then, on each client terminal, we start the clients:" +msgstr "然后,我们在每个客户终端上启动客户机:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1244 +msgid "To run the same experiment with Flower simulation:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:779 +#: ../../source/tutorial-quickstart-xgboost.rst:1250 +#, fuzzy msgid "" -"The full `source code `_ for this comprehensive example can be found in" " :code:`examples/xgboost-comprehensive`." msgstr "" +"此综合示例的全部`源代码 `_ 可在 :code:`examples/xgboost-comprehensive` 中找到。" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 msgid "Build a strategy from scratch" -msgstr "" +msgstr "从零开始制定策略" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 msgid "" "Welcome to the third part of the Flower federated learning tutorial. In " "previous parts of this tutorial, we introduced federated learning with " -"PyTorch and Flower (`part 1 `__) and we learned how strategies " "can be used to customize the execution on both the server and the clients" -" (`part 2 `__)." msgstr "" +"欢迎来到 Flower 联邦学习教程的第三部分。在本教程的前几部分,我们介绍了 PyTorch 和 Flower 的联邦学习(`part 1 " +"`__),并学习了如何使用策略来定制服务器和客户端的执行(`part 2 " +"`__)。" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 msgid "" "In this notebook, we'll continue to customize the federated learning " "system we built previously by creating a custom version of FedAvg (again," -" using `Flower `__ and `PyTorch " +" using `Flower `__ and `PyTorch " "`__)." 
msgstr "" +"在本笔记中,我们将通过创建 FedAvg 的自定义版本(再次使用 `Flower `__ 和 " +"`PyTorch `__),继续定制我们之前构建的联邦学习系统。" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 @@ -11919,19 +18220,22 @@ msgstr "" msgid "" "`Star Flower on GitHub `__ ⭐️ and join " "the Flower community on Slack to connect, ask questions, and get help: " -"`Join Slack `__ 🌼 We'd love to hear from " +"`Join Slack `__ 🌼 We'd love to hear from " "you in the ``#introductions`` channel! And if anything is unclear, head " "over to the ``#questions`` channel." msgstr "" +"`Star Flower on GitHub `__ ⭐️ 并加入 Slack " +"上的 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ 🌼 " +"我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:17 msgid "Let's build a new ``Strategy`` from scratch!" -msgstr "" +msgstr "让我们从头开始构建一个新的``Strategy``!" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:29 #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:29 msgid "Preparation" -msgstr "" +msgstr "准备工作" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:31 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:32 @@ -11939,20 +18243,20 @@ msgstr "" msgid "" "Before we begin with the actual code, let's make sure that we have " "everything we need." 
-msgstr "" +msgstr "在开始实际代码之前,让我们先确保我们已经准备好了所需的一切。" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:43 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:44 #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:43 #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:43 msgid "Installing dependencies" -msgstr "" +msgstr "安装依赖项" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:45 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:46 #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:45 msgid "First, we install the necessary packages:" -msgstr "" +msgstr "首先,我们安装必要的软件包:" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:65 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:66 @@ -11961,11 +18265,10 @@ msgstr "" msgid "" "Now that we have all dependencies installed, we can import everything we " "need for this tutorial:" -msgstr "" +msgstr "现在我们已经安装了所有依赖项,可以导入本教程所需的所有内容:" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:101 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:102 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:104 #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:101 msgid "" "It is possible to switch to a runtime that has GPU acceleration enabled " @@ -11977,12 +18280,16 @@ msgid "" "has GPU acceleration enabled, you should see the output ``Training on " "cuda``, otherwise it'll say ``Training on cpu``." 
msgstr "" +"可以切换到已启用 GPU 加速的运行时(在 Google Colab 上: 运行时 > 更改运行时类型 > 硬件加速: GPU > " +"保存``)。但请注意,Google Colab 并非总能提供 GPU 加速。如果在以下部分中看到与 GPU 可用性相关的错误,请考虑通过设置 " +"``DEVICE = torch.device(\"cpu\")`` 切回基于 CPU 的执行。如果运行时已启用 GPU " +"加速,你应该会看到输出``Training on cuda``,否则会显示``Training on cpu``。" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:114 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:115 #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:114 msgid "Data loading" -msgstr "" +msgstr "数据加载" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:116 #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:116 @@ -11993,12 +18300,15 @@ msgid "" " ``num_clients`` which allows us to call ``load_datasets`` with different" " numbers of clients." msgstr "" +"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成 10 " +"个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 ``DataLoader`` 中。我们引入了一个新参数 " +"``num_clients``,它允许我们使用不同数量的客户端调用 ``load_datasets``。" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:167 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:168 #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:167 msgid "Model training/evaluation" -msgstr "" +msgstr "模型培训/评估" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:169 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:170 @@ -12006,12 +18316,12 @@ msgstr "" msgid "" "Let's continue with the usual model definition (including " "``set_parameters`` and ``get_parameters``), training and test functions:" -msgstr "" +msgstr "让我们继续使用常见的模型定义(包括 `set_parameters` 和 `get_parameters`)、训练和测试函数:" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:258 msgid "Flower client" -msgstr "" +msgstr "Flower 客户端" #: 
../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:260 #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:260 @@ -12021,14 +18331,17 @@ msgid "" "``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " "``cid`` to the client and use it log additional details:" msgstr "" +"为了实现 Flower 客户端,我们(再次)创建了 ``flwr.client.NumPyClient`` 的子类,并实现了 " +"``get_parameters``、``fit`` 和 ``evaluate``三个方法。在这里,我们还将 ``cid`` " +"传递给客户端,并使用它记录其他详细信息:" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:308 msgid "Let's test what we have so far before we continue:" -msgstr "" +msgstr "在继续之前,让我们先测试一下我们目前掌握的情况:" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:339 msgid "Build a Strategy from scratch" -msgstr "" +msgstr "从零开始构建策略" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:341 msgid "" @@ -12038,18 +18351,20 @@ msgid "" " it is in ``FedAvg`` and then change the configuration dictionary (one of" " the ``FitIns`` attributes)." msgstr "" +"让我们重写 ``configure_fit`` 方法,使其向一部分客户的优化器传递更高的学习率(可能还有其他超参数)。我们将保持 " +"``FedAvg`` 中的客户端采样,然后更改配置字典(``FitIns`` 属性之一)。" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:507 msgid "" "The only thing left is to use the newly created custom Strategy " "``FedCustom`` when starting the experiment:" -msgstr "" +msgstr "剩下的唯一工作就是在启动实验时使用新创建的自定义策略 ``FedCustom`` :" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:534 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:932 #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:697 msgid "Recap" -msgstr "" +msgstr "回顾" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:536 msgid "" @@ -12061,51 +18376,66 @@ msgid "" "functions to the constructor of your new class (``__init__``) and then " "call these functions whenever needed." 
msgstr "" +"在本笔记中,我们了解了如何实施自定义策略。自定义策略可以对客户端节点配置、结果聚合等进行细粒度控制。要定义自定义策略,只需覆盖(抽象)基类 " +"``Strategy`` " +"的抽象方法即可。为使自定义策略更加强大,您可以将自定义函数传递给新类的构造函数(`__init__``),然后在需要时调用这些函数。" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:550 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:948 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:749 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:729 #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:715 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 msgid "" "Before you continue, make sure to join the Flower community on Slack: " -"`Join Slack `__" +"`Join Slack `__" msgstr "" +"在继续之前,请务必加入 Slack 上的 Flower 社区:`Join Slack `__" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:552 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:950 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:751 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:731 #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:717 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 msgid "" "There's a dedicated ``#questions`` channel if you need help, but we'd " "also love to hear who you are in ``#introductions``!" -msgstr "" +msgstr "如果您需要帮助,我们有专门的 ``#questions`` 频道,但我们也很乐意在 ``#introductions`` 中了解您是谁!" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:554 msgid "" "The `Flower Federated Learning Tutorial - Part 4 " -"`__ introduces ``Client``, the flexible API underlying " "``NumPyClient``." 
msgstr "" +"Flower联邦学习教程 - 第4部分 `__ " +"介绍了``Client``,它是``NumPyClient``底层的灵活应用程序接口。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 msgid "Customize the client" -msgstr "" +msgstr "自定义客户端" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 msgid "" "Welcome to the fourth part of the Flower federated learning tutorial. In " "the previous parts of this tutorial, we introduced federated learning " -"with PyTorch and Flower (`part 1 `__), we learned how " "strategies can be used to customize the execution on both the server and " -"the clients (`part 2 `__), and we built our own " -"custom strategy from scratch (`part 3 `__)." msgstr "" +"欢迎来到 Flower 联邦学习教程的第四部分。在本教程的前几部分中,我们介绍了 PyTorch 和 Flower 的联邦学习(`part 1 " +"`__),了解了如何使用策略来定制服务器和客户端的执行(`part 2 " +"`__),并从头开始构建了我们自己的定制策略(`part 3 " +"`__)。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 msgid "" @@ -12117,17 +18447,20 @@ msgid "" " a lot of flexibility that we didn't have before, but we'll also have to " "do a few things the we didn't have to do before." msgstr "" +"在本笔记中,我们将重温 ``NumPyClient`` 并引入一个用于构建客户端的新基类,简单命名为 " +"``Client``。在本教程的前几部分中,我们的客户端基于``NumPyClient``,这是一个便捷类,可以让我们轻松地与具有良好 NumPy" +" 互操作性的机器学习库协同工作。有了 ``Client``,我们获得了很多以前没有的灵活性,但我们也必须做一些以前不需要做的事情。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:18 msgid "" "Let's go deeper and see what it takes to move from ``NumPyClient`` to " "``Client``!" -msgstr "" +msgstr "让我们深入了解一下从 ``NumPyClient`` 到 ``Client`` 的过程!" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:30 #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:29 msgid "Step 0: Preparation" -msgstr "" +msgstr "步骤 0:准备工作" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:117 msgid "" @@ -12135,10 +18468,12 @@ msgid "" "ten smaller datasets (each split into training and validation set), and " "wrap everything in their own ``DataLoader``." 
msgstr "" +"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成十个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 " +"``DataLoader`` 中。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:259 msgid "Step 1: Revisiting NumPyClient" -msgstr "" +msgstr "步骤 1:重温 NumPyClient" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:261 msgid "" @@ -12147,6 +18482,9 @@ msgid "" "``get_parameters``, ``fit``, and ``evaluate``. Finally, we wrap the " "creation of instances of this class in a function called ``client_fn``:" msgstr "" +"到目前为止,我们通过子类化 ``flwr.client.NumPyClient`` " +"实现了我们的客户端。我们实现了三个方法:``get_parameters``, ``fit`, 和``evaluate``。最后,我们用一个名为 " +"``client_fn`` 的函数来创建该类的实例:" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:309 msgid "" @@ -12155,12 +18493,14 @@ msgid "" "``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " "``numpyclient_fn``. Let's run it to see the output we get:" msgstr "" +"我们以前见过这种情况,目前没有什么新东西。与之前的笔记相比,唯一*小*的不同是命名,我们把 ``FlowerClient`` 改成了 " +"``FlowerNumPyClient``,把 `client_fn` 改成了 ``numpyclient_fn``。让我们运行它看看输出结果:" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:339 msgid "" "This works as expected, two clients are training for three rounds of " "federated learning." -msgstr "" +msgstr "结果不出所料,两个客户端正在进行三轮联邦学习训练。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:341 msgid "" @@ -12170,6 +18510,8 @@ msgid "" "instance of our ``FlowerNumPyClient`` (along with loading the model and " "the data)." msgstr "" +"让我们再深入一点,讨论一下 Flower 是如何执行模拟的。每当一个客户端被选中进行工作时,`start_simulation`` 就会调用函数 " +"`numpyclient_fn` 来创建我们的 ``FlowerNumPyClient`` 实例(同时加载模型和数据)。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:343 msgid "" @@ -12181,34 +18523,39 @@ msgid "" "``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " "top of ``Client``." 
msgstr "" +"但令人惊讶的部分也许就在这里: Flower 实际上并不直接使用 ``FlowerNumPyClient`` " +"对象。相反,它封装了该对象,使其看起来像 ``flwr.client.Client`` 的子类,而不是 " +"``flwr.client.NumPyClient``。事实上,Flower 核心框架不知道如何处理 " +"``NumPyClient``,它只知道如何处理 ``Client``。``NumPyClient`` " +"只是建立在``Client``之上的便捷抽象类。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:345 msgid "" "Instead of building on top of ``NumPyClient``, we can directly build on " "top of ``Client``." -msgstr "" +msgstr "与其在 ``NumPyClient`` 上构建,我们可以直接在 ``Client`` 上构建。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:357 msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" -msgstr "" +msgstr "步骤 2:从 ``NumPyClient`` 移至 ``Client``" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:359 msgid "" "Let's try to do the same thing using ``Client`` instead of " "``NumPyClient``." -msgstr "" +msgstr "让我们尝试使用 ``Client`` 代替 ``NumPyClient`` 做同样的事情。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:465 msgid "" "Before we discuss the code in more detail, let's try to run it! Gotta " "make sure our new ``Client``-based client works, right?" -msgstr "" +msgstr "在详细讨论代码之前,让我们试着运行它!必须确保我们基于 ``Client`` 的新客户端能正常运行,对吗?" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:490 msgid "" "That's it, we're now using ``Client``. It probably looks similar to what " "we've done with ``NumPyClient``. So what's the difference?" -msgstr "" +msgstr "就是这样,我们现在开始使用 ``Client``。它看起来可能与我们使用 ``NumPyClient`` 所做的类似。那么有什么不同呢?" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:492 msgid "" @@ -12225,6 +18572,10 @@ msgid "" "back to the server, which (finally!) deserializes them again in order to " "aggregate them with the updates received from other clients." 
msgstr "" +"首先,它的代码更多。但为什么呢?区别在于 ``Client`` 希望我们处理参数的序列化和反序列化。Flower " +"要想通过网络发送参数,最终需要将这些参数转化为 ``字节``。把参数(例如 NumPy 的 ``ndarray`` " +"参数)变成原始字节叫做序列化。将原始字节转换成更有用的东西(如 NumPy ``ndarray`)称为反序列化。Flower " +"需要同时做这两件事:它需要在服务器端序列化参数并将其发送到客户端,客户端需要反序列化参数以便将其用于本地训练,然后再次序列化更新后的参数并将其发送回服务器,服务器(最后)再次反序列化参数以便将其与从其他客户端接收到的更新汇总在一起。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:495 msgid "" @@ -12234,6 +18585,9 @@ msgid "" " and it knows how to handle these. This makes working with machine " "learning libraries that have good NumPy support (most of them) a breeze." msgstr "" +"Client 与 NumPyClient 之间的唯一**真正区别在于,NumPyClient " +"会为你处理序列化和反序列化。NumPyClient之所以能做到这一点,是因为它预计你会以NumPy " +"ndarray的形式返回参数,而且它知道如何处理这些参数。这使得与具有良好 NumPy 支持的大多数机器学习库一起工作变得轻而易举。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:497 msgid "" @@ -12247,16 +18601,20 @@ msgid "" "``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " "values you're used to from ``NumPyClient``." msgstr "" +"在 API 方面,有一个主要区别:Client 中的所有方法都只接受一个参数(例如,``Client.fit`` 中的 " +"``FitIns``),并只返回一个值(例如,``Client.fit`` 中的 " +"``FitRes``)。另一方面,``NumPyClient``中的方法有多个参数(例如,``NumPyClient.fit``中的``parameters``和``config``)和多个返回值(例如,``NumPyClient.fit``中的``parameters``、``num_example``和``metrics``)。在" +" ``Client`` 中的这些 ``*Ins`` 和 ``*Res`` 对象封装了你在 ``NumPyClient`` 中习惯使用的所有单个值。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:510 msgid "Step 3: Custom serialization" -msgstr "" +msgstr "步骤 3:自定义序列化" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:512 msgid "" "Here we will explore how to implement custom serialization with a simple " "example." -msgstr "" +msgstr "下面我们将通过一个简单的示例来探讨如何实现自定义序列化。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:514 msgid "" @@ -12266,6 +18624,8 @@ msgid "" "object. This is very useful for network communication. Indeed, without " "serialization, you could not just a Python object through the internet." 
msgstr "" +"首先,什么是序列化?序列化只是将对象转换为原始字节的过程,同样重要的是,反序列化是将原始字节转换回对象的过程。这对网络通信非常有用。事实上,如果没有序列化,你就无法通过互联网传输一个" +" Python 对象。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:516 msgid "" @@ -12273,7 +18633,7 @@ msgid "" "by sending Python objects back and forth between the clients and the " "server. This means that serialization is an essential part of Federated " "Learning." -msgstr "" +msgstr "通过在客户端和服务器之间来回发送 Python 对象,联合学习在很大程度上依赖于互联网通信进行训练。这意味着序列化是联邦学习的重要组成部分。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:518 msgid "" @@ -12285,10 +18645,13 @@ msgid "" "entries), converting them to a sparse matrix can greatly improve their " "bytesize." msgstr "" +"在下面的章节中,我们将编写一个基本示例,在发送包含参数的 ``ndarray`` 前,我们将首先把 ``ndarray`` " +"转换为稀疏矩阵,而不是发送序列化版本。这种技术可以用来节省带宽,因为在某些情况下,模型的参数是稀疏的(包含许多 0 " +"条目),将它们转换成稀疏矩阵可以大大提高它们的字节数。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:521 msgid "Our custom serialization/deserialization functions" -msgstr "" +msgstr "我们的定制序列化/反序列化功能" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:523 msgid "" @@ -12296,23 +18659,25 @@ msgid "" "especially in ``ndarray_to_sparse_bytes`` for serialization and " "``sparse_bytes_to_ndarray`` for deserialization." msgstr "" +"这才是真正的序列化/反序列化,尤其是在用于序列化的 ``ndarray_too_sparse_bytes`` 和用于反序列化的 " +"``sparse_bytes_too_ndarray`` 中。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:525 msgid "" "Note that we imported the ``scipy.sparse`` library in order to convert " "our arrays." -msgstr "" +msgstr "请注意,为了转换数组,我们导入了 ``scipy.sparse`` 库。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:613 msgid "Client-side" -msgstr "" +msgstr "客户端" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 msgid "" "To be able to able to serialize our ``ndarray``\\ s into sparse " "parameters, we will just have to call our custom functions in our " "``flwr.client.Client``." 
-msgstr "" +msgstr "为了能够将我们的 ``ndarray`` 序列化为稀疏参数,我们只需在 ``flwr.client.Client`` 中调用我们的自定义函数。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:617 msgid "" @@ -12320,6 +18685,8 @@ msgid "" "from our network using our custom ``ndarrays_to_sparse_parameters`` " "defined above." msgstr "" +"事实上,在 `get_parameters` 中,我们需要使用上文定义的自定义 `ndarrays_too_sparse_parameters` " +"序列化从网络中获取的参数。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:619 msgid "" @@ -12328,16 +18695,18 @@ msgid "" "need to serialize our local results with " "``ndarrays_to_sparse_parameters``." msgstr "" +"在 ``fit`` 中,我们首先需要使用自定义的 ``sparse_parameters_to_ndarrays`` " +"反序列化来自服务器的参数,然后使用 ``ndarrays_to_sparse_parameters`` 序列化本地结果。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:621 msgid "" "In ``evaluate``, we will only need to deserialize the global parameters " "with our custom function." -msgstr "" +msgstr "在 ``evaluate`` 中,我们只需要用自定义函数反序列化全局参数。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:725 msgid "Server-side" -msgstr "" +msgstr "服务器端" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:727 msgid "" @@ -12347,24 +18716,26 @@ msgid "" " functions of the strategy will be inherited from the super class " "``FedAvg``." 
msgstr "" +"在本例中,我们将只使用 ``FedAvg`` 作为策略。要改变这里的序列化和反序列化,我们只需重新实现 ``FedAvg`` 的 " +"``evaluate`` 和 ``aggregate_fit`` 函数。策略的其他函数将从超类 ``FedAvg`` 继承。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:729 msgid "As you can see only one line as change in ``evaluate``:" -msgstr "" +msgstr "正如你所看到的,``evaluate``中只修改了一行:" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:735 msgid "" "And for ``aggregate_fit``, we will first deserialize every result we " "received:" -msgstr "" +msgstr "而对于 ``aggregate_fit``,我们将首先反序列化收到的每个结果:" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:744 msgid "And then serialize the aggregated result:" -msgstr "" +msgstr "然后将汇总结果序列化:" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:903 msgid "We can now run our custom serialization example!" -msgstr "" +msgstr "现在我们可以运行自定义序列化示例!" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:934 msgid "" @@ -12376,6 +18747,8 @@ msgid "" "possible in ``NumPyClient``. In order to do so, it requires us to handle " "parameter serialization and deserialization ourselves." msgstr "" +"在本部分教程中,我们已经了解了如何通过子类化 ``NumPyClient`` 或 ``Client`` 来构建客户端。NumPyClient " +"\"是一个便捷的抽象类,可以让我们更容易地与具有良好NumPy互操作性的机器学习库一起工作。``Client``是一个更灵活的抽象类,允许我们做一些在`NumPyClient``中做不到的事情。为此,它要求我们自己处理参数序列化和反序列化。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:952 msgid "" @@ -12383,77 +18756,105 @@ msgid "" "congratulations! You're now well equipped to understand the rest of the " "documentation. 
There are many topics we didn't cover in the tutorial, we " "recommend the following resources:" -msgstr "" +msgstr "这暂时是 Flower 教程的最后一部分,恭喜您!您现在已经具备了理解其余文档的能力。本教程还有许多内容没有涉及,我们推荐您参考以下资源:" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:954 -msgid "`Read Flower Docs `__" -msgstr "" +msgid "`Read Flower Docs `__" +msgstr "阅读Flower文档 `__" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:955 msgid "" "`Check out Flower Code Examples " "`__" -msgstr "" +msgstr "查看 Flower 代码示例 `__" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:956 msgid "" "`Use Flower Baselines for your research " -"`__" -msgstr "" +"`__" +msgstr "使用 \"Flower Baselines \"进行研究 `__" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:957 msgid "" -"`Watch Flower Summit 2023 videos `__" -msgstr "" +msgstr "观看 2023 年Flower峰会视频 `__" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 msgid "Get started with Flower" -msgstr "" +msgstr "开始使用Flower" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 msgid "Welcome to the Flower federated learning tutorial!" -msgstr "" +msgstr "欢迎阅读Flower联邦学习教程!" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 +#, fuzzy msgid "" -"In this notebook, we'll build a federated learning system using Flower " -"and PyTorch. In part 1, we use PyTorch for the model training pipeline " -"and data loading. In part 2, we continue to federate the PyTorch-based " -"pipeline using Flower." +"In this notebook, we'll build a federated learning system using Flower, " +"`Flower Datasets `__ and PyTorch. In " +"part 1, we use PyTorch for the model training pipeline and data loading. " +"In part 2, we continue to federate the PyTorch-based pipeline using " +"Flower." 
msgstr "" +"在本笔记中,我们将使用 Flower 和 PyTorch 构建一个联邦学习系统。在第一部分中,我们使用 PyTorch " +"进行模型训练和数据加载。在第二部分中,我们将继续使用 Flower 联邦化基于 PyTorch 的框架。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:17 msgid "Let's get stated!" -msgstr "" +msgstr "让我们开始吧!" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:31 msgid "" "Before we begin with any actual code, let's make sure that we have " "everything we need." -msgstr "" +msgstr "在开始编写实际代码之前,让我们先确保我们已经准备好了所需的一切。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:45 +#, fuzzy msgid "" "Next, we install the necessary packages for PyTorch (``torch`` and " -"``torchvision``) and Flower (``flwr``):" +"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " +"(``flwr``):" +msgstr "接下来,我们为 PyTorch(`torch`` 和`torchvision``)和 Flower(`flwr`)安装必要的软件包:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:105 +#, fuzzy +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" +" GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." 
msgstr "" +"可以切换到已启用 GPU 加速的运行时(在 Google Colab 上: 运行时 > 更改运行时类型 > 硬件加速: GPU > " +"保存``)。但请注意,Google Colab 并非总能提供 GPU 加速。如果在以下部分中看到与 GPU 可用性相关的错误,请考虑通过设置 " +"``DEVICE = torch.device(\"cpu\")`` 切回基于 CPU 的执行。如果运行时已启用 GPU " +"加速,你应该会看到输出``Training on cuda``,否则会显示``Training on cpu``。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:117 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:118 msgid "Loading the data" -msgstr "" +msgstr "加载数据" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:119 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:120 +#, fuzzy msgid "" "Federated learning can be applied to many different types of tasks across" " different domains. In this tutorial, we introduce federated learning by " "training a simple convolutional neural network (CNN) on the popular " "CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " -"distinguish between images from ten different classes:" +"distinguish between images from ten different classes: 'airplane', " +"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " +"'truck'." msgstr "" +"联邦学习可应用于不同领域的多种不同类型任务。在本教程中,我们将通过在流行的 CIFAR-10 数据集上训练一个简单的卷积神经网络 (CNN) " +"来介绍联合学习。CIFAR-10 可用于训练图像分类器,以区分来自十个不同类别的图像:" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:150 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:131 msgid "" "We simulate having multiple datasets from multiple organizations (also " "called the \"cross-silo\" setting in federated learning) by splitting the" @@ -12463,23 +18864,26 @@ msgid "" "splitting because each organization already has their own data (so the " "data is naturally partitioned)." 
msgstr "" +"我们通过将原始 CIFAR-10 数据集拆分成多个分区来模拟来自多个组织的多个数据集(也称为联邦学习中的 \"跨分区 " +"\"设置)。每个分区代表一个组织的数据。我们这样做纯粹是为了实验目的,在现实世界中不需要拆分数据,因为每个组织都已经有了自己的数据(所以数据是自然分区的)。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:152 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:133 +#, fuzzy msgid "" "Each organization will act as a client in the federated learning system. " "So having ten organizations participate in a federation means having ten " -"clients connected to the federated learning server:" -msgstr "" +"clients connected to the federated learning server." +msgstr "每个组织都将充当联邦学习系统中的客户端。因此,有十个组织参与联邦学习,就意味着有十个客户端连接到联邦学习服务器:" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:172 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:144 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap the resulting partitions by creating a PyTorch ``DataLoader`` for " -"each of them:" +"Let's now create the Federated Dataset abstraction that from ``flwr-" +"datasets`` that partitions the CIFAR-10. We will create small training " +"and test set for each edge device and wrap each of them into a PyTorch " +"``DataLoader``:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:222 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:198 msgid "" "We now have a list of ten training sets and ten validation sets " "(``trainloaders`` and ``valloaders``) representing the data of ten " @@ -12490,14 +18894,17 @@ msgid "" "federated learning systems have their data naturally distributed across " "multiple partitions." 
msgstr "" +"现在,我们有一个包含十个训练集和十个验证集(`trainloaders`` 和`valloaders``)的列表,代表十个不同组织的数据。每对 " +"``trainloader``/``valloader`` 都包含 4500 个训练示例和 500 个验证数据。还有一个单独的 " +"``测试加载器``(我们没有拆分测试集)。同样,这只有在构建研究或教育系统时才有必要,实际的联邦学习系统的数据自然分布在多个分区中。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:225 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:201 msgid "" "Let's take a look at the first batch of images and labels in the first " "training set (i.e., ``trainloaders[0]``) before we move on:" -msgstr "" +msgstr "在继续之前,让我们先看看第一个训练集中的第一批图像和标签(即 ``trainloaders[0]``):" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:264 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:240 msgid "" "The output above shows a random batch of images from the first " "``trainloader`` in our list of ten ``trainloaders``. It also prints the " @@ -12505,12 +18912,14 @@ msgid "" "we've seen above). If you run the cell again, you should see another " "batch of images." msgstr "" +"上面的输出显示了来自十个 \"trainloader \"列表中第一个 \"trainloader " +"\"的随机图像。它还打印了与每幅图像相关的标签(即我们上面看到的十个可能标签之一)。如果您再次运行该单元,应该会看到另一批图像。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:276 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:252 msgid "Step 1: Centralized Training with PyTorch" -msgstr "" +msgstr "步骤 1:使用 PyTorch 进行集中训练" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:287 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:263 msgid "" "Next, we're going to use PyTorch to define a simple convolutional neural " "network. This introduction assumes basic familiarity with PyTorch, so it " @@ -12519,27 +18928,34 @@ msgid "" "MINUTE BLITZ " "`__." 
msgstr "" +"接下来,我们将使用 PyTorch 来定义一个简单的卷积神经网络。本介绍假定您对 PyTorch 有基本的了解,因此不会详细介绍与 PyTorch" +" 相关的内容。如果你想更深入地了解 PyTorch,我们推荐你阅读 `DEEP LEARNING WITH PYTORCH: a 60 " +"minute blitz " +"`__。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:299 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:275 msgid "Defining the model" -msgstr "" +msgstr "定义模型" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:301 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:277 msgid "" "We use the simple CNN described in the `PyTorch tutorial " "`__:" msgstr "" +"我们使用` PyTorch 教程 " +"`__ 中描述的简单 CNN:" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:338 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:314 msgid "Let's continue with the usual training and test functions:" -msgstr "" +msgstr "让我们继续进行常规的训练和测试功能:" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:398 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:374 msgid "Training the model" -msgstr "" +msgstr "训练模型" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:400 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:376 msgid "" "We now have all the basic building blocks we need: a dataset, a model, a " "training function, and a test function. Let's put them together to train " @@ -12547,9 +18963,9 @@ msgid "" "(``trainloaders[0]``). 
This simulates the reality of most machine " "learning projects today: each organization has their own data and trains " "models only on this internal data:" -msgstr "" +msgstr "现在我们拥有了所需的所有基本构件:数据集、模型、训练函数和测试函数。让我们把它们放在一起,在我们其中一个组织的数据集(``trainloaders[0]``)上训练模型。这模拟了当今大多数机器学习项目的实际情况:每个组织都有自己的数据,并且只在这些内部数据上训练模型:" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:430 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 msgid "" "Training the simple CNN on our CIFAR-10 split for 5 epochs should result " "in a test set accuracy of about 41%, which is not good, but at the same " @@ -12557,12 +18973,14 @@ msgid "" "intent was just to show a simplistic centralized training pipeline that " "sets the stage for what comes next - federated learning!" msgstr "" +"在我们的 CIFAR-10 分片上对简单 CNN 进行 5 个遍历的训练后,测试集的准确率应为 " +"41%,这并不理想,但同时对本教程而言也并不重要。我们只是想展示一个简单的集中式训练流程,为接下来的联邦学习做好铺垫!" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:442 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 msgid "Step 2: Federated Learning with Flower" -msgstr "" +msgstr "步骤 2:使用 Flower 联邦学习" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:444 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 msgid "" "Step 1 demonstrated a simple centralized training pipeline. All data was " "in one place (i.e., a single ``trainloader`` and a single ``valloader``)." @@ -12570,12 +18988,14 @@ msgid "" "multiple organizations and where we train a model over these " "organizations using federated learning." 
msgstr "" +"步骤 1 演示了一个简单的集中式训练流程。所有数据都在一个地方(即一个 \"trainloader \"和一个 " +"\"valloader\")。接下来,我们将模拟在多个组织中拥有多个数据集的情况,并使用联邦学习在这些组织中训练一个模型。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:456 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 msgid "Updating model parameters" -msgstr "" +msgstr "更新模型参数" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:458 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 msgid "" "In federated learning, the server sends the global model parameters to " "the client, and the client updates the local model with the parameters " @@ -12584,17 +19004,19 @@ msgid "" "updated/changed model parameters back to the server (or, alternatively, " "it sends just the gradients back to the server, not the full model " "parameters)." -msgstr "" +msgstr "在联邦学习中,服务器将全局模型参数发送给客户端,客户端根据从服务器接收到的参数更新本地模型。然后,客户端根据本地数据对模型进行训练(在本地更改模型参数),并将更新/更改后的模型参数发回服务器(或者,客户端只将梯度参数发回服务器,而不是全部模型参数)。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:460 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 msgid "" "We need two helper functions to update the local model with parameters " "received from the server and to get the updated model parameters from the" " local model: ``set_parameters`` and ``get_parameters``. The following " "two functions do just that for the PyTorch model above." msgstr "" +"我们需要两个辅助函数,用从服务器接收到的参数更新本地模型,并从本地模型获取更新后的模型参数:" +"``set_parameters`` 和 ``get_parameters``。下面两个函数就是为上面的 PyTorch 模型做这些工作的。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:462 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 msgid "" "The details of how this works are not really important here (feel free to" " consult the PyTorch documentation if you want to learn more). 
In " @@ -12602,12 +19024,14 @@ msgid "" " The parameter tensors are then converted to/from a list of NumPy " "ndarray's (which Flower knows how to serialize/deserialize):" msgstr "" +"在这里,如何工作的细节并不重要(如果你想了解更多,请随时查阅 PyTorch 文档)。本质上,我们使用 ``state_dict`` 访问 " +"PyTorch 模型参数张量。然后,参数张量会被转换成/转换成 NumPy ndarray 列表(Flower 知道如何序列化/反序列化):" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:490 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 msgid "Implementing a Flower client" -msgstr "" +msgstr "实现 Flower 客户端" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:492 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 msgid "" "With that out of the way, let's move on to the interesting part. " "Federated learning systems consist of a server and multiple clients. In " @@ -12616,40 +19040,47 @@ msgid "" "``NumPyClient`` in this tutorial because it is easier to implement and " "requires us to write less boilerplate." 
msgstr "" +"说完这些,让我们进入有趣的部分。联邦学习系统由一个服务器和多个客户端组成。在 Flower 中,我们通过实现 " +"``flwr.client.Client`` 或 ``flwr.client.NumPyClient`` " +"的子类来创建客户端。在本教程中,我们使用``NumPyClient``,因为它更容易实现,需要我们编写的模板也更少。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:494 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 msgid "" "To implement the Flower client, we create a subclass of " "``flwr.client.NumPyClient`` and implement the three methods " "``get_parameters``, ``fit``, and ``evaluate``:" msgstr "" +"为实现 Flower 客户端,我们创建了 ``flwr.client.NumPyClient`` 的子类,并实现了 " +"``get_parameters``、``fit`` 和``evaluate`` 三个方法:" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:496 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 msgid "``get_parameters``: Return the current local model parameters" -msgstr "" +msgstr "``get_parameters``: 返回当前本地模型参数" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:497 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 msgid "" "``fit``: Receive model parameters from the server, train the model " "parameters on the local data, and return the (updated) model parameters " "to the server" -msgstr "" +msgstr "``fit``: 从服务器接收模型参数,在本地数据上训练模型参数,并将(更新的)模型参数返回服务器" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:498 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 msgid "" "``evaluate``: Receive model parameters from the server, evaluate the " "model parameters on the local data, and return the evaluation result to " "the server" -msgstr "" +msgstr "``evaluate ``: 从服务器接收模型参数,在本地数据上评估模型参数,并将评估结果返回服务器" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:500 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 msgid "" "We mentioned that our clients will use the previously defined PyTorch " "components for model training and evaluation. 
Let's see a simple Flower " "client implementation that brings everything together:" msgstr "" +"我们提到,我们的客户端将使用之前定义的 PyTorch 组件进行模型训练和评估。让我们来看看一个简单的 Flower " +"客户端实现,它将一切都整合在一起:" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:537 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 msgid "" "Our class ``FlowerClient`` defines how local training/evaluation will be " "performed and allows Flower to call the local training/evaluation through" @@ -12663,12 +19094,18 @@ msgid "" " particular client for training (and ``FlowerClient.evaluate`` for " "evaluation)." msgstr "" +"我们的类 ``FlowerClient`` 定义了本地训练/评估的执行方式,并允许 Flower 通过 ``fit`` 和 " +"``evaluate`` 调用本地训练/评估。每个 ``FlowerClient`` " +"实例都代表联邦学习系统中的*单个客户端*。联邦学习系统有多个客户端(否则就没有什么可联邦的),因此每个客户端都将由自己的 " +"``FlowerClient`` 实例来代表。例如,如果我们的工作负载中有三个客户端,那么我们就会有三个 ``FlowerClient`` " +"实例。当服务器选择特定客户端进行训练时,Flower 会调用相应实例上的 ``FlowerClient.fit`` (评估时调用 " +"``FlowerClient.evaluate``)。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:541 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:517 msgid "Using the Virtual Client Engine" -msgstr "" +msgstr "使用虚拟客户端引擎" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:543 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:519 msgid "" "In this notebook, we want to simulate a federated learning system with 10" " clients on a single machine. This means that the server and all 10 " @@ -12678,8 +19115,11 @@ msgid "" "exhaust the available memory resources, even if only a subset of these " "clients participates in a single round of federated learning." 
msgstr "" +"在本笔记中,我们要模拟一个联邦学习系统,在一台机器上有 10 个客户端。这意味着服务器和所有 10 个客户端都将位于一台机器上,并共享 " +"CPU、GPU 和内存等资源。有 10 个客户端就意味着内存中有 10 个 ``FlowerClient`` " +"实例。在单台机器上这样做会很快耗尽可用的内存资源,即使这些客户端中只有一个子集参与了一轮联邦学习。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:545 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:521 msgid "" "In addition to the regular capabilities where server and clients run on " "multiple machines, Flower, therefore, provides special simulation " @@ -12694,12 +19134,18 @@ msgid "" "be used, for example, to load different local data partitions for " "different clients, as can be seen below:" msgstr "" +"除了服务器和客户端在多台机器上运行的常规功能外,Flower 还提供了特殊的模拟功能,即只有在训练或评估实际需要时才创建 " +"``FlowerClient`` 实例。为了让 Flower 框架能在必要时创建客户端,我们需要实现一个名为 ``client_fn`` " +"的函数,它能按需创建一个 ``FlowerClient`` 实例。每当 Flower 需要一个特定的客户端实例来调用 ``fit`` 或 " +"``evaluate`` 时,它就会调用 " +"``client_fn``(这些实例在使用后通常会被丢弃,因此它们不应保留任何本地状态)。客户端由一个客户端 ID 或简短的 ``cid`` " +"标识。例如,可以使用 ``cid`` 为不同的客户端加载不同的本地数据分区,如下所示:" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:580 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:556 msgid "Starting the training" -msgstr "" +msgstr "开始训练" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:582 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:558 msgid "" "We now have the class ``FlowerClient`` which defines client-side " "training/evaluation and ``client_fn`` which allows Flower to create " @@ -12707,8 +19153,11 @@ msgid "" "``evaluate`` on one particular client. The last step is to start the " "actual simulation using ``flwr.simulation.start_simulation``." 
msgstr "" +"现在我们有了定义客户端训练/评估的类 ``FlowerClient`` 和允许 Flower 在需要调用某个客户端的 ``fit` 或 " +"``evaluate` 时创建 ``FlowerClient`` 实例的 ``client_fn` 类。最后一步是使用 " +"``flwr.simulation.start_simulation`` 启动实际模拟。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:584 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:560 msgid "" "The function ``start_simulation`` accepts a number of arguments, amongst " "them the ``client_fn`` used to create ``FlowerClient`` instances, the " @@ -12717,8 +19166,11 @@ msgid "" "encapsulates the federated learning approach/algorithm, for example, " "*Federated Averaging* (FedAvg)." msgstr "" +"函数 ``start_simulation`` 接受许多参数,其中包括用于创建 ``FlowerClient`` 实例的 " +"``client_fn``、要模拟的客户端数量(``num_clients``)、联邦学习轮数(``num_rounds``)和策略。策略封装了联邦学习方法/算法,例如*联邦平均*" +" (FedAvg)。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:586 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:562 msgid "" "Flower has a number of built-in strategies, but we can also use our own " "strategy implementations to customize nearly all aspects of the federated" @@ -12727,16 +19179,18 @@ msgid "" "step is the actual call to ``start_simulation`` which - you guessed it - " "starts the simulation:" msgstr "" +"Flower 有许多内置策略,但我们也可以使用自己的策略实现来定制联邦学习方法的几乎所有方面。在本例中,我们使用内置的 ``FedAvg`` " +"实现,并使用一些基本参数对其进行定制。最后一步是实际调用 ``start_simulation``开始模拟:" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:608 msgid "Behind the scenes" -msgstr "" +msgstr "幕后" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:610 msgid "So how does this work? How does Flower execute this simulation?" -msgstr "" +msgstr "那么它是如何工作的呢?Flower 如何进行模拟?" 
-#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:612 #, python-format msgid "" "When we call ``start_simulation``, we tell Flower that there are 10 " @@ -12745,8 +19199,11 @@ msgid "" "select 100% of the available clients (``fraction_fit=1.0``), so it goes " "ahead and selects 10 random clients (i.e., 100% of 10)." msgstr "" +"当我们调用 ``start_simulation`` 时,我们会告诉 Flower 有 10 " +"个客户端(``num_clients=10``)。然后,Flower 会要求 ``FedAvg`` 策略选择客户端。``FedAvg`` 知道它应该选择" +" 100%的可用客户端(``fraction_fit=1.0``),所以它会随机选择 10 个客户端(即 10 的 100%)。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:614 msgid "" "Flower then asks the selected 10 clients to train the model. When the " "server receives the model parameter updates from the clients, it hands " @@ -12754,18 +19211,22 @@ msgid "" "strategy aggregates those updates and returns the new global model, which" " then gets used in the next round of federated learning." msgstr "" +"然后,Flower 会要求选定的 10 " +"个客户端对模型进行训练。服务器收到客户端的模型参数更新后,会将这些更新交给策略(*FedAvg*)进行聚合。策略会聚合这些更新并返回新的全局模型,然后将其用于下一轮联邦学习。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:646 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:626 msgid "Where's the accuracy?" -msgstr "" +msgstr "准确度在哪里找?" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:648 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628 msgid "" "You may have noticed that all metrics except for ``losses_distributed`` " "are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" msgstr "" +"您可能已经注意到,除了 ``losses_distributed`` 以外,所有指标都是空的。``{\"accuracy\": " +"float(accuracy)}`` 去哪儿了?"
-#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:650 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630 msgid "" "Flower can automatically aggregate losses returned by individual clients," " but it cannot do the same for metrics in the generic metrics dictionary " @@ -12774,8 +19235,10 @@ msgid "" "metrics at all, so the framework does not (and can not) know how to " "handle these automatically." msgstr "" +"Flower 可以自动汇总单个客户端返回的损失值,但无法对通用度量字典中的度量进行同样的处理(即带有 \"准确度 " +"\"键的度量字典)。度量值字典可以包含非常不同种类的度量值,甚至包含根本不是度量值的键/值对,因此框架不知道(也无法知道)如何自动处理这些度量值。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:652 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632 msgid "" "As users, we need to tell the framework how to handle/aggregate these " "custom metrics, and we do so by passing metric aggregation functions to " @@ -12784,41 +19247,47 @@ msgid "" " are ``fit_metrics_aggregation_fn`` and " "``evaluate_metrics_aggregation_fn``." 
msgstr "" +"作为用户,我们需要告诉框架如何处理/聚合这些自定义指标,为此,我们将指标聚合函数传递给策略。然后,只要从客户端接收到拟合或评估指标,策略就会调用这些函数。两个可能的函数是" +" ``fit_metrics_aggregation_fn`` 和 ``evaluate_metrics_aggregation_fn``。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:654 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634 msgid "" "Let's create a simple weighted averaging function to aggregate the " "``accuracy`` metric we return from ``evaluate``:" -msgstr "" +msgstr "让我们创建一个简单的加权平均函数来汇总从 ``evaluate`` 返回的 ``accuracy`` 指标:" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:680 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:660 msgid "" "The only thing left to do is to tell the strategy to call this function " "whenever it receives evaluation metric dictionaries from the clients:" -msgstr "" +msgstr "剩下要做的就是告诉策略,每当它从客户端接收到评估度量字典时,都要调用这个函数:" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:717 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:697 msgid "" "We now have a full system that performs federated training and federated " "evaluation. It uses the ``weighted_average`` function to aggregate custom" " evaluation metrics and calculates a single ``accuracy`` metric across " "all clients on the server side." msgstr "" +"我们现在有了一个完整的系统,可以执行联邦训练和联邦评估。它使用 ``weighted_average`` " +"函数汇总自定义评估指标,并在服务器端计算所有客户端的单一 ``accuracy`` 指标。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:719 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:699 msgid "" "The other two categories of metrics (``losses_centralized`` and " "``metrics_centralized``) are still empty because they only apply when " "centralized evaluation is being used. Part two of the Flower tutorial " "will cover centralized evaluation." 
msgstr "" +"其他两类指标(`losses_centralized`` 和 " +"`metrics_centralized`)仍然是空的,因为它们只适用于集中评估。Flower 教程的第二部分将介绍集中式评估。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:731 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 msgid "Final remarks" -msgstr "" +msgstr "结束语" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 msgid "" "Congratulations, you just trained a convolutional neural network, " "federated over 10 clients! With that, you understand the basics of " @@ -12827,60 +19296,70 @@ msgid "" " just CIFAR-10 images classification), for example NLP with Hugging Face " "Transformers or speech with SpeechBrain." msgstr "" +"恭喜您,你刚刚训练了一个由 10 个客户端组成的卷积神经网络!这样,你就了解了使用 Flower " +"进行联邦学习的基础知识。你所看到的方法同样适用于其他机器学习框架(不只是 PyTorch)和任务(不只是 CIFAR-10 图像分类),例如使用 " +"Hugging Face Transformers 的 NLP 或使用 SpeechBrain 的语音。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:735 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:715 msgid "" "In the next notebook, we're going to cover some more advanced concepts. " "Want to customize your strategy? Initialize parameters on the server " "side? Or evaluate the aggregated model on the server side? We'll cover " "all this and more in the next tutorial." -msgstr "" +msgstr "在下一个笔记中,我们将介绍一些更先进的概念。想定制你的策略吗?在服务器端初始化参数?或者在服务器端评估聚合模型?我们将在下一个教程中介绍所有这些内容以及更多。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:753 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 msgid "" "The `Flower Federated Learning Tutorial - Part 2 " -"`__ goes into more depth about strategies and all " "the advanced things you can build with them." 
msgstr "" +"`Flower 联邦学习教程 - 第 2 部分 `__ 更深入地介绍了策略以及可以使用策略构建的所有高级功能。" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 msgid "Use a federated learning strategy" -msgstr "" +msgstr "使用联邦学习策略" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 msgid "" "Welcome to the next part of the federated learning tutorial. In previous " "parts of this tutorial, we introduced federated learning with PyTorch and" -" Flower (`part 1 `__)." msgstr "" +"欢迎来到联邦学习教程的下一部分。在本教程的前几部分,我们介绍了使用 PyTorch 和 Flower 进行联邦学习(`第 1 部分 " +"`___)。" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 msgid "" "In this notebook, we'll begin to customize the federated learning system " "we built in the introductory notebook (again, using `Flower " -"`__ and `PyTorch `__)." +"`__ and `PyTorch `__)." msgstr "" +"在本笔记中,我们将开始定制在入门笔记中构建的联邦学习系统(再次使用 `Flower `__ 和 " +"`PyTorch `__)。" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 msgid "Let's move beyond FedAvg with Flower strategies!" -msgstr "" +msgstr "让我们超越 FedAvg,采用Flower策略!" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:309 msgid "Strategy customization" -msgstr "" +msgstr "策略定制" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:311 msgid "" "So far, everything should look familiar if you've worked through the " "introductory notebook. With that, we're ready to introduce a number of " "new features." -msgstr "" +msgstr "到目前为止,如果您已经阅读过入门笔记本,那么一切都应该很熟悉了。接下来,我们将介绍一些新功能。" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 msgid "Server-side parameter **initialization**" -msgstr "" +msgstr "服务器端参数 **初始化**" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:325 msgid "" @@ -12889,6 +19368,8 @@ msgid "" "over parameter initialization though. 
Flower therefore allows you to " "directly pass the initial parameters to the Strategy:" msgstr "" +"默认情况下,Flower 会通过向一个随机客户端询问初始参数来初始化全局模型。但在许多情况下,我们需要对参数初始化进行更多控制。因此,Flower" +" 允许您直接将初始参数传递给策略:" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:370 msgid "" @@ -12897,10 +19378,13 @@ msgid "" "closely, we can see that the logs do not show any calls to the " "``FlowerClient.get_parameters`` method." msgstr "" +"向 ``FedAvg`` 策略传递 ``initial_parameters`` 可以防止 Flower " +"向其中一个客户端询问初始参数。如果我们仔细观察,就会发现日志中没有显示对 ``FlowerClient.get_parameters`` " +"方法的任何调用。" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:382 msgid "Starting with a customized strategy" -msgstr "" +msgstr "从定制战略开始" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:384 msgid "" @@ -12909,24 +19393,26 @@ msgid "" "``FlowerClient`` instances, the number of clients to simulate " "``num_clients``, the number of rounds ``num_rounds``, and the strategy." msgstr "" +"我们以前见过函数 ``start_simulation``。它接受许多参数,其中包括用于创建 ``FlowerClient`` 实例的 " +"``client_fn``、要模拟的客户数量 ``num_clients``、回合数 ``num_rounds``和策略。" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:386 msgid "" "The strategy encapsulates the federated learning approach/algorithm, for " "example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " "strategy this time:" -msgstr "" +msgstr "该策略封装了联邦学习方法/算法,例如`FedAvg``或`FedAdagrad``。这次让我们尝试使用不同的策略:" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:424 msgid "Server-side parameter **evaluation**" -msgstr "" +msgstr "服务器端参数**评估**" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:426 msgid "" "Flower can evaluate the aggregated model on the server-side or on the " "client-side. Client-side and server-side evaluation are similar in some " "ways, but different in others." 
-msgstr "" +msgstr "Flower 可以在服务器端或客户端评估聚合模型。客户端和服务器端评估在某些方面相似,但也有不同之处。" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:428 msgid "" @@ -12937,7 +19423,7 @@ msgid "" "aggregated model after each round of training without having to send the " "model to clients. We're also fortunate in the sense that our entire " "evaluation dataset is available at all times." -msgstr "" +msgstr "**集中评估**(或*服务器端评估*)在概念上很简单:它的工作方式与集中式机器学习中的评估方式相同。如果有一个服务器端数据集可用于评估目的,那就太好了。我们可以在每一轮训练后对新聚合的模型进行评估,而无需将模型发送给客户端。我们也很幸运,因为我们的整个评估数据集随时可用。" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:430 msgid "" @@ -12953,7 +19439,7 @@ msgid "" "also change over consecutive rounds. This can lead to evaluation results " "that are not stable, so even if we would not change the model, we'd see " "our evaluation results fluctuate over consecutive rounds." -msgstr "" +msgstr "**联邦评估**(或*客户端评估*)更为复杂,但也更为强大:它不需要集中的数据集,允许我们在更大的数据集上对模型进行评估,这通常会产生更真实的评估结果。事实上,如果我们想得到有代表性的评估结果,很多情况下都需要使用**联邦评估**。但是,这种能力是有代价的:一旦我们开始在客户端进行评估,我们就应该意识到,如果这些客户端并不总是可用,我们的评估数据集可能会在连续几轮学习中发生变化。此外,每个客户端所拥有的数据集也可能在连续几轮学习中发生变化。这可能会导致评估结果不稳定,因此即使我们不改变模型,也会看到评估结果在连续几轮中波动。" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:433 msgid "" @@ -12961,10 +19447,12 @@ msgid "" "implementing the ``evaluate`` method in ``FlowerClient``). 
Now let's see " "how we can evaluate aggregated model parameters on the server-side:" msgstr "" +"我们已经了解了联邦评估如何在客户端工作(即通过在 ``FlowerClient`` 中实现 ``evaluate`` " +"方法)。现在让我们看看如何在服务器端评估聚合模型参数:" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:490 msgid "Sending/receiving arbitrary values to/from clients" -msgstr "" +msgstr "向/从客户端发送/接收任意值" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:492 msgid "" @@ -12980,6 +19468,11 @@ msgid "" " it reads ``server_round`` and ``local_epochs`` and uses those values to " "improve the logging and configure the number of local training epochs:" msgstr "" +"在某些情况下,我们希望从服务器端配置客户端的执行(训练、评估)。其中一个例子就是服务器要求客户端训练一定数量的本地遍历。Flower " +"提供了一种使用字典从服务器向客户端发送配置值的方法。让我们来看一个例子:客户端通过 ``fit`` 中的 ``config`` " +"参数从服务器接收配置值(``evaluate`` 中也有 ``config`` 参数)。``fit`` 方法通过 ``config`` " +"参数接收配置字典,然后从字典中读取值。在本例中,它读取了 ``server_round`` 和 " +"``local_epochs``,并使用这些值来改进日志记录和配置本地训练遍历的数量:" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:546 msgid "" @@ -12989,12 +19482,14 @@ msgid "" "strategy, and the strategy calls this function for every round of " "federated learning:" msgstr "" +"那么,如何将配置字典从服务器发送到客户端呢?内置的 \"Flower策略\"(Flower " +"Strategies)提供了这样的方法,其工作原理与服务器端评估的工作原理类似。我们为策略提供一个函数,策略会在每一轮联邦学习中调用这个函数:" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:576 msgid "" "Next, we'll just pass this function to the FedAvg strategy before " "starting the simulation:" -msgstr "" +msgstr "接下来,我们只需在开始模拟前将此函数传递给 FedAvg 策略即可:" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 msgid "" @@ -13004,6 +19499,8 @@ msgid "" " round of federated learning, and then for two epochs during the third " "round." 
msgstr "" +"我们可以看到,客户端日志现在包含了当前一轮的联邦学习(从 ``config`` " +"字典中读取)。我们还可以将本地训练配置为在第一轮和第二轮联邦学习期间运行一个遍历,然后在第三轮联邦学习期间运行两个遍历。" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 msgid "" @@ -13013,16 +19510,19 @@ msgid "" "explicitly: our ``FlowerClient`` returns a dictionary containing a custom" " key/value pair as the third return value in ``evaluate``." msgstr "" +"客户端还可以向服务器返回任意值。为此,它们会从 ``fit`` 和/或 ``evaluate`` " +"返回一个字典。我们在本笔记中看到并使用了这一概念,但并未明确提及:我们的 ``FlowerClient`` 返回一个包含自定义键/值对的字典,作为" +" ``evaluate`` 中的第三个返回值。" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:627 msgid "Scaling federated learning" -msgstr "" +msgstr "扩大联邦学习的规模" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:629 msgid "" "As a last step in this notebook, let's see how we can use Flower to " "experiment with a large number of clients." -msgstr "" +msgstr "作为本笔记的最后一步,让我们看看如何使用 Flower 对大量客户端进行实验。" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:651 #, python-format @@ -13037,6 +19537,10 @@ msgid "" "available clients (so 50 clients) will be selected for training each " "round:" msgstr "" +"现在我们有 1000 个分区,每个分区有 45 个训练数据和 5 " +"个验证数据。鉴于每个客户端上的训练示例数量较少,我们可能需要对模型进行更长时间的训练,因此我们将客户端配置为执行 3 " +"个本地训练遍历。我们还应该调整每轮训练中被选中的客户端的比例(我们不希望每轮训练都有 1000 个客户端参与),因此我们将 " +"``fraction_fit`` 调整为 ``0.05``,这意味着每轮训练只选中 5%的可用客户端(即 50 个客户端):" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:699 msgid "" @@ -13044,7 +19548,7 @@ msgid "" "customizing the strategy, initializing parameters on the server side, " "choosing a different strategy, and evaluating models on the server-side. " "That's quite a bit of flexibility with so little code, right?" -msgstr "" +msgstr "在本笔记中,我们看到了如何通过自定义策略、在服务器端初始化参数、选择不同的策略以及在服务器端评估模型来逐步增强我们的系统。用这么少的代码就能实现这么大的灵活性,不是吗?" 
#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:701 msgid "" @@ -13054,18 +19558,23 @@ msgid "" "simulation using the Flower Virtual Client Engine and ran an experiment " "involving 1000 clients in the same workload - all in a Jupyter Notebook!" msgstr "" +"在后面的章节中,我们将看到如何在服务器和客户端之间传递任意值,以完全自定义客户端执行。有了这种能力,我们使用 Flower " +"虚拟客户端引擎构建了一个大规模的联邦学习模拟,并在 Jupyter Notebook 中进行了一次实验,在相同的工作负载中运行了 1000 " +"个客户端!" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 msgid "" "The `Flower Federated Learning Tutorial - Part 3 " -"`__ shows how to build a fully custom ``Strategy`` " "from scratch." msgstr "" +"`Flower 联邦学习教程 - 第 3 部分 `__ 展示了如何从头开始构建完全自定义的 \"策略\"。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 msgid "What is Federated Learning?" -msgstr "" +msgstr "什么是联邦学习?" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 msgid "" @@ -13074,87 +19583,92 @@ msgid "" "parts of the tutorial, you will be able to build advanced federated " "learning systems that approach the current state of the art in the field." msgstr "" +"在本教程中,你将了解什么是联邦学习,用 Flower " +"搭建第一个系统,并逐步对其进行扩展。如果你能完成本教程的所有部分,你就能构建高级的联邦学习系统,从而接近该领域当前的技术水平。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 msgid "" "🧑‍🏫 This tutorial starts at zero and expects no familiarity with " "federated learning. Only a basic understanding of data science and Python" " programming is assumed." -msgstr "" +msgstr "🧑‍🏫 本教程从零开始,不要求熟悉联邦学习。仅假定对数据科学和 Python 编程有基本了解。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 msgid "" "`Star Flower on GitHub `__ ⭐️ and join " "the open-source Flower community on Slack to connect, ask questions, and " -"get help: `Join Slack `__ 🌼 We'd love to " +"get help: `Join Slack `__ 🌼 We'd love to " "hear from you in the ``#introductions`` channel! And if anything is " "unclear, head over to the ``#questions`` channel." 
msgstr "" +"`Star Flower on GitHub `__ ⭐️ 并加入 Slack " +"上的开源 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ " +"🌼 我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 msgid "Let's get started!" -msgstr "" +msgstr "让我们开始吧!" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 msgid "Classic machine learning" -msgstr "" +msgstr "经典机器学习" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 msgid "" "Before we begin to discuss federated learning, let us quickly recap how " "most machine learning works today." -msgstr "" +msgstr "在开始讨论联邦学习之前,让我们先快速回顾一下目前大多数机器学习的工作原理。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 msgid "" "In machine learning, we have a model, and we have data. The model could " "be a neural network (as depicted here), or something else, like classical" " linear regression." -msgstr "" +msgstr "在机器学习中,我们有一个模型和数据。模型可以是一个神经网络(如图所示),也可以是其他东西,比如经典的线性回归。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|e1dd4b4129b040bea23a894266227080|" +msgid "|31e4b1afa87c4b968327bbeafbf184d4|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 msgid "Model and data" -msgstr "" +msgstr "模型和数据" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 msgid "" "We train the model using the data to perform a useful task. A task could " "be to detect objects in images, transcribe an audio recording, or play a " "game like Go." 
-msgstr "" +msgstr "我们使用数据来训练模型,以完成一项有用的任务。任务可以是检测图像中的物体、转录音频或玩围棋等游戏。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|c0d4cc6a442948dca8da40d2440068d9|" +msgid "|c9d935b4284e4c389a33d86b33e07c0a|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 msgid "Train model using data" -msgstr "" +msgstr "使用数据训练模型" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 msgid "" "Now, in practice, the training data we work with doesn't originate on the" " machine we train the model on. It gets created somewhere else." -msgstr "" +msgstr "实际上,我们使用的训练数据并不来自我们训练模型的机器。它是在其他地方创建的。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 msgid "" "It originates on a smartphone by the user interacting with an app, a car " "collecting sensor data, a laptop receiving input via the keyboard, or a " "smart speaker listening to someone trying to sing a song." -msgstr "" +msgstr "它源于智能手机上用户与应用程序的交互、汽车上传感器数据的收集、笔记本电脑上键盘输入的接收,或者智能扬声器上某人试着唱的歌。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|174e1e4fa1f149a19bfbc8bc1126f46a|" +msgid "|00727b5faffb468f84dd1b03ded88638|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 msgid "Data on a phone" -msgstr "" +msgstr "手机上的数据" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 msgid "" @@ -13163,14 +19677,16 @@ msgid "" " the same app. But it could also be several organizations, all generating" " data for the same task." 
msgstr "" +"值得一提的是,这个 \"其他地方 " +"\"通常不只是一个地方,而是很多地方。它可能是多个运行同一应用程序的设备。但也可能是多个组织,都在为同一任务生成数据。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|4e021a3dc08249d2a89daa3ab03c2714|" +msgid "|daf0cf0ff4c24fd29439af78416cf47b|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 msgid "Data is on many devices" -msgstr "" +msgstr "数据存在于多种设备中" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 msgid "" @@ -13178,34 +19694,34 @@ msgid "" "that has been used in the past was to collect all data on a central " "server. This server can be somewhere in a data center, or somewhere in " "the cloud." -msgstr "" +msgstr "因此,要使用机器学习或任何类型的数据分析,过去使用的方法是在中央服务器上收集所有数据。这个服务器可以在数据中心的某个地方,也可以在云端的某个地方。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|e74a1d5ce7eb49688651f2167a59065b|" +msgid "|9f093007080d471d94ca90d3e9fde9b6|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 msgid "Central data collection" -msgstr "" +msgstr "集中数据收集" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 msgid "" "Once all the data is collected in one place, we can finally use machine " "learning algorithms to train our model on the data. This is the machine " "learning approach that we've basically always relied on." -msgstr "" +msgstr "一旦所有数据都收集到一处,我们最终就可以使用机器学习算法在数据上训练我们的模型。这就是我们基本上一直依赖的机器学习方法。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|eb29ec4c7aef4e93976795ed72df647e|" +msgid "|46a26e6150e0479fbd3dfd655f36eb13|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 msgid "Central model training" -msgstr "" +msgstr "集中模型训练" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 msgid "Challenges of classical machine learning" -msgstr "" +msgstr "经典机器学习面临的挑战" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 msgid "" @@ -13213,37 +19729,37 @@ msgid "" " cases. 
Great examples include categorizing holiday photos, or analyzing " "web traffic. Cases, where all the data is naturally available on a " "centralized server." -msgstr "" +msgstr "我们刚刚看到的经典机器学习方法可以在某些情况下使用。很好的例子包括对假日照片进行分类或分析网络流量。在这些案例中,所有数据自然都可以在中央服务器上获得。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|c2f699d8ac484f5081721a6f1511f70d|" +msgid "|3daba297595c4c7fb845d90404a6179a|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 msgid "Centralized possible" -msgstr "" +msgstr "可集中管理" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 msgid "" "But the approach can not be used in many other cases. Cases, where the " "data is not available on a centralized server, or cases where the data " "available on one server is not enough to train a good model." -msgstr "" +msgstr "但这种方法并不适用于许多其他情况。例如,集中服务器上没有数据,或者一台服务器上的数据不足以训练出一个好的模型。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|cf42accdacbf4e5eb4fa0503108ba7a7|" +msgid "|5769874fa9c4455b80b2efda850d39d7|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 msgid "Centralized impossible" -msgstr "" +msgstr "无法集中" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 msgid "" "There are many reasons why the classic centralized machine learning " "approach does not work for a large number of highly important real-world " "use cases. Those reasons include:" -msgstr "" +msgstr "传统的集中式机器学习方法无法满足现实世界中大量极为重要的使用案例,原因有很多。这些原因包括:" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 msgid "" @@ -13257,6 +19773,8 @@ msgid "" " in different parts of the world, and their data is governed by different" " data protection regulations." 
msgstr "" +"**法规**: " +"GDPR(欧洲)、CCPA(加利福尼亚)、PIPEDA(加拿大)、LGPD(巴西)、PDPL(阿根廷)、KVKK(土耳其)、POPI(南非)、FSS(俄罗斯)、CDPR(中国)、PDPB(印度)、PIPA(韩国)、APPI(日本)、PDP(印度尼西亚)、PDPA(新加坡)、APP(澳大利亚)等法规保护敏感数据不被移动。事实上,这些法规有时甚至会阻止单个组织将自己的用户数据用于人工智能培训,因为这些用户生活在世界不同地区,他们的数据受不同的数据保护法规管辖。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 msgid "" @@ -13267,6 +19785,8 @@ msgid "" "company that developed that keyboard, do you? In fact, that use case was " "the reason federated learning was invented in the first place." msgstr "" +"**用户偏好**: " +"除了法规之外,在一些使用案例中,用户只是希望数据永远不会离开他们的设备。如果你在手机的数字键盘上输入密码和信用卡信息,你不会希望这些密码最终出现在开发该键盘的公司的服务器上吧?事实上,这种用例正是联邦学习发明的初衷。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 msgid "" @@ -13279,30 +19799,32 @@ msgid "" "incredibly powerful and exceedingly expensive infrastructure to process " "and store. And most of the data isn't even useful." msgstr "" +"**数据量**: " +"有些传感器(如摄像头)产生的数据量很大,收集所有数据既不可行,也不经济(例如,由于带宽或通信效率的原因)。试想一下全国铁路服务,全国有数百个火车站。如果每个火车站都安装了许多安全摄像头,那么它们所产生的大量原始设备数据就需要功能强大且极其昂贵的基础设施来处理和存储。而大部分数据甚至都是无用的。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 msgid "Examples where centralized machine learning does not work include:" -msgstr "" +msgstr "集中式机器学习不起作用的例子包括:" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 msgid "" "Sensitive healthcare records from multiple hospitals to train cancer " "detection models" -msgstr "" +msgstr "用多家医院的敏感医疗记录训练癌症检测模型" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 msgid "" "Financial information from different organizations to detect financial " "fraud" -msgstr "" +msgstr "不同组织的财务信息,以侦查财务欺诈行为" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 msgid "Location data from your electric car to make better range prediction" -msgstr "" +msgstr "通过电动汽车的定位数据更好地预测续航里程" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 msgid "End-to-end encrypted messages to train better auto-complete models" -msgstr "" 
+msgstr "端到端加密信息可训练出更好的自动完成模型" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 msgid "" @@ -13314,10 +19836,12 @@ msgid "" "these cases to utilize private data? After all, these are all areas that " "would benefit significantly from recent advances in AI." msgstr "" +"像 `Brave `__浏览器或 `Signal " +"`__信息管理器这样的隐私增强系统的流行表明,用户关心隐私。事实上,他们会选择隐私性更好的产品。但是,我们能做些什么来将机器学习和数据科学应用到这些情况中,以利用隐私数据呢?毕竟,这些领域都将从人工智能的最新进展中受益匪浅。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 msgid "Federated learning" -msgstr "" +msgstr "联邦学习" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 msgid "" @@ -13325,15 +19849,15 @@ msgid "" "learning on distributed data by moving the training to the data, instead " "of moving the data to the training. Here's the single-sentence " "explanation:" -msgstr "" +msgstr "联邦学习简单地颠覆了这种方法。它通过将训练转移到数据上,而不是将数据转移到训练上,在分布式数据上实现机器学习。下面是一句话的解释:" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 msgid "Central machine learning: move the data to the computation" -msgstr "" +msgstr "集中式机器学习:将数据转移到计算中心" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 msgid "Federated (machine) learning: move the computation to the data" -msgstr "" +msgstr "联邦式(机器)学习:将计算转移到数据上" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 msgid "" @@ -13347,42 +19871,42 @@ msgid "" " few of the examples that come to mind. As we deploy federated learning, " "we discover more and more areas that can suddenly be reinvented because " "they now have access to vast amounts of previously inaccessible data." -msgstr "" +msgstr "这样,我们就能在以前不可能的领域使用机器学习(和其他数据科学方法)。现在,我们可以通过让不同的医院协同工作来训练优秀的医疗人工智能模型。我们可以通过在不同金融机构的数据上训练人工智能模型来解决金融欺诈问题。我们可以构建新颖的隐私增强型应用(如安全信息),其内置的人工智能比非隐私增强型应用更好。以上只是我想到的几个例子。随着联邦学习的部署,我们会发现越来越多的领域可以突然重获新生,因为它们现在可以访问大量以前无法访问的数据。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 msgid "" "So how does federated learning work, exactly? 
Let's start with an " "intuitive explanation." -msgstr "" +msgstr "那么,联邦学习究竟是如何运作的呢?让我们从直观的解释开始。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 msgid "Federated learning in five steps" -msgstr "" +msgstr "联邦学习的五个步骤" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 msgid "Step 0: Initialize global model" -msgstr "" +msgstr "步骤 0:初始化全局模型" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 msgid "" "We start by initializing the model on the server. This is exactly the " "same in classic centralized learning: we initialize the model parameters," " either randomly or from a previously saved checkpoint." -msgstr "" +msgstr "我们首先在服务器上初始化模型。这与经典的集中式学习完全相同:我们随机或从先前保存的检查点初始化模型参数。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|5ec8356bc2564fa09178b1ceed5beccc|" +msgid "|ba47ffb421814b0f8f9fa5719093d839|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 msgid "Initialize global model" -msgstr "" +msgstr "初始化全局模型" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 msgid "" "Step 1: Send model to a number of connected organizations/devices (client" " nodes)" -msgstr "" +msgstr "第 1 步:将模型发送到多个连接的组织/设备(客户节点)" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 msgid "" @@ -13392,21 +19916,21 @@ msgid "" "their local training using the same model parameters. We often use only a" " few of the connected nodes instead of all nodes. The reason for this is " "that selecting more and more client nodes has diminishing returns." 
-msgstr "" +msgstr "接下来,我们会将全局模型的参数发送到连接的客户端节点(如智能手机等边缘设备或企业的服务器)。这是为了确保每个参与节点都使用相同的模型参数开始本地训练。我们通常只使用几个连接节点,而不是所有节点。这样做的原因是,选择越来越多的客户端节点会导致收益递减。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|7c9329e97bd0430bad335ab605a897a7|" +msgid "|aeac5bf79cbf497082e979834717e01b|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 msgid "Send global model" -msgstr "" +msgstr "发送全局模型" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 msgid "" "Step 2: Train model locally on the data of each organization/device " "(client node)" -msgstr "" +msgstr "步骤 2:在本地对每个机构/设备(客户端节点)的数据进行模型训练" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 msgid "" @@ -13417,18 +19941,20 @@ msgid "" "This could be as little as one epoch on the local data, or even just a " "few steps (mini-batches)." msgstr "" +"现在,所有(选定的)客户端节点都有了最新版本的全局模型参数,它们开始进行本地训练。它们使用自己的本地数据集来训练自己的本地模型。它们不会一直训练到模型完全收敛为止,而只是训练一小段时间。这可能只是本地数据上的一个遍历,甚至只是几个步骤" +"(mini-batches)。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|88002bbce1094ba1a83c9151df18f707|" +msgid "|ce27ed4bbe95459dba016afc42486ba2|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 msgid "Train on local data" -msgstr "" +msgstr "根据本地数据进行训练" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 msgid "Step 3: Return model updates back to the server" -msgstr "" +msgstr "步骤 3:将模型参数更新返回服务器" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 msgid "" @@ -13439,19 +19965,19 @@ msgid "" "server. The model updates they send can either be the full model " "parameters or just the gradients that were accumulated during local " "training." 
-msgstr "" +msgstr "经过本地训练后,每个客户节点最初收到的模型参数都会略有不同。参数之所以不同,是因为每个客户端节点的本地数据集中都有不同的数据。然后,客户端节点将这些模型更新发回服务器。它们发送的模型更新既可以是完整的模型参数,也可以只是本地训练过程中积累的梯度。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|391766aee87c482c834c93f7c22225e2|" +msgid "|ae94a7f71dda443cbec2385751427d41|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 msgid "Send model updates" -msgstr "" +msgstr "发送模型参数更新" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 msgid "Step 4: Aggregate model updates into a new global model" -msgstr "" +msgstr "步骤 4:将模型更新聚合到新的全局模型中" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 msgid "" @@ -13461,6 +19987,8 @@ msgid "" "But didn't we want to have one model that contains the learnings from the" " data of all 100 client nodes?" msgstr "" +"服务器从选定的客户端节点接收模型更新。如果服务器选择了 100 个客户端节点,那么它现在就拥有 100 " +"个略有不同的原始全局模型版本,每个版本都是根据一个客户端的本地数据训练出来的。难道我们不希望有一个包含所有 100 个客户节点数据的模型吗?" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 msgid "" @@ -13478,18 +20006,24 @@ msgid "" "weighting - each of the 10 examples would influence the global model ten " "times as much as each of the 100 examples." 
msgstr "" +"为了得到一个单一的模型,我们必须将从客户端节点收到的所有模型更新合并起来。这个过程称为*聚合*,有许多不同的方法。最基本的方法称为 " +"*Federated Averaging* (`McMahan等人,2016 " +"`__),通常缩写为*FedAvg*。*FedAvg* 可以把100 " +"个模型更新进行平均。更准确地说,它取的是模型更新的*加权平均值*,根据每个客户端用于训练的数据数量进行加权。加权对于确保每个数据示例对生成的全局模型具有相同的" +" \"影响 \"非常重要。如果一个客户端有 10 个数据点,而另一个客户有 100 个数据点,那么在不加权的情况下,10 个示例对全局模型的影响是" +" 100 个示例的 10 倍。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|93b9a15bd27f4e91b40f642c253dfaac|" +msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 msgid "Aggregate model updates" -msgstr "" +msgstr "聚合模型参数更新" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 msgid "Step 5: Repeat steps 1 to 4 until the model converges" -msgstr "" +msgstr "步骤 5:重复步骤 1 至 4,直至模型收敛" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 msgid "" @@ -13499,6 +20033,8 @@ msgid "" "updated models to the server (step 3), and the server then aggregates the" " model updates to get a new version of the global model (step 4)." msgstr "" +"步骤 1 至 4 就是我们所说的单轮联邦学习。全局模型参数被发送到参与的客户端节点(第 1 步),客户端节点对其本地数据进行训练(第 2 " +"步),然后将更新后的模型发送到服务器(第 3 步),服务器汇总模型更新,得到新版本的全局模型(第 4 步)。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 msgid "" @@ -13510,6 +20046,8 @@ msgid "" "eventually arrive at a fully trained model that performs well across the " "data of all client nodes." msgstr "" +"在一轮迭代中,每个参与迭代的客户节点只训练一小段时间。这意味着,在聚合步骤(步骤 " +"4)之后,我们的模型已经在所有参与的客户节点的所有数据上训练过了,但只训练了一小会儿。然后,我们必须一次又一次地重复这一训练过程,最终得到一个经过全面训练的模型,该模型在所有客户节点的数据中都表现良好。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 msgid "" @@ -13521,6 +20059,8 @@ msgid "" "aggregate model updates? How can we handle failing client nodes " "(stragglers)?" msgstr "" +"恭喜您,现在您已经了解了联邦学习的基础知识。当然,要讨论的内容还有很多,但这只是联邦学习的一个缩影。在本教程的后半部分,我们将进行更详细的介绍。有趣的问题包括" +" 我们如何选择最好的客户端节点参与下一轮学习?聚合模型更新的最佳方法是什么?如何处理失败的客户端节点(落伍者)?" 
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 msgid "" @@ -13530,10 +20070,12 @@ msgid "" "abbreviated as FE. In fact, federated evaluation is an integral part of " "most federated learning systems." msgstr "" +"就像我们可以在不同客户节点的分散数据上训练一个模型一样,我们也可以在这些数据上对模型进行评估,以获得有价值的指标。这就是所谓的联邦评估,有时简称为" +" FE。事实上,联邦评估是大多数联邦学习系统不可或缺的一部分。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 msgid "Federated analytics" -msgstr "" +msgstr "联邦分析" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 msgid "" @@ -13545,11 +20087,11 @@ msgid "" "conjunction with other privacy-enhancing technologies like secure " "aggregation to prevent the server from seeing the results submitted by " "individual client nodes." -msgstr "" +msgstr "在很多情况下,机器学习并不是从数据中获取价值的必要条件。数据分析可以产生有价值的见解,但同样,往往没有足够的数据来获得明确的答案。人们患某种健康疾病的平均年龄是多少?联邦分析可以通过多个客户端节点进行此类查询。它通常与安全聚合等其他隐私增强技术结合使用,以防止服务器看到单个客户端节点提交的结果。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 msgid "Differential Privacy" -msgstr "" +msgstr "差分隐私" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 msgid "" @@ -13561,10 +20103,12 @@ msgid "" "distinguished or re-identified. This technique can be considered an " "optimization that provides a quantifiable privacy protection measure." msgstr "" +"差分隐私(DP)经常在联邦学习中被提及。这是一种在分析和共享统计数据时使用的隐私保护方法,可确保单个参与者的隐私。DP " +"通过在模型更新中添加统计噪声来实现这一目的,确保任何个体参与者的信息都无法被区分或重新识别。这种技术可被视为一种优化,提供了一种可量化的隐私保护措施。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 msgid "Flower" -msgstr "" +msgstr "Flower" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 msgid "" @@ -13576,33 +20120,373 @@ msgid "" " federated learning, analytics, and evaluation. It allows the user to " "federate any workload, any ML framework, and any programming language." 
msgstr "" +"联邦学习、联邦评估和联邦分析需要基础框架来来回移动机器学习模型,在本地数据上对其进行训练和评估,然后汇总更新的模型。Flower " +"提供的基础架构正是以简单、可扩展和安全的方式实现这些目标的。简而言之,Flower " +"为联邦学习、分析和评估提供了一种统一的方法。它允许用户联邦化任何工作负载、任何 ML 框架和任何编程语言。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|a23d9638f96342ef9d25209951e2d564|" +msgid "|08cb60859b07461588fe44e55810b050|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 msgid "" "Flower federated learning server and client nodes (car, scooter, personal" " computer, roomba, and phone)" -msgstr "" +msgstr "Flower联邦学习服务器和客户端节点(汽车、滑板车、个人电脑、roomba 和电话)" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 msgid "" "Congratulations, you just learned the basics of federated learning and " "how it relates to the classic (centralized) machine learning!" -msgstr "" +msgstr "恭喜您,您刚刚了解了联邦学习的基础知识,以及它与传统(集中式)机器学习的关系!" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 msgid "" "In the next part of this tutorial, we are going to build a first " "federated learning system with Flower." -msgstr "" +msgstr "在本教程的下一部分,我们将用 Flower 建立第一个联邦学习系统。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 msgid "" "The `Flower Federated Learning Tutorial - Part 1 " -"`__ shows how to build a simple federated learning system " "with PyTorch and Flower." msgstr "" +"`Flower 联邦学习教程 - 第 1 部分 `__ 展示了如何使用 PyTorch 和 Flower " +"构建一个简单的联邦学习系统。" + +#~ msgid "Before the release" +#~ msgstr "发布前" + +#~ msgid "" +#~ "Update the changelog (``changelog.md``) with" +#~ " all relevant changes that happened " +#~ "after the last release. 
If the " +#~ "last release was tagged ``v1.2.0``, you" +#~ " can use the following URL to " +#~ "see all commits that got merged " +#~ "into ``main`` since then:" +#~ msgstr "" +#~ "更新更新日志 (``changelog.md``),加入上次发布后发生的所有相关变更。如果上次发布的版本被标记为 " +#~ "``v1.2.0``,则可以使用以下 URL 查看此后合并到 ``main`` 的所有提交:" + +#~ msgid "" +#~ "`GitHub: Compare v1.2.0...main " +#~ "`_" +#~ msgstr "" +#~ "`GitHub: Compare v1.2.0...main " +#~ "`_" + +#~ msgid "" +#~ "Thank the authors who contributed since" +#~ " the last release. This can be " +#~ "done by running the ``./dev/add-" +#~ "shortlog.sh`` convenience script (it can " +#~ "be ran multiple times and will " +#~ "update the names in the list if" +#~ " new contributors were added in the" +#~ " meantime)." +#~ msgstr "" +#~ "感谢自上次发布以来做出贡献的作者。可以通过运行 ``./dev/add-shortlog.sh`` " +#~ "方便脚本来完成(可以多次运行,如果在此期间有新的贡献者加入,则会更新列表中的名字)。" + +#~ msgid "" +#~ "Update the ``changelog.md`` section header " +#~ "``Unreleased`` to contain the version " +#~ "number and date for the release " +#~ "you are building. Create a pull " +#~ "request with the change." +#~ msgstr "" +#~ "更新 ``changelog.md`` 部分的标题 ``Unreleased`` " +#~ "以包含你正在构建的版本的版本号和日期。创建一个包含更改的拉取请求。" + +#~ msgid "" +#~ "Second, create a virtual environment " +#~ "(and activate it). If you chose to" +#~ " use :code:`pyenv` (with the :code" +#~ ":`pyenv-virtualenv` plugin) and already " +#~ "have it installed , you can use" +#~ " the following convenience script (by " +#~ "default it will use :code:`Python " +#~ "3.8.17`, but you can change it by" +#~ " providing a specific :code:``)::" +#~ msgstr "" +#~ "其次,创建虚拟环境(并激活它)。如果您选择使用 :code:`pyenv`(使用 :code:`pyenv-" +#~ "virtualenv`插件),并且已经安装了该插件,则可以使用下面的便捷脚本(默认情况下使用 " +#~ ":code:`Python3.8.17`,但您可以通过提供特定的 :code:`<版本>`来更改)::" + +#~ msgid "flwr (Python API reference)" +#~ msgstr "flwr(Python API 参考)" + +#~ msgid "..." +#~ msgstr "..." 
+ +#~ msgid "Starting a client with an insecure server connection:" +#~ msgstr "使用不安全的服务器连接启动客户端:" + +#~ msgid "server.strategy.FedAvg" +#~ msgstr "server.strategy.FedAvg" + +#~ msgid "server.strategy.FedAvgM" +#~ msgstr "server.strategy.FedAvgM" + +#~ msgid "Configurable FedAvg with Momentum strategy implementation." +#~ msgstr "可配置的 FedAvg 动量策略实施。" + +#~ msgid "Fraction of clients used during training. Defaults to 0.1." +#~ msgstr "训练期间使用客户的比例。默认为 0.1。" + +#~ msgid "Fraction of clients used during validation. Defaults to 0.1." +#~ msgstr "验证过程中使用的客户端比例。默认为 0.1。" + +#~ msgid "server.strategy.FedMedian" +#~ msgstr "server.strategy.FedMedian" + +#~ msgid "server.strategy.QFedAvg" +#~ msgstr "server.strategy.QFedAvg" + +#~ msgid "server.strategy.FedOpt" +#~ msgstr "server.strategy.FedOpt" + +#~ msgid "Configurable FedAdagrad strategy implementation." +#~ msgstr "可配置的 FedAdagrad 策略实施。" + +#~ msgid "Federated Optim strategy interface." +#~ msgstr "Federated Optim 策略界面。" + +#~ msgid "server.strategy.FedProx" +#~ msgstr "server.strategy.FedProx" + +#~ msgid "Configurable FedProx strategy implementation." +#~ msgstr "可配置的 FedProx 策略实施。" + +#~ msgid "server.strategy.FedAdagrad" +#~ msgstr "server.strategy.FedAdagrad" + +#~ msgid "Paper: https://arxiv.org/abs/2003.00295" +#~ msgstr "论文: https://arxiv.org/abs/2003.00295" + +#~ msgid "Federated learning strategy using Adagrad on server-side." +#~ msgstr "在服务器端使用 Adagrad 的联邦学习策略。" + +#~ msgid "server.strategy.FedAdam" +#~ msgstr "server.strategy.FedAdam" + +#~ msgid "server.strategy.FedYogi" +#~ msgstr "server.strategy.FedYogi" + +#~ msgid "Adaptive Federated Optimization using Yogi." +#~ msgstr "使用 Yogi 的自适应联合优化。" + +#~ msgid "Federated learning strategy using Yogi on server-side." 
+#~ msgstr "在服务器端使用 Yogi 的联邦学习策略。" + +#~ msgid "Paper: https://arxiv.org/abs/1803.01498" +#~ msgstr "论文:https://arxiv.org/abs/1803.01498" + +#~ msgid "server.strategy.Krum" +#~ msgstr "server.strategy.Krum" + +#~ msgid "Configurable Krum strategy implementation." +#~ msgstr "可配置的 Krum 策略实施。" + +#~ msgid "server.strategy.Bulyan" +#~ msgstr "server.strategy.Bulyan" + +#~ msgid "Bulyan strategy implementation." +#~ msgstr "Bulyan策略的实施。" + +#~ msgid "server.strategy.FedXgbNnAvg" +#~ msgstr "server.strategy.FedXgbNnAvg" + +#~ msgid "Federated XGBoost [Ma et al., 2023] strategy." +#~ msgstr "Federated XGBoost [Ma 等人,2023] 策略。" + +#~ msgid "server.strategy.DPFedAvgAdaptive" +#~ msgstr "server.strategy.DPFedAvgAdaptive" + +#~ msgid "" +#~ "**Fix the incorrect return types of " +#~ "Strategy** " +#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ msgstr "" +#~ "**修复策略的错误返回类型** " +#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" + +#~ msgid "" +#~ "The types of the return values in" +#~ " the docstrings in two methods " +#~ "(`aggregate_fit` and `aggregate_evaluate`) now " +#~ "match the hint types in the code." +#~ msgstr "" +#~ "两个方法(\"aggregate_fit \"和 " +#~ "\"aggregate_evaluate\")的文档说明中的返回值类型现在与代码中的提示类型一致。" + +#~ msgid "" +#~ "**Update Flower Examples** " +#~ "([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425)," +#~ " [#2526](https://github.com/adap/flower/pull/2526))" +#~ msgstr "" +#~ "** 更新 Flower Examples** " +#~ "([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425)," +#~ " [#2526](https://github.com/adap/flower/pull/2526))" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. 
In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." +#~ msgstr "" +#~ "对于客户端就需要做这么多。我们仅需要实现 " +#~ ":code:`Client`或者:code:`NumPyClient`然后调用:code:`fl.client.start_client()`。字符串" +#~ " :code:`\"0.0.0.0:8080\"` " +#~ "告诉客户端要连接到哪个服务器。在我们的例子中,我们可以在同一台机器上运行服务器和客户端,因此我们使用:code:`\"0.0.0.0:8080\"`。如果我们运行真正联邦学习的工作负载,服务器和客户端在不同的机器上运行,则需要更改的只是我们传递给客户端的" +#~ " server_address 。" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." 
+#~ msgstr "" +#~ "对于客户来说就是这样了。我们只需实现 :code:`Client` 或 " +#~ ":code:`NumPyClient` 并调用:code:`fl.client.start_client()` " +#~ "即可。字符串 :code:`\"[::]:8080\"` " +#~ "告诉客户端要连接到哪个服务器。在我们的例子中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " +#~ ":code:`\"[::]:8080\"`。如果我们运行真正联邦的工作负载,服务器和客户端运行在不同的机器上,则需要更改的只是我们指向客户端的" +#~ " server_address 。" + +#~ msgid "" +#~ "Let's now load the CIFAR-10 training " +#~ "and test set, partition them into " +#~ "ten smaller datasets (each split into" +#~ " training and validation set), and " +#~ "wrap the resulting partitions by " +#~ "creating a PyTorch ``DataLoader`` for " +#~ "each of them:" +#~ msgstr "" +#~ "现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成 10 " +#~ "个较小的数据集(每个数据集又分为训练集和验证集),并通过为每个数据集创建 PyTorch " +#~ "``DataLoader`` 来包装由此产生的分割集:" + +#~ msgid "|e1dd4b4129b040bea23a894266227080|" +#~ msgstr "|e1dd4b4129b040bea23a894266227080|" + +#~ msgid "|c0d4cc6a442948dca8da40d2440068d9|" +#~ msgstr "|c0d4cc6a442948dca8da40d2440068d9|" + +#~ msgid "|174e1e4fa1f149a19bfbc8bc1126f46a|" +#~ msgstr "|174e1e4fa1f149a19bfbc8bc1126f46a|" + +#~ msgid "|4e021a3dc08249d2a89daa3ab03c2714|" +#~ msgstr "|4e021a3dc08249d2a89daa3ab03c2714|" + +#~ msgid "|e74a1d5ce7eb49688651f2167a59065b|" +#~ msgstr "|e74a1d5ce7eb49688651f2167a59065b|" + +#~ msgid "|eb29ec4c7aef4e93976795ed72df647e|" +#~ msgstr "|eb29ec4c7aef4e93976795ed72df647e|" + +#~ msgid "|c2f699d8ac484f5081721a6f1511f70d|" +#~ msgstr "|c2f699d8ac484f5081721a6f1511f70d|" + +#~ msgid "|cf42accdacbf4e5eb4fa0503108ba7a7|" +#~ msgstr "|cf42accdacbf4e5eb4fa0503108ba7a7|" + +#~ msgid "|5ec8356bc2564fa09178b1ceed5beccc|" +#~ msgstr "|5ec8356bc2564fa09178b1ceed5beccc|" + +#~ msgid "|7c9329e97bd0430bad335ab605a897a7|" +#~ msgstr "|7c9329e97bd0430bad335ab605a897a7|" + +#~ msgid "|88002bbce1094ba1a83c9151df18f707|" +#~ msgstr "|88002bbce1094ba1a83c9151df18f707|" + +#~ msgid "|391766aee87c482c834c93f7c22225e2|" +#~ msgstr "|391766aee87c482c834c93f7c22225e2|" + +#~ msgid "|93b9a15bd27f4e91b40f642c253dfaac|" +#~ msgstr 
"|93b9a15bd27f4e91b40f642c253dfaac|" + +#~ msgid "|a23d9638f96342ef9d25209951e2d564|" +#~ msgstr "|a23d9638f96342ef9d25209951e2d564|" + +#~ msgid "Upload the whl (e.g., ``flwr-1.6.0-py3-none-any.whl``)" +#~ msgstr "上传 whl(例如 ``flwr-1.6.0-py3-none-any.whl``)" + +#~ msgid "" +#~ "Change ``!pip install -q 'flwr[simulation]'" +#~ " torch torchvision matplotlib`` to ``!pip" +#~ " install -q 'flwr-1.6.0-py3-none-" +#~ "any.whl[simulation]' torch torchvision matplotlib``" +#~ msgstr "" +#~ "将``!pip install -q 'flwr[simulation]' torch" +#~ " torchvision matplotlib``更改为``!pip install -q " +#~ "'flwr-1.6.0-py3-none-any.whl[simulation]' torch " +#~ "torch torchvision matplotlib``" + +#~ msgid "" +#~ "All that's left to do it to " +#~ "define a function that loads both " +#~ "model and data, creates a " +#~ ":code:`CifarClient`, and starts this client." +#~ " You load your data and model " +#~ "by using :code:`cifar.py`. Start " +#~ ":code:`CifarClient` with the function " +#~ ":code:`fl.client.start_numpy_client()` by pointing " +#~ "it at the same IP adress we " +#~ "used in :code:`server.py`:" +#~ msgstr "" +#~ "剩下要做的就是定义一个加载模型和数据的函数,创建一个 :code:`CifarClient` 并启动该客户端。使用" +#~ " :code:`cifar.py` 加载数据和模型。使用函数 " +#~ ":code:`fl.client.start_numpy_client()` 启动 " +#~ ":code:`CifarClient`,将其指向我们在 :code:`server.py` 中使用的相同 " +#~ "IP 地址:" + +#~ msgid "" +#~ "The :code:`VirtualClientEngine` schedules, launches" +#~ " and manages `virtual` clients. These " +#~ "clients are identical to `non-virtual`" +#~ " clients (i.e. the ones you launch" +#~ " via the command `flwr.client.start_numpy_client" +#~ " `_)" +#~ " in the sense that they can be" +#~ " configure by creating a class " +#~ "inheriting, for example, from " +#~ "`flwr.client.NumPyClient `_ and therefore " +#~ "behave in an identical way. 
In " +#~ "addition to that, clients managed by " +#~ "the :code:`VirtualClientEngine` are:" +#~ msgstr "" +#~ "代码:`VirtualClientEngine`调度、启动和管理`虚拟`客户端。这些客户端与 \"非虚拟 " +#~ "\"客户端(即通过命令 `flwr.client.start_numpy_client `_启动的客户端)完全相同,它们可以通过创建一个继承自 \"flwr.client.NumPyClient " +#~ "`_\" " +#~ "的类来配置,因此行为方式也完全相同。除此之外,由 :code:`VirtualClientEngine` " +#~ "管理的客户端还包括:" + diff --git a/doc/source/.gitignore b/doc/source/.gitignore new file mode 100644 index 000000000000..e9341a1383b7 --- /dev/null +++ b/doc/source/.gitignore @@ -0,0 +1 @@ +ref-api/ diff --git a/doc/source/_templates/base.html b/doc/source/_templates/base.html index 0cbe6e9e4456..768c560f4f6a 100644 --- a/doc/source/_templates/base.html +++ b/doc/source/_templates/base.html @@ -6,9 +6,9 @@ {% if current_language != 'en' %} - + {% else %} - + {% endif %} {%- if metatags %}{{ metatags }}{% endif -%} @@ -105,6 +105,6 @@ {%- endblock -%} {%- endblock scripts -%} - + diff --git a/doc/source/_templates/sidebar/lang.html b/doc/source/_templates/sidebar/lang.html index b5143bd7212b..b377a53f9c40 100644 --- a/doc/source/_templates/sidebar/lang.html +++ b/doc/source/_templates/sidebar/lang.html @@ -2,7 +2,8 @@ {% endif %} diff --git a/doc/source/_templates/sidebar/versioning.html b/doc/source/_templates/sidebar/versioning.html index dde7528d15e4..74f1cd8febb7 100644 --- a/doc/source/_templates/sidebar/versioning.html +++ b/doc/source/_templates/sidebar/versioning.html @@ -59,8 +59,8 @@ -
- +
+
diff --git a/doc/source/conf.py b/doc/source/conf.py index 503f76cb9eca..88cb5c05b1d8 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -86,7 +86,7 @@ author = "The Flower Authors" # The full version, including alpha/beta/rc tags -release = "1.7.0" +release = "1.8.0" # -- General configuration --------------------------------------------------- @@ -123,25 +123,27 @@ # The full name is still at the top of the page add_module_names = False + def find_test_modules(package_path): """Go through the python files and exclude every *_test.py file.""" full_path_modules = [] for root, dirs, files in os.walk(package_path): for file in files: - if file.endswith('_test.py'): + if file.endswith("_test.py"): # Construct the module path relative to the package directory full_path = os.path.join(root, file) relative_path = os.path.relpath(full_path, package_path) # Convert file path to dotted module path - module_path = os.path.splitext(relative_path)[0].replace(os.sep, '.') + module_path = os.path.splitext(relative_path)[0].replace(os.sep, ".") full_path_modules.append(module_path) modules = [] for full_path_module in full_path_modules: - parts = full_path_module.split('.') + parts = full_path_module.split(".") for i in range(len(parts)): - modules.append('.'.join(parts[i:])) + modules.append(".".join(parts[i:])) return modules + # Stop from documenting the *_test.py files. # That's the only way to do that in autosummary (make the modules as mock_imports). 
autodoc_mock_imports = find_test_modules(os.path.abspath("../../src/py/flwr")) @@ -173,6 +175,7 @@ def find_test_modules(package_path): "writing-documentation": "contributor-how-to-write-documentation.html", "apiref-binaries": "ref-api-cli.html", "fedbn-example-pytorch-from-centralized-to-federated": "example-fedbn-pytorch-from-centralized-to-federated.html", + "how-to-use-built-in-middleware-layers": "how-to-use-built-in-mods.html", # Restructuring: tutorials "tutorial/Flower-0-What-is-FL": "tutorial-series-what-is-federated-learning.html", "tutorial/Flower-1-Intro-to-FL-PyTorch": "tutorial-series-get-started-with-flower-pytorch.html", @@ -248,7 +251,7 @@ def find_test_modules(package_path): html_title = f"Flower Framework" html_logo = "_static/flower-logo.png" html_favicon = "_static/favicon.ico" -html_baseurl = "https://flower.dev/docs/framework/" +html_baseurl = "https://flower.ai/docs/framework/" html_theme_options = { # diff --git a/doc/source/contributor-explanation-architecture.rst b/doc/source/contributor-explanation-architecture.rst index 0e2ea1f6e66b..a20a84313118 100644 --- a/doc/source/contributor-explanation-architecture.rst +++ b/doc/source/contributor-explanation-architecture.rst @@ -4,7 +4,7 @@ Flower Architecture Edge Client Engine ------------------ -`Flower `_ core framework architecture with Edge Client Engine +`Flower `_ core framework architecture with Edge Client Engine .. figure:: _static/flower-architecture-ECE.png :width: 80 % @@ -12,7 +12,7 @@ Edge Client Engine Virtual Client Engine --------------------- -`Flower `_ core framework architecture with Virtual Client Engine +`Flower `_ core framework architecture with Virtual Client Engine .. 
figure:: _static/flower-architecture-VCE.png :width: 80 % @@ -20,7 +20,7 @@ Virtual Client Engine Virtual Client Engine and Edge Client Engine in the same workload ----------------------------------------------------------------- -`Flower `_ core framework architecture with both Virtual Client Engine and Edge Client Engine +`Flower `_ core framework architecture with both Virtual Client Engine and Edge Client Engine .. figure:: _static/flower-architecture.drawio.png :width: 80 % diff --git a/doc/source/contributor-how-to-build-docker-images.rst b/doc/source/contributor-how-to-build-docker-images.rst index d85e48155de0..3beae7422bef 100644 --- a/doc/source/contributor-how-to-build-docker-images.rst +++ b/doc/source/contributor-how-to-build-docker-images.rst @@ -17,7 +17,7 @@ Before we can start, we need to meet a few prerequisites in our local developmen #. Verify the Docker daemon is running. Please follow the first section on - `Run Flower using Docker `_ + `Run Flower using Docker `_ which covers this step in more detail. Currently, Flower provides two images, a base image and a server image. There will also be a client @@ -98,17 +98,17 @@ Building the server image * - ``FLWR_VERSION`` - Version of Flower to be installed. - Yes - - ``1.6.0`` + - ``1.7.0`` The following example creates a server image with the official Flower base image py3.11-ubuntu22.04 -and Flower 1.6.0: +and Flower 1.7.0: .. code-block:: bash $ cd src/docker/server/ $ docker build \ --build-arg BASE_IMAGE_TAG=py3.11-ubuntu22.04 \ - --build-arg FLWR_VERSION=1.6.0 \ + --build-arg FLWR_VERSION=1.7.0 \ -t flwr_server:0.1.0 . The name of image is ``flwr_server`` and the tag ``0.1.0``. Remember that the build arguments as well @@ -125,7 +125,7 @@ the tag of your image. $ docker build \ --build-arg BASE_REPOSITORY=flwr_base \ --build-arg BASE_IMAGE_TAG=0.1.0 \ - --build-arg FLWR_VERSION=1.6.0 \ + --build-arg FLWR_VERSION=1.7.0 \ -t flwr_server:0.1.0 . 
After creating the image, we can test whether the image is working: diff --git a/doc/source/contributor-how-to-contribute-translations.rst b/doc/source/contributor-how-to-contribute-translations.rst index d97a2cb8c64f..1614b8e5a040 100644 --- a/doc/source/contributor-how-to-contribute-translations.rst +++ b/doc/source/contributor-how-to-contribute-translations.rst @@ -2,7 +2,7 @@ Contribute translations ======================= Since `Flower 1.5 -`_ we +`_ we have introduced translations to our doc pages, but, as you might have noticed, the translations are often imperfect. If you speak languages other than English, you might be able to help us in our effort to make Federated Learning @@ -67,5 +67,5 @@ Add new languages ----------------- If you want to add a new language, you will first have to contact us, either on -`Slack `_, or by opening an issue on our `GitHub +`Slack `_, or by opening an issue on our `GitHub repo `_. diff --git a/doc/source/contributor-how-to-install-development-versions.rst b/doc/source/contributor-how-to-install-development-versions.rst index 243f4ef97e8e..558ec7f8ec46 100644 --- a/doc/source/contributor-how-to-install-development-versions.rst +++ b/doc/source/contributor-how-to-install-development-versions.rst @@ -19,8 +19,8 @@ Install ``flwr`` from a local copy of the Flower source code via ``pyproject.tom Install ``flwr`` from a local wheel file via ``pyproject.toml``: -- ``flwr = { path = "../../dist/flwr-1.0.0-py3-none-any.whl" }`` (without extras) -- ``flwr = { path = "../../dist/flwr-1.0.0-py3-none-any.whl", extras = ["simulation"] }`` (with extras) +- ``flwr = { path = "../../dist/flwr-1.8.0-py3-none-any.whl" }`` (without extras) +- ``flwr = { path = "../../dist/flwr-1.8.0-py3-none-any.whl", extras = ["simulation"] }`` (with extras) Please refer to the Poetry documentation for further details: `Poetry Dependency Specification `_ @@ -59,5 +59,5 @@ Open a development version of the same notebook from branch `branch-name` by cha Install 
a `whl` on Google Colab: 1. In the vertical icon grid on the left hand side, select ``Files`` > ``Upload to session storage`` -2. Upload the whl (e.g., ``flwr-1.7.0-py3-none-any.whl``) -3. Change ``!pip install -q 'flwr[simulation]' torch torchvision matplotlib`` to ``!pip install -q 'flwr-1.7.0-py3-none-any.whl[simulation]' torch torchvision matplotlib`` +2. Upload the whl (e.g., ``flwr-1.8.0-py3-none-any.whl``) +3. Change ``!pip install -q 'flwr[simulation]' torch torchvision matplotlib`` to ``!pip install -q 'flwr-1.8.0-py3-none-any.whl[simulation]' torch torchvision matplotlib`` diff --git a/doc/source/contributor-how-to-release-flower.rst b/doc/source/contributor-how-to-release-flower.rst index 2eef165c0ed0..acfac4197ec1 100644 --- a/doc/source/contributor-how-to-release-flower.rst +++ b/doc/source/contributor-how-to-release-flower.rst @@ -3,23 +3,15 @@ Release Flower This document describes the current release process. It may or may not change in the future. -Before the release ------------------- - -Update the changelog (``changelog.md``) with all relevant changes that happened after the last release. If the last release was tagged ``v1.2.0``, you can use the following URL to see all commits that got merged into ``main`` since then: - -`GitHub: Compare v1.2.0...main `_ - -Thank the authors who contributed since the last release. This can be done by running the ``./dev/add-shortlog.sh `` convenience script (it can be ran multiple times and will update the names in the list if new contributors were added in the meantime). - During the release ------------------ The version number of a release is stated in ``pyproject.toml``. To release a new version of Flower, the following things need to happen (in that order): -1. Update the ``changelog.md`` section header ``Unreleased`` to contain the version number and date for the release you are building. Create a pull request with the change. -2. 
Tag the release commit with the version number as soon as the PR is merged: ``git tag v0.12.3``, then ``git push --tags``. This will create a draft release on GitHub containing the correct artifacts and the relevant part of the changelog. -3. Check the draft release on GitHub, and if everything is good, publish it. +1. Run ``python3 src/py/flwr_tool/update_changelog.py `` in order to add every new change to the changelog (feel free to make manual changes to the changelog afterwards until it looks good). +2. Once the changelog has been updated with all the changes, run ``./dev/prepare-release-changelog.sh v``, where ```` is the version stated in ``pyproject.toml`` (notice the ``v`` added before it). This will replace the ``Unreleased`` header of the changelog by the version and current date, and it will add a thanking message for the contributors. Open a pull request with those changes. +3. Once the pull request is merged, tag the release commit with the version number as soon as the PR is merged: ``git tag v`` (notice the ``v`` added before the version number), then ``git push --tags``. This will create a draft release on GitHub containing the correct artifacts and the relevant part of the changelog. +4. Check the draft release on GitHub, and if everything is good, publish it. After the release ----------------- diff --git a/doc/source/contributor-ref-good-first-contributions.rst b/doc/source/contributor-ref-good-first-contributions.rst index 523a4679c6ef..cbf21e2845bc 100644 --- a/doc/source/contributor-ref-good-first-contributions.rst +++ b/doc/source/contributor-ref-good-first-contributions.rst @@ -14,7 +14,7 @@ Until the Flower core library matures it will be easier to get PR's accepted if they only touch non-core areas of the codebase. Good candidates to get started are: -- Documentation: What's missing? What could be expressed more clearly? +- Documentation: What's missing? What could be expressed more clearly? - Baselines: See below. - Examples: See below. 
@@ -22,9 +22,9 @@ are: Request for Flower Baselines ---------------------------- -If you are not familiar with Flower Baselines, you should probably check-out our `contributing guide for baselines `_. +If you are not familiar with Flower Baselines, you should probably check-out our `contributing guide for baselines `_. -You should then check out the open +You should then check out the open `issues `_ for baseline requests. If you find a baseline that you'd like to work on and that has no assignes, feel free to assign it to yourself and start working on it! diff --git a/doc/source/contributor-tutorial-contribute-on-github.rst b/doc/source/contributor-tutorial-contribute-on-github.rst index aca7b0e68968..273b47a636cc 100644 --- a/doc/source/contributor-tutorial-contribute-on-github.rst +++ b/doc/source/contributor-tutorial-contribute-on-github.rst @@ -3,9 +3,8 @@ Contribute on GitHub This guide is for people who want to get involved with Flower, but who are not used to contributing to GitHub projects. -If you're familiar with how contributing on GitHub works, you can directly checkout our -`getting started guide for contributors `_ -and examples of `good first contributions `_. +If you're familiar with how contributing on GitHub works, you can directly checkout our +`getting started guide for contributors `_. Setting up the repository @@ -17,9 +16,9 @@ Setting up the repository GitHub, itself, is a code hosting platform for version control and collaboration. It allows for everyone to collaborate and work from anywhere on remote repositories. - If you haven't already, you will need to create an account on `GitHub `_. + If you haven't already, you will need to create an account on `GitHub `_. 
- The idea behind the generic Git and GitHub workflow boils down to this: + The idea behind the generic Git and GitHub workflow boils down to this: you download code from a remote repository on GitHub, make changes locally and keep track of them using Git and then you upload your new history back to GitHub. 2. **Forking the Flower repository** @@ -27,7 +26,7 @@ Setting up the repository and click the ``Fork`` button situated on the top right of the page. .. image:: _static/fork_button.png - + You can change the name if you want, but this is not necessary as this version of Flower will be yours and will sit inside your own account (i.e., in your own list of repositories). Once created, you should see on the top left corner that you are looking at your own version of Flower. @@ -35,14 +34,14 @@ Setting up the repository 3. **Cloning your forked repository** The next step is to download the forked repository on your machine to be able to make changes to it. - On your forked repository page, you should first click on the ``Code`` button on the right, + On your forked repository page, you should first click on the ``Code`` button on the right, this will give you the ability to copy the HTTPS link of the repository. .. image:: _static/cloning_fork.png Once you copied the \, you can open a terminal on your machine, navigate to the place you want to download the repository to and type: - .. code-block:: shell + .. code-block:: shell $ git clone @@ -59,14 +58,14 @@ Setting up the repository To obtain it, we can do as previously mentioned by going to our fork repository on our GitHub account and copying the link. .. image:: _static/cloning_fork.png - + Once the \ is copied, we can type the following command in our terminal: .. code-block:: shell $ git remote add origin - + 5. **Add upstream** Now we will add an upstream address to our repository. 
Still in the same directroy, we must run the following command: @@ -77,10 +76,10 @@ Setting up the repository The following diagram visually explains what we did in the previous steps: - .. image:: _static/github_schema.png + .. image:: _static/github_schema.png - The upstream is the GitHub remote address of the parent repository (in this case Flower), - i.e. the one we eventually want to contribute to and therefore need an up-to-date history of. + The upstream is the GitHub remote address of the parent repository (in this case Flower), + i.e. the one we eventually want to contribute to and therefore need an up-to-date history of. The origin is just the GitHub remote address of the forked repository we created, i.e. the copy (fork) in our own account. To make sure our local version of the fork is up-to-date with the latest changes from the Flower repository, @@ -114,9 +113,9 @@ And with Flower's repository: $ git pull upstream main 1. **Create a new branch** - To make the history cleaner and easier to work with, it is good practice to + To make the history cleaner and easier to work with, it is good practice to create a new branch for each feature/project that needs to be implemented. - + To do so, just run the following command inside the repository's directory: .. code-block:: shell @@ -138,7 +137,7 @@ And with Flower's repository: $ ./dev/test.sh # to test that your code can be accepted $ ./baselines/dev/format.sh # same as above but for code added to baselines $ ./baselines/dev/test.sh # same as above but for code added to baselines - + 4. **Stage changes** Before creating a commit that will update your history, you must specify to Git which files it needs to take into account. @@ -185,21 +184,21 @@ Creating and merging a pull request (PR) Once you click the ``Compare & pull request`` button, you should see something similar to this: .. image:: _static/creating_pr.png - + At the top you have an explanation of which branch will be merged where: .. 
image:: _static/merging_branch.png - + In this example you can see that the request is to merge the branch ``doc-fixes`` from my forked repository to branch ``main`` from the Flower repository. - The input box in the middle is there for you to describe what your PR does and to link it to existing issues. + The input box in the middle is there for you to describe what your PR does and to link it to existing issues. We have placed comments (that won't be rendered once the PR is opened) to guide you through the process. It is important to follow the instructions described in comments. For instance, in order to not break how our changelog system works, you should read the information above the ``Changelog entry`` section carefully. You can also checkout some examples and details in the :ref:`changelogentry` appendix. - At the bottom you will find the button to open the PR. This will notify reviewers that a new PR has been opened and + At the bottom you will find the button to open the PR. This will notify reviewers that a new PR has been opened and that they should look over it to merge or to request changes. If your PR is not yet ready for review, and you don't want to notify anyone, you have the option to create a draft pull request: @@ -219,7 +218,7 @@ Creating and merging a pull request (PR) Merging will be blocked if there are ongoing requested changes. .. image:: _static/changes_requested.png - + To resolve them, just push the necessary changes to the branch associated with the PR: .. image:: _static/make_changes.png @@ -278,12 +277,12 @@ This is a tiny change, but it’ll allow us to test your end-to-end setup. After - Find the source file in ``doc/source`` - Make the change in the ``.rst`` file (beware, the dashes under the title should be the same length as the title itself) -- Build the docs and check the result: ``_ +- Build the docs and check the result: ``_ Rename file ::::::::::: -You might have noticed that the file name still reflects the old wording. 
+You might have noticed that the file name still reflects the old wording. If we just change the file, then we break all existing links to it - it is **very important** to avoid that, breaking links can harm our search engine ranking. Here’s how to change the file name: @@ -296,7 +295,7 @@ This will cause a redirect from ``saving-progress.html`` to ``save-progress.html Apply changes in the index file ::::::::::::::::::::::::::::::: -For the lateral navigation bar to work properly, it is very important to update the ``index.rst`` file as well. +For the lateral navigation bar to work properly, it is very important to update the ``index.rst`` file as well. This is where we define the whole arborescence of the navbar. - Find and modify the file name in ``index.rst`` @@ -344,7 +343,7 @@ Next steps Once you have made your first PR, and want to contribute more, be sure to check out the following : -- `Good first contributions `_, where you should particularly look into the :code:`baselines` contributions. +- `Good first contributions `_, where you should particularly look into the :code:`baselines` contributions. Appendix @@ -357,34 +356,35 @@ Changelog entry When opening a new PR, inside its description, there should be a ``Changelog entry`` header. -As per the comment above this section:: +Above this header you should see the following comment that explains how to write your changelog entry: - Inside the following 'Changelog entry' section, + Inside the following 'Changelog entry' section, you should put the description of your changes that will be added to the changelog alongside your PR title. - If the section is completely empty (without any token), - the changelog will just contain the title of the PR for the changelog entry, without any description. - If the 'Changelog entry' section is removed entirely, - it will categorize the PR as "General improvement" and add it to the changelog accordingly. 
- If the section contains some text other than tokens, it will use it to add a description to the change. + If the section is completely empty (without any token) or non-existant, + the changelog will just contain the title of the PR for the changelog entry, without any description. + + If the section contains some text other than tokens, it will use it to add a description to the change. + If the section contains one of the following tokens it will ignore any other text and put the PR under the corresponding section of the changelog: is for classifying a PR as a general improvement. + is to not add the PR to the changelog + is to add a general baselines change to the PR + is to add a general examples change to the PR + is to add a general sdk change to the PR + is to add a general simulations change to the PR Note that only one token should be used. Its content must have a specific format. We will break down what each possibility does: -- If the ``### Changelog entry`` section is removed, the following text will be added to the changelog:: - - - **General improvements** ([#PR_NUMBER](https://github.com/adap/flower/pull/PR_NUMBER)) - -- If the ``### Changelog entry`` section contains nothing but exists, the following text will be added to the changelog:: +- If the ``### Changelog entry`` section contains nothing or doesn't exist, the following text will be added to the changelog:: - **PR TITLE** ([#PR_NUMBER](https://github.com/adap/flower/pull/PR_NUMBER)) diff --git a/doc/source/example-fedbn-pytorch-from-centralized-to-federated.rst b/doc/source/example-fedbn-pytorch-from-centralized-to-federated.rst index 5ebaa337dde8..5d4dac0c0cda 100644 --- a/doc/source/example-fedbn-pytorch-from-centralized-to-federated.rst +++ b/doc/source/example-fedbn-pytorch-from-centralized-to-federated.rst @@ -3,11 +3,11 @@ Example: FedBN in PyTorch - From Centralized To Federated This tutorial will show you how to use Flower to build a federated version of an existing machine learning 
workload with `FedBN `_, a federated training strategy designed for non-iid data. We are using PyTorch to train a Convolutional Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. -When applying FedBN, only few changes needed compared to `Example: PyTorch - From Centralized To Federated `_. +When applying FedBN, only few changes needed compared to `Example: PyTorch - From Centralized To Federated `_. Centralized Training -------------------- -All files are revised based on `Example: PyTorch - From Centralized To Federated `_. +All files are revised based on `Example: PyTorch - From Centralized To Federated `_. The only thing to do is modifying the file called :code:`cifar.py`, revised part is shown below: The model architecture defined in class Net() is added with Batch Normalization layers accordingly. @@ -50,8 +50,8 @@ Let's take the next step and use what we've built to create a federated learning Federated Training ------------------ -If you have read `Example: PyTorch - From Centralized To Federated `_, the following parts are easy to follow, onyl :code:`get_parameters` and :code:`set_parameters` function in :code:`client.py` needed to revise. -If not, please read the `Example: PyTorch - From Centralized To Federated `_. first. +If you have read `Example: PyTorch - From Centralized To Federated `_, the following parts are easy to follow, onyl :code:`get_parameters` and :code:`set_parameters` function in :code:`client.py` needed to revise. +If not, please read the `Example: PyTorch - From Centralized To Federated `_. first. Our example consists of one *server* and two *clients*. In FedBN, :code:`server.py` keeps unchanged, we can start the server directly. @@ -66,7 +66,7 @@ Finally, we will revise our *client* logic by changing :code:`get_parameters` an class CifarClient(fl.client.NumPyClient): """Flower client implementing CIFAR-10 image classification using PyTorch.""" - + ... 
def get_parameters(self, config) -> List[np.ndarray]: @@ -79,7 +79,7 @@ Finally, we will revise our *client* logic by changing :code:`get_parameters` an params_dict = zip(keys, parameters) state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) self.model.load_state_dict(state_dict, strict=False) - + ... Now, you can now open two additional terminal windows and run diff --git a/doc/source/example-jax-from-centralized-to-federated.rst b/doc/source/example-jax-from-centralized-to-federated.rst index 2b1823e9d408..6b06a288a67a 100644 --- a/doc/source/example-jax-from-centralized-to-federated.rst +++ b/doc/source/example-jax-from-centralized-to-federated.rst @@ -259,7 +259,7 @@ Having defined the federation process, we can run it. # Start Flower client client = FlowerClient(params, grad_fn, train_x, train_y, test_x, test_y) - fl.client.start_numpy_client(server_address="0.0.0.0:8080", client) + fl.client.start_client(server_address="0.0.0.0:8080", client.to_client()) if __name__ == "__main__": main() diff --git a/doc/source/example-pytorch-from-centralized-to-federated.rst b/doc/source/example-pytorch-from-centralized-to-federated.rst index d649658667da..58101135b8c0 100644 --- a/doc/source/example-pytorch-from-centralized-to-federated.rst +++ b/doc/source/example-pytorch-from-centralized-to-federated.rst @@ -278,7 +278,7 @@ We included type annotations to give you a better understanding of the data type return float(loss), self.num_examples["testset"], {"accuracy": float(accuracy)} All that's left to do it to define a function that loads both model and data, creates a :code:`CifarClient`, and starts this client. -You load your data and model by using :code:`cifar.py`. Start :code:`CifarClient` with the function :code:`fl.client.start_numpy_client()` by pointing it at the same IP adress we used in :code:`server.py`: +You load your data and model by using :code:`cifar.py`. 
Start :code:`CifarClient` with the function :code:`fl.client.start_client()` by pointing it at the same IP address we used in :code:`server.py`: .. code-block:: python @@ -292,7 +292,7 @@ You load your data and model by using :code:`cifar.py`. Start :code:`CifarClient # Start client client = CifarClient(model, trainloader, testloader, num_examples) - fl.client.start_numpy_client(server_address="0.0.0.0:8080", client) + fl.client.start_client(server_address="0.0.0.0:8080", client.to_client()) if __name__ == "__main__": diff --git a/doc/source/example-walkthrough-pytorch-mnist.rst b/doc/source/example-walkthrough-pytorch-mnist.rst index ab311813f5de..0be0af6e1ca6 100644 --- a/doc/source/example-walkthrough-pytorch-mnist.rst +++ b/doc/source/example-walkthrough-pytorch-mnist.rst @@ -76,7 +76,7 @@ Inside the server helper script *run-server.sh* you will find the following code We can go a bit deeper and see that :code:`server.py` simply launches a server that will coordinate three rounds of training. -Flower Servers are very customizable, but for simple workloads, we can start a server using the :ref:`start_server ` function and leave all the configuration possibilities at their default values, as seen below. +Flower Servers are very customizable, but for simple workloads, we can start a server using the `start_server `_ function and leave all the configuration possibilities at their default values, as seen below. ..
code-block:: python diff --git a/doc/source/how-to-configure-clients.rst b/doc/source/how-to-configure-clients.rst index 26c132125ccf..bfb5a8f63761 100644 --- a/doc/source/how-to-configure-clients.rst +++ b/doc/source/how-to-configure-clients.rst @@ -13,7 +13,7 @@ Configuration values are represented as a dictionary with ``str`` keys and value config_dict = { "dropout": True, # str key, bool value "learning_rate": 0.01, # str key, float value - "batch_size": 32, # str key, int value + "batch_size": 32, # str key, int value "optimizer": "sgd", # str key, str value } @@ -56,7 +56,7 @@ To make the built-in strategies use this function, we can pass it to ``FedAvg`` One the client side, we receive the configuration dictionary in ``fit``: .. code-block:: python - + class FlowerClient(flwr.client.NumPyClient): def fit(parameters, config): print(config["batch_size"]) # Prints `32` @@ -86,7 +86,7 @@ Configuring individual clients In some cases, it is necessary to send different configuration values to different clients. -This can be achieved by customizing an existing strategy or by `implementing a custom strategy from scratch `_. Here's a nonsensical example that customizes :code:`FedAvg` by adding a custom ``"hello": "world"`` configuration key/value pair to the config dict of a *single client* (only the first client in the list, the other clients in this round to not receive this "special" config value): +This can be achieved by customizing an existing strategy or by `implementing a custom strategy from scratch `_. Here's a nonsensical example that customizes :code:`FedAvg` by adding a custom ``"hello": "world"`` configuration key/value pair to the config dict of a *single client* (only the first client in the list, the other clients in this round do not receive this "special" config value): ..
code-block:: python diff --git a/doc/source/how-to-enable-ssl-connections.rst b/doc/source/how-to-enable-ssl-connections.rst index fa59d4423c5a..051dd5711497 100644 --- a/doc/source/how-to-enable-ssl-connections.rst +++ b/doc/source/how-to-enable-ssl-connections.rst @@ -75,9 +75,9 @@ We are now going to show how to write a client which uses the previously generat client = MyFlowerClient() # Start client - fl.client.start_numpy_client( + fl.client.start_client( "localhost:8080", - client=client, + client=client.to_client(), root_certificates=Path(".cache/certificates/ca.crt").read_bytes(), ) diff --git a/doc/source/how-to-install-flower.rst b/doc/source/how-to-install-flower.rst index ff3dbb605846..dc88076424f8 100644 --- a/doc/source/how-to-install-flower.rst +++ b/doc/source/how-to-install-flower.rst @@ -57,7 +57,7 @@ Advanced installation options Install via Docker ~~~~~~~~~~~~~~~~~~ -`How to run Flower using Docker `_ +`How to run Flower using Docker `_ Install pre-release ~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/how-to-run-flower-using-docker.rst b/doc/source/how-to-run-flower-using-docker.rst index 27ff61c280cb..ed034c820142 100644 --- a/doc/source/how-to-run-flower-using-docker.rst +++ b/doc/source/how-to-run-flower-using-docker.rst @@ -1,5 +1,5 @@ Run Flower using Docker -==================== +======================= The simplest way to get started with Flower is by using the pre-made Docker images, which you can find on `Docker Hub `_. @@ -31,12 +31,12 @@ If you're looking to try out Flower, you can use the following command: .. code-block:: bash - $ docker run --rm -p 9091:9091 -p 9092:9092 flwr/server:1.6.0-py3.11-ubuntu22.04 \ + $ docker run --rm -p 9091:9091 -p 9092:9092 flwr/server:1.7.0-py3.11-ubuntu22.04 \ --insecure -The command will pull the Docker image with the tag ``1.6.0-py3.11-ubuntu22.04`` from Docker Hub. +The command will pull the Docker image with the tag ``1.7.0-py3.11-ubuntu22.04`` from Docker Hub. 
The tag contains the information which Flower, Python and Ubuntu is used. In this case, it -uses Flower 1.6.0, Python 3.11 and Ubuntu 22.04. The ``--rm`` flag tells Docker to remove +uses Flower 1.7.0, Python 3.11 and Ubuntu 22.04. The ``--rm`` flag tells Docker to remove the container after it exits. .. note:: @@ -54,14 +54,14 @@ to the Flower server. Here, we are passing the flag ``--insecure``. The ``--insecure`` flag enables insecure communication (using HTTP, not HTTPS) and should only be used for testing purposes. We strongly recommend enabling - `SSL `_ + `SSL `_ when deploying to a production environment. You can use ``--help`` to view all available flags that the server supports: .. code-block:: bash - $ docker run --rm flwr/server:1.6.0-py3.11-ubuntu22.04 --help + $ docker run --rm flwr/server:1.7.0-py3.11-ubuntu22.04 --help Mounting a volume to store the state on the host system ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -75,7 +75,7 @@ flag ``--database`` to specify the name of the database file. .. code-block:: bash $ docker run --rm \ - -p 9091:9091 -p 9092:9092 -v ~/:/app/ flwr/server:1.6.0-py3.11-ubuntu22.04 \ + -p 9091:9091 -p 9092:9092 -v ~/:/app/ flwr/server:1.7.0-py3.11-ubuntu22.04 \ --insecure \ --database state.db @@ -90,7 +90,7 @@ To enable SSL, you will need a CA certificate, a server certificate and a server .. note:: For testing purposes, you can generate your own self-signed certificates. The - `Enable SSL connections `_ + `Enable SSL connections `_ page contains a section that will guide you through the process. Assuming all files we need are in the local ``certificates`` directory, we can use the flag @@ -101,7 +101,7 @@ the server with the ``--certificates`` flag. .. 
code-block:: bash $ docker run --rm \ - -p 9091:9091 -p 9092:9092 -v ./certificates/:/app/ flwr/server:1.6.0-py3.11-ubuntu22.04 \ + -p 9091:9091 -p 9092:9092 -v ./certificates/:/app/ flwr/server:1.7.0-py3.11-ubuntu22.04 \ --certificates ca.crt server.pem server.key Using a different Flower or Python version @@ -118,19 +118,19 @@ updates of system dependencies that should not change the functionality of Flowe want to ensure that you always use the same image, you can specify the hash of the image instead of the tag. -The following command returns the current image hash referenced by the ``server:1.6.0-py3.11-ubuntu22.04`` tag: +The following command returns the current image hash referenced by the ``server:1.7.0-py3.11-ubuntu22.04`` tag: .. code-block:: bash - $ docker inspect --format='{{index .RepoDigests 0}}' flwr/server:1.6.0-py3.11-ubuntu22.04 - flwr/server@sha256:43fc389bcb016feab2b751b2ccafc9e9a906bb0885bd92b972329801086bc017 + $ docker inspect --format='{{index .RepoDigests 0}}' flwr/server:1.7.0-py3.11-ubuntu22.04 + flwr/server@sha256:c4be5012f9d73e3022e98735a889a463bb2f4f434448ebc19c61379920b1b327 Next, we can pin the hash when running a new server container: .. code-block:: bash $ docker run \ - --rm flwr/server@sha256:43fc389bcb016feab2b751b2ccafc9e9a906bb0885bd92b972329801086bc017 \ + --rm flwr/server@sha256:c4be5012f9d73e3022e98735a889a463bb2f4f434448ebc19c61379920b1b327 \ --insecure Setting environment variables @@ -141,4 +141,4 @@ To set a variable inside a Docker container, you can use the ``-e = .. 
code-block:: bash $ docker run -e FLWR_TELEMETRY_ENABLED=0 \ - --rm flwr/server:1.6.0-py3.11-ubuntu22.04 --insecure + --rm flwr/server:1.7.0-py3.11-ubuntu22.04 --insecure diff --git a/doc/source/how-to-run-simulations.rst b/doc/source/how-to-run-simulations.rst index 707e3d3ffe84..6e0520a79bf5 100644 --- a/doc/source/how-to-run-simulations.rst +++ b/doc/source/how-to-run-simulations.rst @@ -7,7 +7,7 @@ Run simulations Simulating Federated Learning workloads is useful for a multitude of use-cases: you might want to run your workload on a large cohort of clients but without having to source, configure and mange a large number of physical devices; you might want to run your FL workloads as fast as possible on the compute systems you have access to without having to go through a complex setup process; you might want to validate your algorithm on different scenarios at varying levels of data and system heterogeneity, client availability, privacy budgets, etc. These are among some of the use-cases where simulating FL workloads makes sense. Flower can accommodate these scenarios by means of its `VirtualClientEngine `_ or VCE. -The :code:`VirtualClientEngine` schedules, launches and manages `virtual` clients. These clients are identical to `non-virtual` clients (i.e. the ones you launch via the command `flwr.client.start_numpy_client `_) in the sense that they can be configure by creating a class inheriting, for example, from `flwr.client.NumPyClient `_ and therefore behave in an identical way. In addition to that, clients managed by the :code:`VirtualClientEngine` are: +The :code:`VirtualClientEngine` schedules, launches and manages `virtual` clients. These clients are identical to `non-virtual` clients (i.e. the ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by creating a class inheriting, for example, from `flwr.client.NumPyClient `_ and therefore behave in an identical way. 
In addition to that, clients managed by the :code:`VirtualClientEngine` are: * resource-aware: this means that each client gets assigned a portion of the compute and memory on your system. You as a user can control this at the beginning of the simulation and allows you to control the degree of parallelism of your Flower FL simulation. The fewer the resources per client, the more clients can run concurrently on the same hardware. * self-managed: this means that you as a user do not need to launch clients manually, instead this gets delegated to :code:`VirtualClientEngine`'s internals. diff --git a/doc/source/how-to-save-and-load-model-checkpoints.rst b/doc/source/how-to-save-and-load-model-checkpoints.rst index 404df485fbae..0d711e375cd8 100644 --- a/doc/source/how-to-save-and-load-model-checkpoints.rst +++ b/doc/source/how-to-save-and-load-model-checkpoints.rst @@ -91,3 +91,7 @@ To load your progress, you simply append the following lines to your code. Note print("Loading pre-trained model from: ", latest_round_file) state_dict = torch.load(latest_round_file) net.load_state_dict(state_dict) + state_dict_ndarrays = [v.cpu().numpy() for v in net.state_dict().values()] + parameters = fl.common.ndarrays_to_parameters(state_dict_ndarrays) + +Return/use this object of type ``Parameters`` wherever necessary, such as in the ``initial_parameters`` when defining a ``Strategy``. \ No newline at end of file diff --git a/doc/source/how-to-upgrade-to-flower-1.0.rst b/doc/source/how-to-upgrade-to-flower-1.0.rst index fd380e95d69c..c4429d61d0a9 100644 --- a/doc/source/how-to-upgrade-to-flower-1.0.rst +++ b/doc/source/how-to-upgrade-to-flower-1.0.rst @@ -50,7 +50,7 @@ Strategies / ``start_server`` / ``start_simulation`` - Replace ``num_rounds=1`` in ``start_simulation`` with the new ``config=ServerConfig(...)`` (see previous item) - Remove ``force_final_distributed_eval`` parameter from calls to ``start_server``. 
Distributed evaluation on all clients can be enabled by configuring the strategy to sample all clients for evaluation after the last round of training. - Rename parameter/ndarray conversion functions: - + - ``parameters_to_weights`` --> ``parameters_to_ndarrays`` - ``weights_to_parameters`` --> ``ndarrays_to_parameters`` @@ -88,4 +88,4 @@ Along with the necessary changes above, there are a number of potential improvem Further help ------------ -Most official `Flower code examples `_ are already updated to Flower 1.0, they can serve as a reference for using the Flower 1.0 API. If there are further questionsm, `join the Flower Slack `_ and use the channgel ``#questions``. +Most official `Flower code examples `_ are already updated to Flower 1.0, they can serve as a reference for using the Flower 1.0 API. If there are further questions, `join the Flower Slack `_ and use the channel ``#questions``. diff --git a/doc/source/how-to-use-built-in-middleware-layers.rst b/doc/source/how-to-use-built-in-middleware-layers.rst deleted file mode 100644 index 2e91623b26be..000000000000 --- a/doc/source/how-to-use-built-in-middleware-layers.rst +++ /dev/null @@ -1,87 +0,0 @@ -Use Built-in Middleware Layers -============================== - -**Note: This tutorial covers experimental features. The functionality and interfaces may change in future versions.** - -In this tutorial, we will learn how to utilize built-in middleware layers to augment the behavior of a ``FlowerCallable``. Middleware allows us to perform operations before and after a task is processed in the ``FlowerCallable``. - -What is middleware? -------------------- - -Middleware is a callable that wraps around a ``FlowerCallable``. It can manipulate or inspect incoming tasks (``TaskIns``) in the ``Fwd`` and the resulting tasks (``TaskRes``) in the ``Bwd``. The signature for a middleware layer (``Layer``) is as follows: - -..
code-block:: python - - FlowerCallable = Callable[[Fwd], Bwd] - Layer = Callable[[Fwd, FlowerCallable], Bwd] - -A typical middleware function might look something like this: - -.. code-block:: python - - def example_middleware(fwd: Fwd, ffn: FlowerCallable) -> Bwd: - # Do something with Fwd before passing to the inner ``FlowerCallable``. - bwd = ffn(fwd) - # Do something with Bwd before returning. - return bwd - -Using middleware layers ------------------------ - -To use middleware layers in your ``FlowerCallable``, you can follow these steps: - -1. Import the required middleware -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -First, import the built-in middleware layers you intend to use: - -.. code-block:: python - - import flwr as fl - from flwr.client.middleware import example_middleware1, example_middleware2 - -2. Define your client function -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Define your client function (``client_fn``) that will be wrapped by the middleware: - -.. code-block:: python - - def client_fn(cid): - # Your client code goes here. - return # your client - -3. Create the ``FlowerCallable`` with middleware -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Create your ``FlowerCallable`` and pass the middleware layers as a list to the ``layers`` argument. The order in which you provide the middleware layers matters: - -.. code-block:: python - - flower = fl.app.Flower( - client_fn=client_fn, - layers=[ - example_middleware1, # Middleware layer 1 - example_middleware2, # Middleware layer 2 - ] - ) - -Order of execution ------------------- - -When the ``FlowerCallable`` runs, the middleware layers are executed in the order they are provided in the list: - -1. ``example_middleware1`` (outermost layer) -2. ``example_middleware2`` (next layer) -3. Message handler (core function that handles ``TaskIns`` and returns ``TaskRes``) -4. ``example_middleware2`` (on the way back) -5. 
``example_middleware1`` (outermost layer on the way back) - -Each middleware has a chance to inspect and modify the ``TaskIns`` in the ``Fwd`` before passing it to the next layer, and likewise with the ``TaskRes`` in the ``Bwd`` before returning it up the stack. - -Conclusion ----------- - -By following this guide, you have learned how to effectively use middleware layers to enhance your ``FlowerCallable``'s functionality. Remember that the order of middleware is crucial and affects how the input and output are processed. - -Enjoy building more robust and flexible ``FlowerCallable``s with middleware layers! diff --git a/doc/source/how-to-use-built-in-mods.rst b/doc/source/how-to-use-built-in-mods.rst new file mode 100644 index 000000000000..341139175074 --- /dev/null +++ b/doc/source/how-to-use-built-in-mods.rst @@ -0,0 +1,89 @@ +Use Built-in Mods +================= + +**Note: This tutorial covers experimental features. The functionality and interfaces may change in future versions.** + +In this tutorial, we will learn how to utilize built-in mods to augment the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) allow us to perform operations before and after a task is processed in the ``ClientApp``. + +What are Mods? +-------------- + +A Mod is a callable that wraps around a ``ClientApp``. It can manipulate or inspect the incoming ``Message`` and the resulting outgoing ``Message``. The signature for a ``Mod`` is as follows: + +.. code-block:: python + + ClientApp = Callable[[Message, Context], Message] + Mod = Callable[[Message, Context, ClientApp], Message] + +A typical mod function might look something like this: + +.. 
code-block:: python + + def example_mod(msg: Message, ctx: Context, nxt: ClientApp) -> Message: + # Do something with incoming Message (or Context) + # before passing to the inner ``ClientApp`` + msg = nxt(msg, ctx) + # Do something with outgoing Message (or Context) + # before returning + return msg + +Using Mods +---------- + +To use mods in your ``ClientApp``, you can follow these steps: + +1. Import the required mods +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +First, import the built-in mod you intend to use: + +.. code-block:: python + + import flwr as fl + from flwr.client.mod import example_mod_1, example_mod_2 + +2. Define your client function +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Define your client function (``client_fn``) that will be wrapped by the mod(s): + +.. code-block:: python + + def client_fn(cid): + # Your client code goes here. + return # your client + +3. Create the ``ClientApp`` with mods +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Create your ``ClientApp`` and pass the mods as a list to the ``mods`` argument. The order in which you provide the mods matters: + +.. code-block:: python + + app = fl.client.ClientApp( + client_fn=client_fn, + mods=[ + example_mod_1, # Mod 1 + example_mod_2, # Mod 2 + ] + ) + +Order of execution +------------------ + +When the ``ClientApp`` runs, the mods are executed in the order they are provided in the list: + +1. ``example_mod_1`` (outermost mod) +2. ``example_mod_2`` (next mod) +3. Message handler (core function that handles the incoming ``Message`` and returns the outgoing ``Message``) +4. ``example_mod_2`` (on the way back) +5. ``example_mod_1`` (outermost mod on the way back) + +Each mod has a chance to inspect and modify the incoming ``Message`` before passing it to the next mod, and likewise with the outgoing ``Message`` before returning it up the stack. + +Conclusion +---------- + +By following this guide, you have learned how to effectively use mods to enhance your ``ClientApp``'s functionality. 
Remember that the order of mods is crucial and affects how the input and output are processed. + +Enjoy building a more robust and flexible ``ClientApp`` with mods! diff --git a/doc/source/index.rst b/doc/source/index.rst index 5df591d6ce05..ea52a9421b61 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -4,7 +4,7 @@ Flower Framework Documentation .. meta:: :description: Check out the documentation of the main Flower Framework enabling easy Python development for Federated Learning. -Welcome to Flower's documentation. `Flower `_ is a friendly federated learning framework. +Welcome to Flower's documentation. `Flower `_ is a friendly federated learning framework. Join the Flower Community @@ -12,7 +12,7 @@ Join the Flower Community The Flower Community is growing quickly - we're a friendly group of researchers, engineers, students, professionals, academics, and other enthusiasts. -.. button-link:: https://flower.dev/join-slack +.. button-link:: https://flower.ai/join-slack :color: primary :shadow: @@ -91,7 +91,7 @@ Problem-oriented how-to guides show step-by-step how to achieve a specific goal. how-to-configure-logging how-to-enable-ssl-connections how-to-upgrade-to-flower-1.0 - how-to-use-built-in-middleware-layers + how-to-use-built-in-mods how-to-run-flower-using-docker .. toctree:: diff --git a/doc/source/ref-api-cli.rst b/doc/source/ref-api-cli.rst index 039a2ea27cf8..c0e8940061fc 100644 --- a/doc/source/ref-api-cli.rst +++ b/doc/source/ref-api-cli.rst @@ -1,42 +1,52 @@ Flower CLI reference ==================== -.. _flower-server-apiref: +.. _flower-superlink-apiref: -flower-server -~~~~~~~~~~~~~ +flower-superlink +~~~~~~~~~~~~~~~~ .. argparse:: :module: flwr.server.app - :func: _parse_args_server - :prog: flower-server + :func: _parse_args_run_superlink + :prog: flower-superlink -.. _flower-driver-apiref: +.. _flower-driver-api-apiref: flower-driver-api ~~~~~~~~~~~~~~~~~ .. 
argparse:: :module: flwr.server.app - :func: _parse_args_driver + :func: _parse_args_run_driver_api :prog: flower-driver-api -.. _flower-fleet-apiref: +.. _flower-fleet-api-apiref: flower-fleet-api ~~~~~~~~~~~~~~~~ .. argparse:: :module: flwr.server.app - :func: _parse_args_fleet + :func: _parse_args_run_fleet_api :prog: flower-fleet-api -.. .. _flower-client-apiref: +.. .. _flower-client-app-apiref: -.. flower-client -.. ~~~~~~~~~~~~~ +.. flower-client-app +.. ~~~~~~~~~~~~~~~~~ - .. argparse:: +.. .. argparse:: .. :filename: flwr.client -.. :func: run_client -.. :prog: flower-client +.. :func: _parse_args_run_client_app +.. :prog: flower-client-app + +.. .. _flower-server-app-apiref: + +.. flower-server-app +.. ~~~~~~~~~~~~~~~~~ + +.. .. argparse:: +.. :filename: flwr.server +.. :func: _parse_args_run_server_app +.. :prog: flower-server-app diff --git a/doc/source/ref-changelog.md b/doc/source/ref-changelog.md index 5f323bc80baa..41dc91873c6c 100644 --- a/doc/source/ref-changelog.md +++ b/doc/source/ref-changelog.md @@ -2,23 +2,102 @@ ## Unreleased -- **Add scikit-learn tabular data example** ([#2719](https://github.com/adap/flower/pull/2719)) +### What's new? + +### Incompatible changes + +## v1.7.0 (2024-02-05) + +### Thanks to our contributors + +We would like to give our special thanks to all the contributors who made the new version of Flower possible (in `git shortlog` order): + +`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, `Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin Shaaban`, `Yan Gao`, `Yasar Abbas` + +### What's new? 
+ +- **Introduce stateful clients (experimental)** ([#2770](https://github.com/adap/flower/pull/2770), [#2686](https://github.com/adap/flower/pull/2686), [#2696](https://github.com/adap/flower/pull/2696), [#2643](https://github.com/adap/flower/pull/2643), [#2769](https://github.com/adap/flower/pull/2769)) + + Subclasses of `Client` and `NumPyClient` can now store local state that remains on the client. Let's start with the highlight first: this new feature is compatible with both simulated clients (via `start_simulation`) and networked clients (via `start_client`). It's also the first preview of new abstractions like `Context` and `RecordSet`. Clients can access state of type `RecordSet` via `state: RecordSet = self.context.state`. Changes to this `RecordSet` are preserved across different rounds of execution to enable stateful computations in a unified way across simulation and deployment. + +- **Improve performance** ([#2293](https://github.com/adap/flower/pull/2293)) + + Flower is faster than ever. All `FedAvg`-derived strategies now use in-place aggregation to reduce memory consumption. The Flower client serialization/deserialization has been rewritten from the ground up, which results in significant speedups, especially when the client-side training time is short. + +- **Support Federated Learning with Apple MLX and Flower** ([#2693](https://github.com/adap/flower/pull/2693)) + + Flower has official support for federated learning using [Apple MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code example. + +- **Introduce new XGBoost cyclic strategy** ([#2666](https://github.com/adap/flower/pull/2666), [#2668](https://github.com/adap/flower/pull/2668)) + + A new strategy called `FedXgbCyclic` supports a client-by-client style of training (often called cyclic). The `xgboost-comprehensive` code example shows how to use it in a full project. In addition to that, `xgboost-comprehensive` now also supports simulation mode.
With this, Flower offers best-in-class XGBoost support. + +- **Support Python 3.11** ([#2394](https://github.com/adap/flower/pull/2394)) + + Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will ensure better support for users using more recent Python versions. + +- **Update gRPC and ProtoBuf dependencies** ([#2814](https://github.com/adap/flower/pull/2814)) -- **General updates to Flower Examples** ([#2381](https://github.com/adap/flower/pull/2381)) + The `grpcio` and `protobuf` dependencies were updated to their latest versions for improved security and performance. -- **Retiring MXNet examples** The development of the MXNet fremework has ended and the project is now [archived on GitHub](https://github.com/apache/mxnet). Existing MXNet examples won't receive updates [#2724](https://github.com/adap/flower/pull/2724) +- **Introduce Docker image for Flower server** ([#2700](https://github.com/adap/flower/pull/2700), [#2688](https://github.com/adap/flower/pull/2688), [#2705](https://github.com/adap/flower/pull/2705), [#2695](https://github.com/adap/flower/pull/2695), [#2747](https://github.com/adap/flower/pull/2747), [#2746](https://github.com/adap/flower/pull/2746), [#2680](https://github.com/adap/flower/pull/2680), [#2682](https://github.com/adap/flower/pull/2682), [#2701](https://github.com/adap/flower/pull/2701)) + + The Flower server can now be run using an official Docker image. A new how-to guide explains [how to run Flower using Docker](https://flower.ai/docs/framework/how-to-run-flower-using-docker.html). An official Flower client Docker image will follow. 
+ +- **Introduce** `flower-via-docker-compose` **example** ([#2626](https://github.com/adap/flower/pull/2626)) + +- **Introduce** `quickstart-sklearn-tabular` **example** ([#2719](https://github.com/adap/flower/pull/2719)) + +- **Introduce** `custom-metrics` **example** ([#1958](https://github.com/adap/flower/pull/1958)) + +- **Update code examples to use Flower Datasets** ([#2450](https://github.com/adap/flower/pull/2450), [#2456](https://github.com/adap/flower/pull/2456), [#2318](https://github.com/adap/flower/pull/2318), [#2712](https://github.com/adap/flower/pull/2712)) + + Several code examples were updated to use [Flower Datasets](https://flower.ai/docs/datasets/). + +- **General updates to Flower Examples** ([#2381](https://github.com/adap/flower/pull/2381), [#2805](https://github.com/adap/flower/pull/2805), [#2782](https://github.com/adap/flower/pull/2782), [#2806](https://github.com/adap/flower/pull/2806), [#2829](https://github.com/adap/flower/pull/2829), [#2825](https://github.com/adap/flower/pull/2825), [#2816](https://github.com/adap/flower/pull/2816), [#2726](https://github.com/adap/flower/pull/2726), [#2659](https://github.com/adap/flower/pull/2659), [#2655](https://github.com/adap/flower/pull/2655)) + + Many Flower code examples received substantial updates. 
- **Update Flower Baselines** - - HFedXGBoost [#2226](https://github.com/adap/flower/pull/2226) + - HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), [#2771](https://github.com/adap/flower/pull/2771)) + - FedVSSL ([#2412](https://github.com/adap/flower/pull/2412)) + - FedNova ([#2179](https://github.com/adap/flower/pull/2179)) + - HeteroFL ([#2439](https://github.com/adap/flower/pull/2439)) + - FedAvgM ([#2246](https://github.com/adap/flower/pull/2246)) + - FedPara ([#2722](https://github.com/adap/flower/pull/2722)) + +- **Improve documentation** ([#2674](https://github.com/adap/flower/pull/2674), [#2480](https://github.com/adap/flower/pull/2480), [#2826](https://github.com/adap/flower/pull/2826), [#2727](https://github.com/adap/flower/pull/2727), [#2761](https://github.com/adap/flower/pull/2761), [#2900](https://github.com/adap/flower/pull/2900)) + +- **Improved testing and development infrastructure** ([#2797](https://github.com/adap/flower/pull/2797), [#2676](https://github.com/adap/flower/pull/2676), [#2644](https://github.com/adap/flower/pull/2644), [#2656](https://github.com/adap/flower/pull/2656), [#2848](https://github.com/adap/flower/pull/2848), [#2675](https://github.com/adap/flower/pull/2675), [#2735](https://github.com/adap/flower/pull/2735), [#2767](https://github.com/adap/flower/pull/2767), [#2732](https://github.com/adap/flower/pull/2732), [#2744](https://github.com/adap/flower/pull/2744), [#2681](https://github.com/adap/flower/pull/2681), [#2699](https://github.com/adap/flower/pull/2699), [#2745](https://github.com/adap/flower/pull/2745), [#2734](https://github.com/adap/flower/pull/2734), [#2731](https://github.com/adap/flower/pull/2731), [#2652](https://github.com/adap/flower/pull/2652), [#2720](https://github.com/adap/flower/pull/2720), [#2721](https://github.com/adap/flower/pull/2721), [#2717](https://github.com/adap/flower/pull/2717), [#2864](https://github.com/adap/flower/pull/2864), 
[#2694](https://github.com/adap/flower/pull/2694), [#2709](https://github.com/adap/flower/pull/2709), [#2658](https://github.com/adap/flower/pull/2658), [#2796](https://github.com/adap/flower/pull/2796), [#2692](https://github.com/adap/flower/pull/2692), [#2657](https://github.com/adap/flower/pull/2657), [#2813](https://github.com/adap/flower/pull/2813), [#2661](https://github.com/adap/flower/pull/2661), [#2398](https://github.com/adap/flower/pull/2398)) + + The Flower testing and development infrastructure has received substantial updates. This makes Flower 1.7 the most tested release ever. + +- **Update dependencies** ([#2753](https://github.com/adap/flower/pull/2753), [#2651](https://github.com/adap/flower/pull/2651), [#2739](https://github.com/adap/flower/pull/2739), [#2837](https://github.com/adap/flower/pull/2837), [#2788](https://github.com/adap/flower/pull/2788), [#2811](https://github.com/adap/flower/pull/2811), [#2774](https://github.com/adap/flower/pull/2774), [#2790](https://github.com/adap/flower/pull/2790), [#2751](https://github.com/adap/flower/pull/2751), [#2850](https://github.com/adap/flower/pull/2850), [#2812](https://github.com/adap/flower/pull/2812), [#2872](https://github.com/adap/flower/pull/2872), [#2736](https://github.com/adap/flower/pull/2736), [#2756](https://github.com/adap/flower/pull/2756), [#2857](https://github.com/adap/flower/pull/2857), [#2757](https://github.com/adap/flower/pull/2757), [#2810](https://github.com/adap/flower/pull/2810), [#2740](https://github.com/adap/flower/pull/2740), [#2789](https://github.com/adap/flower/pull/2789)) + +- **General improvements** ([#2803](https://github.com/adap/flower/pull/2803), [#2847](https://github.com/adap/flower/pull/2847), [#2877](https://github.com/adap/flower/pull/2877), [#2690](https://github.com/adap/flower/pull/2690), [#2889](https://github.com/adap/flower/pull/2889), [#2874](https://github.com/adap/flower/pull/2874), [#2819](https://github.com/adap/flower/pull/2819), 
[#2689](https://github.com/adap/flower/pull/2689), [#2457](https://github.com/adap/flower/pull/2457), [#2870](https://github.com/adap/flower/pull/2870), [#2669](https://github.com/adap/flower/pull/2669), [#2876](https://github.com/adap/flower/pull/2876), [#2885](https://github.com/adap/flower/pull/2885), [#2858](https://github.com/adap/flower/pull/2858), [#2867](https://github.com/adap/flower/pull/2867), [#2351](https://github.com/adap/flower/pull/2351), [#2886](https://github.com/adap/flower/pull/2886), [#2860](https://github.com/adap/flower/pull/2860), [#2828](https://github.com/adap/flower/pull/2828), [#2869](https://github.com/adap/flower/pull/2869), [#2875](https://github.com/adap/flower/pull/2875), [#2733](https://github.com/adap/flower/pull/2733), [#2488](https://github.com/adap/flower/pull/2488), [#2646](https://github.com/adap/flower/pull/2646), [#2879](https://github.com/adap/flower/pull/2879), [#2821](https://github.com/adap/flower/pull/2821), [#2855](https://github.com/adap/flower/pull/2855), [#2800](https://github.com/adap/flower/pull/2800), [#2807](https://github.com/adap/flower/pull/2807), [#2801](https://github.com/adap/flower/pull/2801), [#2804](https://github.com/adap/flower/pull/2804), [#2851](https://github.com/adap/flower/pull/2851), [#2787](https://github.com/adap/flower/pull/2787), [#2852](https://github.com/adap/flower/pull/2852), [#2672](https://github.com/adap/flower/pull/2672), [#2759](https://github.com/adap/flower/pull/2759)) + +### Incompatible changes + +- **Deprecate** `start_numpy_client` ([#2563](https://github.com/adap/flower/pull/2563), [#2718](https://github.com/adap/flower/pull/2718)) + + Until now, clients of type `NumPyClient` needed to be started via `start_numpy_client`. In our efforts to consolidate framework APIs, we have introduced changes, and now all client types should start via `start_client`. 
To continue using `NumPyClient` clients, you simply need to first call the `.to_client()` method and then pass the returned `Client` object to `start_client`. The examples and the documentation have been updated accordingly. + +- **Deprecate legacy DP wrappers** ([#2749](https://github.com/adap/flower/pull/2749)) + + Legacy DP wrapper classes are deprecated, but still functional. This is in preparation for an all-new pluggable version of differential privacy support in Flower. + +- **Make optional arg** `--callable` **in** `flower-client` **a required positional arg** ([#2673](https://github.com/adap/flower/pull/2673)) + +- **Rename** `certificates` **to** `root_certificates` **in** `Driver` ([#2890](https://github.com/adap/flower/pull/2890)) - - FedVSSL [#2412](https://github.com/adap/flower/pull/2412) +- **Drop experimental** `Task` **fields** ([#2866](https://github.com/adap/flower/pull/2866), [#2865](https://github.com/adap/flower/pull/2865)) - - FedNova [#2179](https://github.com/adap/flower/pull/2179) + Experimental fields `sa`, `legacy_server_message` and `legacy_client_message` were removed from the `Task` message. The removed fields are superseded by the new `RecordSet` abstraction. - - HeteroFL [#2439](https://github.com/adap/flower/pull/2439) +- **Retire MXNet examples** ([#2724](https://github.com/adap/flower/pull/2724)) - - FedAvgM [#2246](https://github.com/adap/flower/pull/2246) + The development of the MXNet framework has ended and the project is now [archived on GitHub](https://github.com/apache/mxnet). Existing MXNet examples won't receive updates.
## v1.6.0 (2023-11-28) @@ -98,7 +177,7 @@ We would like to give our special thanks to all the contributors who made the ne - FedBN ([#2608](https://github.com/adap/flower/pull/2608), [#2615](https://github.com/adap/flower/pull/2615)) -- **General updates to Flower Examples** ([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425), [#2526](https://github.com/adap/flower/pull/2526), [#2302](https://github.com/adap/flower/pull/2302), [#2545](https://github.com/adap/flower/pull/2545)) +- **General updates to Flower Examples** ([#2384](https://github.com/adap/flower/pull/2384), [#2425](https://github.com/adap/flower/pull/2425), [#2526](https://github.com/adap/flower/pull/2526), [#2302](https://github.com/adap/flower/pull/2302), [#2545](https://github.com/adap/flower/pull/2545)) - **General updates to Flower Baselines** ([#2301](https://github.com/adap/flower/pull/2301), [#2305](https://github.com/adap/flower/pull/2305), [#2307](https://github.com/adap/flower/pull/2307), [#2327](https://github.com/adap/flower/pull/2327), [#2435](https://github.com/adap/flower/pull/2435), [#2462](https://github.com/adap/flower/pull/2462), [#2463](https://github.com/adap/flower/pull/2463), [#2461](https://github.com/adap/flower/pull/2461), [#2469](https://github.com/adap/flower/pull/2469), [#2466](https://github.com/adap/flower/pull/2466), [#2471](https://github.com/adap/flower/pull/2471), [#2472](https://github.com/adap/flower/pull/2472), [#2470](https://github.com/adap/flower/pull/2470)) @@ -106,7 +185,7 @@ We would like to give our special thanks to all the contributors who made the ne - **General updates to Flower SDKs** ([#2288](https://github.com/adap/flower/pull/2288), [#2429](https://github.com/adap/flower/pull/2429), [#2555](https://github.com/adap/flower/pull/2555), [#2543](https://github.com/adap/flower/pull/2543), [#2544](https://github.com/adap/flower/pull/2544), [#2597](https://github.com/adap/flower/pull/2597), 
[#2623](https://github.com/adap/flower/pull/2623)) -- **General improvements** ([#2309](https://github.com/adap/flower/pull/2309), [#2310](https://github.com/adap/flower/pull/2310), [2313](https://github.com/adap/flower/pull/2313), [#2316](https://github.com/adap/flower/pull/2316), [2317](https://github.com/adap/flower/pull/2317), [#2349](https://github.com/adap/flower/pull/2349), [#2360](https://github.com/adap/flower/pull/2360), [#2402](https://github.com/adap/flower/pull/2402), [#2446](https://github.com/adap/flower/pull/2446), [#2561](https://github.com/adap/flower/pull/2561), [#2273](https://github.com/adap/flower/pull/2273), [#2267](https://github.com/adap/flower/pull/2267), [#2274](https://github.com/adap/flower/pull/2274), [#2275](https://github.com/adap/flower/pull/2275), [#2432](https://github.com/adap/flower/pull/2432), [#2251](https://github.com/adap/flower/pull/2251), [#2321](https://github.com/adap/flower/pull/2321), [#1936](https://github.com/adap/flower/pull/1936), [#2408](https://github.com/adap/flower/pull/2408), [#2413](https://github.com/adap/flower/pull/2413), [#2401](https://github.com/adap/flower/pull/2401), [#2531](https://github.com/adap/flower/pull/2531), [#2534](https://github.com/adap/flower/pull/2534), [#2535](https://github.com/adap/flower/pull/2535), [#2521](https://github.com/adap/flower/pull/2521), [#2553](https://github.com/adap/flower/pull/2553), [#2596](https://github.com/adap/flower/pull/2596)) +- **General improvements** ([#2309](https://github.com/adap/flower/pull/2309), [#2310](https://github.com/adap/flower/pull/2310), [#2313](https://github.com/adap/flower/pull/2313), [#2316](https://github.com/adap/flower/pull/2316), [#2317](https://github.com/adap/flower/pull/2317), [#2349](https://github.com/adap/flower/pull/2349), [#2360](https://github.com/adap/flower/pull/2360), [#2402](https://github.com/adap/flower/pull/2402), [#2446](https://github.com/adap/flower/pull/2446), [#2561](https://github.com/adap/flower/pull/2561), 
[#2273](https://github.com/adap/flower/pull/2273), [#2267](https://github.com/adap/flower/pull/2267), [#2274](https://github.com/adap/flower/pull/2274), [#2275](https://github.com/adap/flower/pull/2275), [#2432](https://github.com/adap/flower/pull/2432), [#2251](https://github.com/adap/flower/pull/2251), [#2321](https://github.com/adap/flower/pull/2321), [#1936](https://github.com/adap/flower/pull/1936), [#2408](https://github.com/adap/flower/pull/2408), [#2413](https://github.com/adap/flower/pull/2413), [#2401](https://github.com/adap/flower/pull/2401), [#2531](https://github.com/adap/flower/pull/2531), [#2534](https://github.com/adap/flower/pull/2534), [#2535](https://github.com/adap/flower/pull/2535), [#2521](https://github.com/adap/flower/pull/2521), [#2553](https://github.com/adap/flower/pull/2553), [#2596](https://github.com/adap/flower/pull/2596)) Flower received many improvements under the hood, too many to list here. @@ -134,11 +213,11 @@ We would like to give our special thanks to all the contributors who made the ne The new simulation engine has been rewritten from the ground up, yet it remains fully backwards compatible. It offers much improved stability and memory handling, especially when working with GPUs. Simulations transparently adapt to different settings to scale simulation in CPU-only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments. - Comprehensive documentation includes a new [how-to run simulations](https://flower.dev/docs/framework/how-to-run-simulations.html) guide, new [simulation-pytorch](https://flower.dev/docs/examples/simulation-pytorch.html) and [simulation-tensorflow](https://flower.dev/docs/examples/simulation-tensorflow.html) notebooks, and a new [YouTube tutorial series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB). 
+ Comprehensive documentation includes a new [how-to run simulations](https://flower.ai/docs/framework/how-to-run-simulations.html) guide, new [simulation-pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and [simulation-tensorflow](https://flower.ai/docs/examples/simulation-tensorflow.html) notebooks, and a new [YouTube tutorial series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB). - **Restructure Flower Docs** ([#1824](https://github.com/adap/flower/pull/1824), [#1865](https://github.com/adap/flower/pull/1865), [#1884](https://github.com/adap/flower/pull/1884), [#1887](https://github.com/adap/flower/pull/1887), [#1919](https://github.com/adap/flower/pull/1919), [#1922](https://github.com/adap/flower/pull/1922), [#1920](https://github.com/adap/flower/pull/1920), [#1923](https://github.com/adap/flower/pull/1923), [#1924](https://github.com/adap/flower/pull/1924), [#1962](https://github.com/adap/flower/pull/1962), [#2006](https://github.com/adap/flower/pull/2006), [#2133](https://github.com/adap/flower/pull/2133), [#2203](https://github.com/adap/flower/pull/2203), [#2215](https://github.com/adap/flower/pull/2215), [#2122](https://github.com/adap/flower/pull/2122), [#2223](https://github.com/adap/flower/pull/2223), [#2219](https://github.com/adap/flower/pull/2219), [#2232](https://github.com/adap/flower/pull/2232), [#2233](https://github.com/adap/flower/pull/2233), [#2234](https://github.com/adap/flower/pull/2234), [#2235](https://github.com/adap/flower/pull/2235), [#2237](https://github.com/adap/flower/pull/2237), [#2238](https://github.com/adap/flower/pull/2238), [#2242](https://github.com/adap/flower/pull/2242), [#2231](https://github.com/adap/flower/pull/2231), [#2243](https://github.com/adap/flower/pull/2243), [#2227](https://github.com/adap/flower/pull/2227)) - Much effort went into a completely restructured Flower docs experience. 
The documentation on [flower.dev/docs](flower.dev/docs) is now divided into Flower Framework, Flower Baselines, Flower Android SDK, Flower iOS SDK, and code example projects. + Much effort went into a completely restructured Flower docs experience. The documentation on [flower.ai/docs](https://flower.ai/docs) is now divided into Flower Framework, Flower Baselines, Flower Android SDK, Flower iOS SDK, and code example projects. - **Introduce Flower Swift SDK** ([#1858](https://github.com/adap/flower/pull/1858), [#1897](https://github.com/adap/flower/pull/1897)) @@ -224,7 +303,7 @@ We would like to give our special thanks to all the contributors who made the ne - **Introduce new "What is Federated Learning?" tutorial** ([#1657](https://github.com/adap/flower/pull/1657), [#1721](https://github.com/adap/flower/pull/1721)) - A new [entry-level tutorial](https://flower.dev/docs/framework/tutorial-what-is-federated-learning.html) in our documentation explains the basics of Fedetated Learning. It enables anyone who's unfamiliar with Federated Learning to start their journey with Flower. Forward it to anyone who's interested in Federated Learning! + A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-what-is-federated-learning.html) in our documentation explains the basics of Federated Learning. It enables anyone who's unfamiliar with Federated Learning to start their journey with Flower. Forward it to anyone who's interested in Federated Learning!
- **Introduce new Flower Baseline: FedProx MNIST** ([#1513](https://github.com/adap/flower/pull/1513), [#1680](https://github.com/adap/flower/pull/1680), [#1681](https://github.com/adap/flower/pull/1681), [#1679](https://github.com/adap/flower/pull/1679)) @@ -338,7 +417,7 @@ We would like to give our special thanks to all the contributors who made the ne - **Introduce new Flower Baseline: FedAvg MNIST** ([#1497](https://github.com/adap/flower/pull/1497), [#1552](https://github.com/adap/flower/pull/1552)) - Over the coming weeks, we will be releasing a number of new reference implementations useful especially to FL newcomers. They will typically revisit well known papers from the literature, and be suitable for integration in your own application or for experimentation, in order to deepen your knowledge of FL in general. Today's release is the first in this series. [Read more.](https://flower.dev/blog/2023-01-12-fl-starter-pack-fedavg-mnist-cnn/) + Over the coming weeks, we will be releasing a number of new reference implementations useful especially to FL newcomers. They will typically revisit well known papers from the literature, and be suitable for integration in your own application or for experimentation, in order to deepen your knowledge of FL in general. Today's release is the first in this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-pack-fedavg-mnist-cnn/) - **Improve GPU support in simulations** ([#1555](https://github.com/adap/flower/pull/1555)) @@ -348,16 +427,16 @@ We would like to give our special thanks to all the contributors who made the ne Some users reported that Jupyter Notebooks have not always been easy to use on GPU instances. We listened and made improvements to all of our Jupyter notebooks! 
Check out the updated notebooks here: - - [An Introduction to Federated Learning](https://flower.dev/docs/framework/tutorial-get-started-with-flower-pytorch.html) - - [Strategies in Federated Learning](https://flower.dev/docs/framework/tutorial-use-a-federated-learning-strategy-pytorch.html) - - [Building a Strategy](https://flower.dev/docs/framework/tutorial-build-a-strategy-from-scratch-pytorch.html) - - [Client and NumPyClient](https://flower.dev/docs/framework/tutorial-customize-the-client-pytorch.html) + - [An Introduction to Federated Learning](https://flower.ai/docs/framework/tutorial-get-started-with-flower-pytorch.html) + - [Strategies in Federated Learning](https://flower.ai/docs/framework/tutorial-use-a-federated-learning-strategy-pytorch.html) + - [Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a-strategy-from-scratch-pytorch.html) + - [Client and NumPyClient](https://flower.ai/docs/framework/tutorial-customize-the-client-pytorch.html) - **Introduce optional telemetry** ([#1533](https://github.com/adap/flower/pull/1533), [#1544](https://github.com/adap/flower/pull/1544), [#1584](https://github.com/adap/flower/pull/1584)) After a [request for feedback](https://github.com/adap/flower/issues/1534) from the community, the Flower open-source project introduces optional collection of *anonymous* usage metrics to make well-informed decisions to improve Flower. Doing this enables the Flower team to understand how Flower is used and what challenges users might face. - **Flower is a friendly framework for collaborative AI and data science.** Staying true to this statement, Flower makes it easy to disable telemetry for users who do not want to share anonymous usage metrics. [Read more.](https://flower.dev/docs/telemetry.html). + **Flower is a friendly framework for collaborative AI and data science.** Staying true to this statement, Flower makes it easy to disable telemetry for users who do not want to share anonymous usage metrics. 
[Read more.](https://flower.ai/docs/telemetry.html). - **Introduce (experimental) Driver API** ([#1520](https://github.com/adap/flower/pull/1520), [#1525](https://github.com/adap/flower/pull/1525), [#1545](https://github.com/adap/flower/pull/1545), [#1546](https://github.com/adap/flower/pull/1546), [#1550](https://github.com/adap/flower/pull/1550), [#1551](https://github.com/adap/flower/pull/1551), [#1567](https://github.com/adap/flower/pull/1567)) @@ -389,7 +468,7 @@ We would like to give our special thanks to all the contributors who made the ne As usual, the documentation has improved quite a bit. It is another step in our effort to make the Flower documentation the best documentation of any project. Stay tuned and as always, feel free to provide feedback! - One highlight is the new [first time contributor guide](https://flower.dev/docs/first-time-contributors.html): if you've never contributed on GitHub before, this is the perfect place to start! + One highlight is the new [first time contributor guide](https://flower.ai/docs/first-time-contributors.html): if you've never contributed on GitHub before, this is the perfect place to start! ### Incompatible changes @@ -578,7 +657,7 @@ We would like to give our **special thanks** to all the contributors who made Fl - **Flower Baselines (preview): FedOpt, FedBN, FedAvgM** ([#919](https://github.com/adap/flower/pull/919), [#1127](https://github.com/adap/flower/pull/1127), [#914](https://github.com/adap/flower/pull/914)) - The first preview release of Flower Baselines has arrived! We're kickstarting Flower Baselines with implementations of FedOpt (FedYogi, FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how to use [Flower Baselines](https://flower.dev/docs/using-baselines.html). With this first preview release we're also inviting the community to [contribute their own baselines](https://flower.dev/docs/contributing-baselines.html). + The first preview release of Flower Baselines has arrived! 
We're kickstarting Flower Baselines with implementations of FedOpt (FedYogi, FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). With this first preview release we're also inviting the community to [contribute their own baselines](https://flower.ai/docs/contributing-baselines.html). - **C++ client SDK (preview) and code example** ([#1111](https://github.com/adap/flower/pull/1111)) @@ -624,7 +703,7 @@ We would like to give our **special thanks** to all the contributors who made Fl - New option to keep Ray running if Ray was already initialized in `start_simulation` ([#1177](https://github.com/adap/flower/pull/1177)) - Add support for custom `ClientManager` as a `start_simulation` parameter ([#1171](https://github.com/adap/flower/pull/1171)) - - New documentation for [implementing strategies](https://flower.dev/docs/framework/how-to-implement-strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), [#1175](https://github.com/adap/flower/pull/1175)) + - New documentation for [implementing strategies](https://flower.ai/docs/framework/how-to-implement-strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), [#1175](https://github.com/adap/flower/pull/1175)) - New mobile-friendly documentation theme ([#1174](https://github.com/adap/flower/pull/1174)) - Limit version range for (optional) `ray` dependency to include only compatible releases (`>=1.9.2,<1.12.0`) ([#1205](https://github.com/adap/flower/pull/1205)) diff --git a/doc/source/ref-example-projects.rst b/doc/source/ref-example-projects.rst index b47bd8e48997..8eb723000cac 100644 --- a/doc/source/ref-example-projects.rst +++ b/doc/source/ref-example-projects.rst @@ -23,8 +23,8 @@ The TensorFlow/Keras quickstart example shows CIFAR-10 image classification with MobileNetV2: - `Quickstart TensorFlow (Code) `_ -- `Quickstart TensorFlow (Tutorial) `_ -- `Quickstart TensorFlow (Blog Post) `_ +- `Quickstart 
TensorFlow (Tutorial) `_ +- `Quickstart TensorFlow (Blog Post) `_ Quickstart PyTorch @@ -34,7 +34,7 @@ The PyTorch quickstart example shows CIFAR-10 image classification with a simple Convolutional Neural Network: - `Quickstart PyTorch (Code) `_ -- `Quickstart PyTorch (Tutorial) `_ +- `Quickstart PyTorch (Tutorial) `_ PyTorch: From Centralized To Federated @@ -43,7 +43,7 @@ PyTorch: From Centralized To Federated This example shows how a regular PyTorch project can be federated using Flower: - `PyTorch: From Centralized To Federated (Code) `_ -- `PyTorch: From Centralized To Federated (Tutorial) `_ +- `PyTorch: From Centralized To Federated (Tutorial) `_ Federated Learning on Raspberry Pi and Nvidia Jetson @@ -52,7 +52,7 @@ Federated Learning on Raspberry Pi and Nvidia Jetson This example shows how Flower can be used to build a federated learning system that run across Raspberry Pi and Nvidia Jetson: - `Federated Learning on Raspberry Pi and Nvidia Jetson (Code) `_ -- `Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) `_ +- `Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) `_ diff --git a/doc/source/ref-faq.rst b/doc/source/ref-faq.rst index 13c44bc64b0e..932396e3c583 100644 --- a/doc/source/ref-faq.rst +++ b/doc/source/ref-faq.rst @@ -6,20 +6,20 @@ This page collects answers to commonly asked questions about Federated Learning .. dropdown:: :fa:`eye,mr-1` Can Flower run on Juptyter Notebooks / Google Colab? Yes, it can! Flower even comes with a few under-the-hood optimizations to make it work even better on Colab. Here's a quickstart example: - + * `Flower simulation PyTorch `_ * `Flower simulation TensorFlow/Keras `_ .. dropdown:: :fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi? - Find the `blog post about federated learning on embedded device here `_ and the corresponding `GitHub code example `_. + Find the `blog post about federated learning on embedded device here `_ and the corresponding `GitHub code example `_. 
.. dropdown:: :fa:`eye,mr-1` Does Flower support federated learning on Android devices? - Yes, it does. Please take a look at our `blog post `_ or check out the code examples: + Yes, it does. Please take a look at our `blog post `_ or check out the code examples: - * `Android Kotlin example `_ - * `Android Java example `_ + * `Android Kotlin example `_ + * `Android Java example `_ .. dropdown:: :fa:`eye,mr-1` Can I combine federated learning with blockchain? diff --git a/doc/source/ref-telemetry.md b/doc/source/ref-telemetry.md index 206e641d8b41..49efef5c8559 100644 --- a/doc/source/ref-telemetry.md +++ b/doc/source/ref-telemetry.md @@ -41,7 +41,7 @@ Flower telemetry collects the following metrics: **Source.** Flower telemetry tries to store a random source ID in `~/.flwr/source` the first time a telemetry event is generated. The source ID is important to identify whether an issue is recurring or whether an issue is triggered by multiple clusters running concurrently (which often happens in simulation). For example, if a device runs multiple workloads at the same time, and this results in an issue, then, in order to reproduce the issue, multiple workloads must be started at the same time. -You may delete the source ID at any time. If you wish for all events logged under a specific source ID to be deleted, you can send a deletion request mentioning the source ID to `telemetry@flower.dev`. All events related to that source ID will then be permanently deleted. +You may delete the source ID at any time. If you wish for all events logged under a specific source ID to be deleted, you can send a deletion request mentioning the source ID to `telemetry@flower.ai`. All events related to that source ID will then be permanently deleted. We will not collect any personally identifiable information. If you think any of the metrics collected could be misused in any way, please [get in touch with us](#how-to-contact-us). 
We will update this page to reflect any changes to the metrics collected and publish changes in the changelog. @@ -63,4 +63,4 @@ FLWR_TELEMETRY_ENABLED=0 FLWR_TELEMETRY_LOGGING=1 python server.py # or client.p ## How to contact us -We want to hear from you. If you have any feedback or ideas on how to improve the way we handle anonymous usage metrics, reach out to us via [Slack](https://flower.dev/join-slack/) (channel `#telemetry`) or email (`telemetry@flower.dev`). +We want to hear from you. If you have any feedback or ideas on how to improve the way we handle anonymous usage metrics, reach out to us via [Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email (`telemetry@flower.ai`). diff --git a/doc/source/tutorial-quickstart-huggingface.rst b/doc/source/tutorial-quickstart-huggingface.rst index 7718e6558456..1e06120b452f 100644 --- a/doc/source/tutorial-quickstart-huggingface.rst +++ b/doc/source/tutorial-quickstart-huggingface.rst @@ -212,9 +212,9 @@ We can now start client instances using: .. code-block:: python - fl.client.start_numpy_client( + fl.client.start_client( server_address="127.0.0.1:8080", - client=IMDBClient() + client=IMDBClient().to_client() ) diff --git a/doc/source/tutorial-quickstart-ios.rst b/doc/source/tutorial-quickstart-ios.rst index 7c8007baaa75..aa94a72580c1 100644 --- a/doc/source/tutorial-quickstart-ios.rst +++ b/doc/source/tutorial-quickstart-ios.rst @@ -7,14 +7,14 @@ Quickstart iOS .. meta:: :description: Read this Federated Learning quickstart tutorial for creating an iOS app using Flower to train a neural network on MNIST. -In this tutorial we will learn how to train a Neural Network on MNIST using Flower and CoreML on iOS devices. +In this tutorial we will learn how to train a Neural Network on MNIST using Flower and CoreML on iOS devices. -First of all, for running the Flower Python server, it is recommended to create a virtual environment and run everything within a `virtualenv `_. 
+First of all, for running the Flower Python server, it is recommended to create a virtual environment and run everything within a `virtualenv `_. For the Flower client implementation in iOS, it is recommended to use Xcode as our IDE. -Our example consists of one Python *server* and two iPhone *clients* that all have the same model. +Our example consists of one Python *server* and two iPhone *clients* that all have the same model. -*Clients* are responsible for generating individual weight updates for the model based on their local datasets. +*Clients* are responsible for generating individual weight updates for the model based on their local datasets. These updates are then sent to the *server* which will aggregate them to produce a better model. Finally, the *server* sends this improved version of the model back to each *client*. A complete cycle of weight updates is called a *round*. @@ -44,10 +44,10 @@ For simplicity reasons we will use the complete Flower client with CoreML, that public func getParameters() -> GetParametersRes { let parameters = parameters.weightsToParameters() let status = Status(code: .ok, message: String()) - + return GetParametersRes(parameters: parameters, status: status) } - + /// Calls the routine to fit the local model /// /// - Returns: The result from the local training, e.g., updated parameters @@ -55,17 +55,17 @@ For simplicity reasons we will use the complete Flower client with CoreML, that let status = Status(code: .ok, message: String()) let result = runMLTask(configuration: parameters.parametersToWeights(parameters: ins.parameters), task: .train) let parameters = parameters.weightsToParameters() - + return FitRes(parameters: parameters, numExamples: result.numSamples, status: status) } - + /// Calls the routine to evaluate the local model /// /// - Returns: The result from the evaluation, e.g., loss public func evaluate(ins: EvaluateIns) -> EvaluateRes { let status = Status(code: .ok, message: String()) let result = 
runMLTask(configuration: parameters.parametersToWeights(parameters: ins.parameters), task: .test) - + return EvaluateRes(loss: Float(result.loss), numExamples: result.numSamples, status: status) } @@ -88,12 +88,12 @@ For the MNIST dataset, we need to preprocess it into :code:`MLBatchProvider` obj // prepare train dataset let trainBatchProvider = DataLoader.trainBatchProvider() { _ in } - + // prepare test dataset let testBatchProvider = DataLoader.testBatchProvider() { _ in } - + // load them together - let dataLoader = MLDataLoader(trainBatchProvider: trainBatchProvider, + let dataLoader = MLDataLoader(trainBatchProvider: trainBatchProvider, testBatchProvider: testBatchProvider) Since CoreML does not allow the model parameters to be seen before training, and accessing the model parameters during or after the training can only be done by specifying the layer name, @@ -122,7 +122,7 @@ Then start the Flower gRPC client and start communicating to the server by passi self.flwrGRPC.startFlwrGRPC(client: self.mlFlwrClient) That's it for the client. We only have to implement :code:`Client` or call the provided -:code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. The attribute :code:`hostname` and :code:`port` tells the client which server to connect to. +:code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. The attribute :code:`hostname` and :code:`port` tells the client which server to connect to. This can be done by entering the hostname and port in the application before clicking the start button to start the federated learning process. Flower Server diff --git a/doc/source/tutorial-quickstart-jax.rst b/doc/source/tutorial-quickstart-jax.rst index 945f231e112e..d2b9243e2bb3 100644 --- a/doc/source/tutorial-quickstart-jax.rst +++ b/doc/source/tutorial-quickstart-jax.rst @@ -265,7 +265,7 @@ Having defined the federation process, we can run it. 
# Start Flower client client = FlowerClient(params, grad_fn, train_x, train_y, test_x, test_y) - fl.client.start_numpy_client(server_address="0.0.0.0:8080", client) + fl.client.start_client(server_address="0.0.0.0:8080", client=client.to_client()) if __name__ == "__main__": main() diff --git a/doc/source/tutorial-quickstart-mxnet.rst b/doc/source/tutorial-quickstart-mxnet.rst index ff8d4b2087dd..08304483af86 100644 --- a/doc/source/tutorial-quickstart-mxnet.rst +++ b/doc/source/tutorial-quickstart-mxnet.rst @@ -9,13 +9,13 @@ Quickstart MXNet .. meta:: :description: Check out this Federated Learning quickstart tutorial for using Flower with MXNet to train a Sequential model on MNIST. -In this tutorial, we will learn how to train a :code:`Sequential` model on MNIST using Flower and MXNet. +In this tutorial, we will learn how to train a :code:`Sequential` model on MNIST using Flower and MXNet. -It is recommended to create a virtual environment and run everything within this `virtualenv `_. +It is recommended to create a virtual environment and run everything within this `virtualenv `_. -Our example consists of one *server* and two *clients* all having the same model. +Our example consists of one *server* and two *clients* all having the same model. -*Clients* are responsible for generating individual model parameter updates for the model based on their local datasets. +*Clients* are responsible for generating individual model parameter updates for the model based on their local datasets. These updates are then sent to the *server* which will aggregate them to produce an updated global model. Finally, the *server* sends this improved version of the model back to each *client*. A complete cycle of parameters updates is called a *round*. @@ -35,12 +35,12 @@ Since we want to use MXNet, let's go ahead and install it: Flower Client ------------- -Now that we have all our dependencies installed, let's run a simple distributed training with two clients and one server. 
Our training procedure and network architecture are based on MXNet´s `Hand-written Digit Recognition tutorial `_. +Now that we have all our dependencies installed, let's run a simple distributed training with two clients and one server. Our training procedure and network architecture are based on MXNet´s `Hand-written Digit Recognition tutorial `_. In a file called :code:`client.py`, import Flower and MXNet related packages: .. code-block:: python - + import flwr as fl import numpy as np @@ -58,7 +58,7 @@ In addition, define the device allocation in MXNet with: DEVICE = [mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()] -We use MXNet to load MNIST, a popular image classification dataset of handwritten digits for machine learning. The MXNet utility :code:`mx.test_utils.get_mnist()` downloads the training and test data. +We use MXNet to load MNIST, a popular image classification dataset of handwritten digits for machine learning. The MXNet utility :code:`mx.test_utils.get_mnist()` downloads the training and test data. .. code-block:: python @@ -72,7 +72,7 @@ We use MXNet to load MNIST, a popular image classification dataset of handwritte val_data = mx.io.NDArrayIter(mnist["test_data"], mnist["test_label"], batch_size) return train_data, val_data -Define the training and loss with MXNet. We train the model by looping over the dataset, measure the corresponding loss, and optimize it. +Define the training and loss with MXNet. We train the model by looping over the dataset, measure the corresponding loss, and optimize it. .. code-block:: python @@ -110,7 +110,7 @@ Define the training and loss with MXNet. We train the model by looping over the return trainings_metric, num_examples -Next, we define the validation of our machine learning model. We loop over the test set and measure both loss and accuracy on the test set. +Next, we define the validation of our machine learning model. We loop over the test set and measure both loss and accuracy on the test set. .. 
code-block:: python @@ -155,7 +155,7 @@ Our Flower clients will use a simple :code:`Sequential` model: init = nd.random.uniform(shape=(2, 784)) model(init) -After loading the dataset with :code:`load_data()` we perform one forward propagation to initialize the model and model parameters with :code:`model(init)`. Next, we implement a Flower client. +After loading the dataset with :code:`load_data()` we perform one forward propagation to initialize the model and model parameters with :code:`model(init)`. Next, we implement a Flower client. The Flower server interacts with clients through an interface called :code:`Client`. When the server selects a particular client for training, it @@ -207,7 +207,7 @@ They can be implemented in the following way: [accuracy, loss], num_examples = test(model, val_data) print("Evaluation accuracy & loss", accuracy, loss) return float(loss[1]), val_data.batch_size, {"accuracy": float(accuracy[1])} - + We can now create an instance of our class :code:`MNISTClient` and add one line to actually run this client: diff --git a/doc/source/tutorial-quickstart-pytorch.rst b/doc/source/tutorial-quickstart-pytorch.rst index fb77d107b63f..32f9c5ebb3a1 100644 --- a/doc/source/tutorial-quickstart-pytorch.rst +++ b/doc/source/tutorial-quickstart-pytorch.rst @@ -10,13 +10,13 @@ Quickstart PyTorch .. youtube:: jOmmuzMIQ4c :width: 100% -In this tutorial we will learn how to train a Convolutional Neural Network on CIFAR10 using Flower and PyTorch. +In this tutorial we will learn how to train a Convolutional Neural Network on CIFAR10 using Flower and PyTorch. -First of all, it is recommended to create a virtual environment and run everything within a `virtualenv `_. +First of all, it is recommended to create a virtual environment and run everything within a `virtualenv `_. -Our example consists of one *server* and two *clients* all having the same model. +Our example consists of one *server* and two *clients* all having the same model. 
-*Clients* are responsible for generating individual weight-updates for the model based on their local datasets. +*Clients* are responsible for generating individual weight-updates for the model based on their local datasets. These updates are then sent to the *server* which will aggregate them to produce a better model. Finally, the *server* sends this improved version of the model back to each *client*. A complete cycle of weight updates is called a *round*. @@ -26,7 +26,7 @@ Now that we have a rough idea of what is going on, let's get started. We first n $ pip install flwr -Since we want to use PyTorch to solve a computer vision task, let's go ahead and install PyTorch and the **torchvision** library: +Since we want to use PyTorch to solve a computer vision task, let's go ahead and install PyTorch and the **torchvision** library: .. code-block:: shell @@ -36,12 +36,12 @@ Since we want to use PyTorch to solve a computer vision task, let's go ahead and Flower Client ------------- -Now that we have all our dependencies installed, let's run a simple distributed training with two clients and one server. Our training procedure and network architecture are based on PyTorch's `Deep Learning with PyTorch `_. +Now that we have all our dependencies installed, let's run a simple distributed training with two clients and one server. Our training procedure and network architecture are based on PyTorch's `Deep Learning with PyTorch `_. In a file called :code:`client.py`, import Flower and PyTorch related packages: .. code-block:: python - + from collections import OrderedDict import torch @@ -59,7 +59,7 @@ In addition, we define the device allocation in PyTorch with: DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") -We use PyTorch to load CIFAR10, a popular colored image classification dataset for machine learning. The PyTorch :code:`DataLoader()` downloads the training and test data that are then normalized. 
+We use PyTorch to load CIFAR10, a popular colored image classification dataset for machine learning. The PyTorch :code:`DataLoader()` downloads the training and test data that are then normalized. .. code-block:: python @@ -75,7 +75,7 @@ We use PyTorch to load CIFAR10, a popular colored image classification dataset f num_examples = {"trainset" : len(trainset), "testset" : len(testset)} return trainloader, testloader, num_examples -Define the loss and optimizer with PyTorch. The training of the dataset is done by looping over the dataset, measure the corresponding loss and optimize it. +Define the loss and optimizer with PyTorch. The training of the dataset is done by looping over the dataset, measure the corresponding loss and optimize it. .. code-block:: python @@ -91,7 +91,7 @@ Define the loss and optimizer with PyTorch. The training of the dataset is done loss.backward() optimizer.step() -Define then the validation of the machine learning network. We loop over the test set and measure the loss and accuracy of the test set. +Define then the validation of the machine learning network. We loop over the test set and measure the loss and accuracy of the test set. .. code-block:: python @@ -139,7 +139,7 @@ The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 Minute Blit net = Net().to(DEVICE) trainloader, testloader, num_examples = load_data() -After loading the data set with :code:`load_data()` we define the Flower interface. +After loading the data set with :code:`load_data()` we define the Flower interface. The Flower server interacts with clients through an interface called :code:`Client`. When the server selects a particular client for training, it @@ -191,10 +191,10 @@ to actually run this client: .. code-block:: python - fl.client.start_numpy_client(server_address="[::]:8080", client=CifarClient()) + fl.client.start_client(server_address="[::]:8080", client=CifarClient().to_client()) That's it for the client. 
We only have to implement :code:`Client` or -:code:`NumPyClient` and call :code:`fl.client.start_client()` or :code:`fl.client.start_numpy_client()`. The string :code:`"[::]:8080"` tells the client which server to connect to. In our case we can run the server and the client on the same machine, therefore we use +:code:`NumPyClient` and call :code:`fl.client.start_client()`. If you implement a client of type :code:`NumPyClient` you'll need to first call its :code:`to_client()` method. The string :code:`"[::]:8080"` tells the client which server to connect to. In our case we can run the server and the client on the same machine, therefore we use :code:`"[::]:8080"`. If we run a truly federated workload with the server and clients running on different machines, all that needs to change is the :code:`server_address` we point the client at. diff --git a/doc/source/tutorial-quickstart-scikitlearn.rst b/doc/source/tutorial-quickstart-scikitlearn.rst index b33068e975fa..b95118aa091f 100644 --- a/doc/source/tutorial-quickstart-scikitlearn.rst +++ b/doc/source/tutorial-quickstart-scikitlearn.rst @@ -7,13 +7,13 @@ Quickstart scikit-learn .. meta:: :description: Check out this Federated Learning quickstart tutorial for using Flower with scikit-learn to train a linear regression model. -In this tutorial, we will learn how to train a :code:`Logistic Regression` model on MNIST using Flower and scikit-learn. +In this tutorial, we will learn how to train a :code:`Logistic Regression` model on MNIST using Flower and scikit-learn. -It is recommended to create a virtual environment and run everything within this `virtualenv `_. +It is recommended to create a virtual environment and run everything within this `virtualenv `_. -Our example consists of one *server* and two *clients* all having the same model. +Our example consists of one *server* and two *clients* all having the same model. 
-*Clients* are responsible for generating individual model parameter updates for the model based on their local datasets. +*Clients* are responsible for generating individual model parameter updates for the model based on their local datasets. These updates are then sent to the *server* which will aggregate them to produce an updated global model. Finally, the *server* sends this improved version of the model back to each *client*. A complete cycle of parameters updates is called a *round*. @@ -59,7 +59,7 @@ Please check out :code:`utils.py` `here `_, a popular image classification dataset of handwritten digits for machine learning. The utility :code:`utils.load_mnist()` downloads the training and test data. The training set is split afterwards into 10 partitions with :code:`utils.partition()`. +We load the MNIST dataset from `OpenML `_, a popular image classification dataset of handwritten digits for machine learning. The utility :code:`utils.load_mnist()` downloads the training and test data. The training set is split afterwards into 10 partitions with :code:`utils.partition()`. .. code-block:: python @@ -145,10 +145,10 @@ to actually run this client: .. code-block:: python - fl.client.start_numpy_client("0.0.0.0:8080", client=MnistClient()) + fl.client.start_client("0.0.0.0:8080", client=MnistClient().to_client()) That's it for the client. We only have to implement :code:`Client` or -:code:`NumPyClient` and call :code:`fl.client.start_client()` or :code:`fl.client.start_numpy_client()`. The string :code:`"0.0.0.0:8080"` tells the client which server to connect to. In our case we can run the server and the client on the same machine, therefore we use +:code:`NumPyClient` and call :code:`fl.client.start_client()`. If you implement a client of type :code:`NumPyClient` you'll need to first call its :code:`to_client()` method. The string :code:`"0.0.0.0:8080"` tells the client which server to connect to. 
In our case we can run the server and the client on the same machine, therefore we use :code:`"0.0.0.0:8080"`. If we run a truly federated workload with the server and clients running on different machines, all that needs to change is the :code:`server_address` we pass to the client. diff --git a/doc/source/tutorial-quickstart-tensorflow.rst b/doc/source/tutorial-quickstart-tensorflow.rst index 64b2255a9ac6..bd63eb461d21 100644 --- a/doc/source/tutorial-quickstart-tensorflow.rst +++ b/doc/source/tutorial-quickstart-tensorflow.rst @@ -84,11 +84,11 @@ to actually run this client: .. code-block:: python - fl.client.start_numpy_client(server_address="[::]:8080", client=CifarClient()) + fl.client.start_client(server_address="[::]:8080", client=CifarClient().to_client()) That's it for the client. We only have to implement :code:`Client` or -:code:`NumPyClient` and call :code:`fl.client.start_client()` or :code:`fl.client.start_numpy_client()`. The string :code:`"[::]:8080"` tells the client which server to connect to. In our case we can run the server and the client on the same machine, therefore we use +:code:`NumPyClient` and call :code:`fl.client.start_client()`. If you implement a client of type :code:`NumPyClient` you'll need to first call its :code:`to_client()` method. The string :code:`"[::]:8080"` tells the client which server to connect to. In our case we can run the server and the client on the same machine, therefore we use :code:`"[::]:8080"`. If we run a truly federated workload with the server and clients running on different machines, all that needs to change is the :code:`server_address` we point the client at. 
diff --git a/doc/source/tutorial-quickstart-xgboost.rst b/doc/source/tutorial-quickstart-xgboost.rst index 7eb58da7f2f6..ec9101f4b3fd 100644 --- a/doc/source/tutorial-quickstart-xgboost.rst +++ b/doc/source/tutorial-quickstart-xgboost.rst @@ -36,7 +36,7 @@ and then we dive into a more complex example (`full code xgboost-comprehensive < Environment Setup -------------------- -First of all, it is recommended to create a virtual environment and run everything within a `virtualenv `_. +First of all, it is recommended to create a virtual environment and run everything within a `virtualenv `_. We first need to install Flower and Flower Datasets. You can do this by running : @@ -595,9 +595,164 @@ Comprehensive Federated XGBoost Now that you have known how federated XGBoost work with Flower, it's time to run some more comprehensive experiments by customising the experimental settings. In the xgboost-comprehensive example (`full code `_), -we provide more options to define various experimental setups, including data partitioning and centralised/distributed evaluation. +we provide more options to define various experimental setups, including aggregation strategies, data partitioning and centralised/distributed evaluation. +We also support `Flower simulation `_ making it easy to simulate large client cohorts in a resource-aware manner. Let's take a look! +Cyclic training +~~~~~~~~~~~~~~~~~~ + +In addition to bagging aggregation, we offer a cyclic training scheme, which performs FL in a client-by-client fashion. +Instead of aggregating multiple clients, there is only one single client participating in the training per round in the cyclic training scenario. +The trained local XGBoost trees will be passed to the next client as an initialised model for next round's boosting. + +To do this, we first customise a :code:`ClientManager` in :code:`server_utils.py`: + +.. 
code-block:: python + + class CyclicClientManager(SimpleClientManager): + """Provides a cyclic client selection rule.""" + + def sample( + self, + num_clients: int, + min_num_clients: Optional[int] = None, + criterion: Optional[Criterion] = None, + ) -> List[ClientProxy]: + """Sample a number of Flower ClientProxy instances.""" + + # Block until at least num_clients are connected. + if min_num_clients is None: + min_num_clients = num_clients + self.wait_for(min_num_clients) + + # Sample clients which meet the criterion + available_cids = list(self.clients) + if criterion is not None: + available_cids = [ + cid for cid in available_cids if criterion.select(self.clients[cid]) + ] + + if num_clients > len(available_cids): + log( + INFO, + "Sampling failed: number of available clients" + " (%s) is less than number of requested clients (%s).", + len(available_cids), + num_clients, + ) + return [] + + # Return all available clients + return [self.clients[cid] for cid in available_cids] + +The customised :code:`ClientManager` samples all available clients in each FL round based on the order of connection to the server. +Then, we define a new strategy :code:`FedXgbCyclic` in :code:`flwr.server.strategy.fedxgb_cyclic.py`, +in order to sequentially select only one client in given round and pass the received model to next client. + +.. 
code-block:: python + + class FedXgbCyclic(FedAvg): + """Configurable FedXgbCyclic strategy implementation.""" + + # pylint: disable=too-many-arguments,too-many-instance-attributes, line-too-long + def __init__( + self, + **kwargs: Any, + ): + self.global_model: Optional[bytes] = None + super().__init__(**kwargs) + + def aggregate_fit( + self, + server_round: int, + results: List[Tuple[ClientProxy, FitRes]], + failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], + ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + """Aggregate fit results using bagging.""" + if not results: + return None, {} + # Do not aggregate if there are failures and failures are not accepted + if not self.accept_failures and failures: + return None, {} + + # Fetch the client model from last round as global model + for _, fit_res in results: + update = fit_res.parameters.tensors + for bst in update: + self.global_model = bst + + return ( + Parameters(tensor_type="", tensors=[cast(bytes, self.global_model)]), + {}, + ) + +Unlike the original :code:`FedAvg`, we don't perform aggregation here. +Instead, we just make a copy of the received client model as global model by overriding :code:`aggregate_fit`. + +Also, the customised :code:`configure_fit` and :code:`configure_evaluate` methods ensure the clients to be sequentially selected given FL round: + +.. 
code-block:: python + + def configure_fit( + self, server_round: int, parameters: Parameters, client_manager: ClientManager + ) -> List[Tuple[ClientProxy, FitIns]]: + """Configure the next round of training.""" + config = {} + if self.on_fit_config_fn is not None: + # Custom fit config function provided + config = self.on_fit_config_fn(server_round) + fit_ins = FitIns(parameters, config) + + # Sample clients + sample_size, min_num_clients = self.num_fit_clients( + client_manager.num_available() + ) + clients = client_manager.sample( + num_clients=sample_size, + min_num_clients=min_num_clients, + ) + + # Sample the clients sequentially given server_round + sampled_idx = (server_round - 1) % len(clients) + sampled_clients = [clients[sampled_idx]] + + # Return client/config pairs + return [(client, fit_ins) for client in sampled_clients] + + def configure_evaluate( + self, server_round: int, parameters: Parameters, client_manager: ClientManager + ) -> List[Tuple[ClientProxy, EvaluateIns]]: + """Configure the next round of evaluation.""" + # Do not configure federated evaluation if fraction eval is 0. 
+ if self.fraction_evaluate == 0.0: + return [] + + # Parameters and config + config = {} + if self.on_evaluate_config_fn is not None: + # Custom evaluation config function provided + config = self.on_evaluate_config_fn(server_round) + evaluate_ins = EvaluateIns(parameters, config) + + # Sample clients + sample_size, min_num_clients = self.num_evaluation_clients( + client_manager.num_available() + ) + clients = client_manager.sample( + num_clients=sample_size, + min_num_clients=min_num_clients, + ) + + # Sample the clients sequentially given server_round + sampled_idx = (server_round - 1) % len(clients) + sampled_clients = [clients[sampled_idx]] + + # Return client/config pairs + return [(client, evaluate_ins) for client in sampled_clients] + + + Customised data partitioning ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -634,7 +789,7 @@ Currently, we provide four supported partitioner type to simulate the uniformity Customised centralised/distributed evaluation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To facilitate centralised evaluation, we define a function in :code:`server.py`: +To facilitate centralised evaluation, we define a function in :code:`server_utils.py`: .. code-block:: python @@ -670,51 +825,265 @@ This function returns a evaluation function which instantiates a :code:`Booster` The evaluation is conducted by calling :code:`eval_set()` method, and the tested AUC value is reported. As for distributed evaluation on the clients, it's same as the quick-start example by -overriding the :code:`evaluate()` method insides the :code:`XgbClient` class in :code:`client.py`. +overriding the :code:`evaluate()` method inside the :code:`XgbClient` class in :code:`client_utils.py`. -Arguments parser -~~~~~~~~~~~~~~~~~~~~~~ +Flower simulation +~~~~~~~~~~~~~~~~~~~~ +We also provide example code (:code:`sim.py`) to use the simulation capabilities of Flower to simulate federated XGBoost training on either a single machine or a cluster of machines.
-In :code:`utils.py`, we define the arguments parsers for clients and server, allowing users to specify different experimental settings. -Let's first see the sever side: +.. code-block:: python + + from logging import INFO + import xgboost as xgb + from tqdm import tqdm + + import flwr as fl + from flwr_datasets import FederatedDataset + from flwr.common.logger import log + from flwr.server.strategy import FedXgbBagging, FedXgbCyclic + + from dataset import ( + instantiate_partitioner, + train_test_split, + transform_dataset_to_dmatrix, + separate_xy, + resplit, + ) + from utils import ( + sim_args_parser, + NUM_LOCAL_ROUND, + BST_PARAMS, + ) + from server_utils import ( + eval_config, + fit_config, + evaluate_metrics_aggregation, + get_evaluate_fn, + CyclicClientManager, + ) + from client_utils import XgbClient + +After importing all required packages, we define a :code:`main()` function to perform the simulation process: .. code-block:: python - import argparse + def main(): + # Parse arguments for experimental settings + args = sim_args_parser() + # Load (HIGGS) dataset and conduct partitioning + partitioner = instantiate_partitioner( + partitioner_type=args.partitioner_type, num_partitions=args.pool_size + ) + fds = FederatedDataset( + dataset="jxie/higgs", + partitioners={"train": partitioner}, + resplitter=resplit, + ) - def server_args_parser(): - """Parse arguments to define experimental settings on server side.""" - parser = argparse.ArgumentParser() + # Load centralised test set + if args.centralised_eval or args.centralised_eval_client: + log(INFO, "Loading centralised test set...") + test_data = fds.load_full("test") + test_data.set_format("numpy") + num_test = test_data.shape[0] + test_dmatrix = transform_dataset_to_dmatrix(test_data) + + # Load partitions and reformat data to DMatrix for xgboost + log(INFO, "Loading client local partitions...") + train_data_list = [] + valid_data_list = [] + + # Load and process all client partitions. 
This upfront cost is amortized soon + # after the simulation begins since clients wont need to preprocess their partition. + for node_id in tqdm(range(args.pool_size), desc="Extracting client partition"): + # Extract partition for client with node_id + partition = fds.load_partition(node_id=node_id, split="train") + partition.set_format("numpy") + + if args.centralised_eval_client: + # Use centralised test set for evaluation + train_data = partition + num_train = train_data.shape[0] + x_test, y_test = separate_xy(test_data) + valid_data_list.append(((x_test, y_test), num_test)) + else: + # Train/test splitting + train_data, valid_data, num_train, num_val = train_test_split( + partition, test_fraction=args.test_fraction, seed=args.seed + ) + x_valid, y_valid = separate_xy(valid_data) + valid_data_list.append(((x_valid, y_valid), num_val)) - parser.add_argument( - "--pool-size", default=2, type=int, help="Number of total clients." - ) - parser.add_argument( - "--num-rounds", default=5, type=int, help="Number of FL rounds." - ) - parser.add_argument( - "--num-clients-per-round", - default=2, - type=int, - help="Number of clients participate in training each round.", - ) - parser.add_argument( - "--num-evaluate-clients", - default=2, - type=int, - help="Number of clients selected for evaluation.", + x_train, y_train = separate_xy(train_data) + train_data_list.append(((x_train, y_train), num_train)) + +We first load the dataset and perform data partitioning, and the pre-processed data is stored in a :code:`list`. +After the simulation begins, the clients won't need to pre-process their partitions again. + +Then, we define the strategies and other hyper-parameters: + +.. 
code-block:: python + + # Define strategy + if args.train_method == "bagging": + # Bagging training + strategy = FedXgbBagging( + evaluate_function=get_evaluate_fn(test_dmatrix) + if args.centralised_eval + else None, + fraction_fit=(float(args.num_clients_per_round) / args.pool_size), + min_fit_clients=args.num_clients_per_round, + min_available_clients=args.pool_size, + min_evaluate_clients=args.num_evaluate_clients + if not args.centralised_eval + else 0, + fraction_evaluate=1.0 if not args.centralised_eval else 0.0, + on_evaluate_config_fn=eval_config, + on_fit_config_fn=fit_config, + evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation + if not args.centralised_eval + else None, ) - parser.add_argument( - "--centralised-eval", - action="store_true", - help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", + else: + # Cyclic training + strategy = FedXgbCyclic( + fraction_fit=1.0, + min_available_clients=args.pool_size, + fraction_evaluate=1.0, + evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation, + on_evaluate_config_fn=eval_config, + on_fit_config_fn=fit_config, ) - args = parser.parse_args() - return args + # Resources to be assigned to each virtual client + # In this example we use CPU by default + client_resources = { + "num_cpus": args.num_cpus_per_client, + "num_gpus": 0.0, + } + + # Hyper-parameters for xgboost training + num_local_round = NUM_LOCAL_ROUND + params = BST_PARAMS + + # Setup learning rate + if args.train_method == "bagging" and args.scaled_lr: + new_lr = params["eta"] / args.pool_size + params.update({"eta": new_lr}) + +After that, we start the simulation by calling :code:`fl.simulation.start_simulation`: + +.. 
code-block:: python -This allows user to specify the number of total clients / FL rounds / participating clients / clients for evaluation, + # Start simulation + fl.simulation.start_simulation( + client_fn=get_client_fn( + train_data_list, + valid_data_list, + args.train_method, + params, + num_local_round, + ), + num_clients=args.pool_size, + client_resources=client_resources, + config=fl.server.ServerConfig(num_rounds=args.num_rounds), + strategy=strategy, + client_manager=CyclicClientManager() if args.train_method == "cyclic" else None, + ) + +One of the key parameters for :code:`start_simulation` is :code:`client_fn` which returns a function to construct a client. +We define it as follows: + +.. code-block:: python + + def get_client_fn( + train_data_list, valid_data_list, train_method, params, num_local_round + ): + """Return a function to construct a client. + + The VirtualClientEngine will execute this function whenever a client is sampled by + the strategy to participate. + """ + + def client_fn(cid: str) -> fl.client.Client: + """Construct a FlowerClient with its own dataset partition.""" + x_train, y_train = train_data_list[int(cid)][0] + x_valid, y_valid = valid_data_list[int(cid)][0] + + # Reformat data to DMatrix + train_dmatrix = xgb.DMatrix(x_train, label=y_train) + valid_dmatrix = xgb.DMatrix(x_valid, label=y_valid) + + # Fetch the number of examples + num_train = train_data_list[int(cid)][1] + num_val = valid_data_list[int(cid)][1] + + # Create and return client + return XgbClient( + train_dmatrix, + valid_dmatrix, + num_train, + num_val, + num_local_round, + params, + train_method, + ) + + return client_fn + + + +Arguments parser +~~~~~~~~~~~~~~~~~~~~~~ + +In :code:`utils.py`, we define the arguments parsers for clients, server and simulation, allowing users to specify different experimental settings. +Let's first see the server side: + +.. 
code-block:: python + + import argparse + + + def server_args_parser(): + """Parse arguments to define experimental settings on server side.""" + parser = argparse.ArgumentParser() + + parser.add_argument( + "--train-method", + default="bagging", + type=str, + choices=["bagging", "cyclic"], + help="Training methods selected from bagging aggregation or cyclic training.", + ) + parser.add_argument( + "--pool-size", default=2, type=int, help="Number of total clients." + ) + parser.add_argument( + "--num-rounds", default=5, type=int, help="Number of FL rounds." + ) + parser.add_argument( + "--num-clients-per-round", + default=2, + type=int, + help="Number of clients participate in training each round.", + ) + parser.add_argument( + "--num-evaluate-clients", + default=2, + type=int, + help="Number of clients selected for evaluation.", + ) + parser.add_argument( + "--centralised-eval", + action="store_true", + help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", + ) + + args = parser.parse_args() + return args + +This allows users to specify training strategies / the number of total clients / FL rounds / participating clients / clients for evaluation, and evaluation fashion. Note that with :code:`--centralised-eval`, the sever will do centralised evaluation and all functionalities for client evaluation will be disabled. @@ -723,60 +1092,159 @@ Then, the argument parser on client side: .. 
code-block:: python def client_args_parser(): - """Parse arguments to define experimental settings on client side.""" - parser = argparse.ArgumentParser() + """Parse arguments to define experimental settings on client side.""" + parser = argparse.ArgumentParser() + + parser.add_argument( + "--train-method", + default="bagging", + type=str, + choices=["bagging", "cyclic"], + help="Training methods selected from bagging aggregation or cyclic training.", + ) + parser.add_argument( + "--num-partitions", default=10, type=int, help="Number of partitions." + ) + parser.add_argument( + "--partitioner-type", + default="uniform", + type=str, + choices=["uniform", "linear", "square", "exponential"], + help="Partitioner types.", + ) + parser.add_argument( + "--node-id", + default=0, + type=int, + help="Node ID used for the current client.", + ) + parser.add_argument( + "--seed", default=42, type=int, help="Seed used for train/test splitting." + ) + parser.add_argument( + "--test-fraction", + default=0.2, + type=float, + help="Test fraction for train/test splitting.", + ) + parser.add_argument( + "--centralised-eval", + action="store_true", + help="Conduct evaluation on centralised test set (True), or on hold-out data (False).", + ) + parser.add_argument( + "--scaled-lr", + action="store_true", + help="Perform scaled learning rate based on the number of clients (True).", + ) + + args = parser.parse_args() + return args - parser.add_argument( - "--num-partitions", default=10, type=int, help="Number of partitions." - ) - parser.add_argument( - "--partitioner-type", - default="uniform", - type=str, - choices=["uniform", "linear", "square", "exponential"], - help="Partitioner types.", - ) - parser.add_argument( - "--node-id", - default=0, - type=int, - help="Node ID used for the current client.", - ) - parser.add_argument( - "--seed", default=42, type=int, help="Seed used for train/test splitting." 
- ) - parser.add_argument( - "--test-fraction", - default=0.2, - type=float, - help="Test fraction for train/test splitting.", - ) - parser.add_argument( - "--centralised-eval", - action="store_true", - help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", - ) +This defines various options for client data partitioning. +Besides, clients also have an option to conduct evaluation on centralised test set by setting :code:`--centralised-eval`, +as well as an option to perform scaled learning rate based on the number of clients by setting :code:`--scaled-lr`. - args = parser.parse_args() - return args +We also have an argument parser for simulation: -This defines various options for client data partitioning. -Besides, clients also have a option to conduct evaluation on centralised test set by setting :code:`--centralised-eval`. +.. code-block:: python + + def sim_args_parser(): + """Parse arguments to define experimental settings on server side.""" + parser = argparse.ArgumentParser() + + parser.add_argument( + "--train-method", + default="bagging", + type=str, + choices=["bagging", "cyclic"], + help="Training methods selected from bagging aggregation or cyclic training.", + ) + + # Server side + parser.add_argument( + "--pool-size", default=5, type=int, help="Number of total clients." + ) + parser.add_argument( + "--num-rounds", default=30, type=int, help="Number of FL rounds." 
+ ) + parser.add_argument( + "--num-clients-per-round", + default=5, + type=int, + help="Number of clients participate in training each round.", + ) + parser.add_argument( + "--num-evaluate-clients", + default=5, + type=int, + help="Number of clients selected for evaluation.", + ) + parser.add_argument( + "--centralised-eval", + action="store_true", + help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", + ) + parser.add_argument( + "--num-cpus-per-client", + default=2, + type=int, + help="Number of CPUs used for per client.", + ) + + # Client side + parser.add_argument( + "--partitioner-type", + default="uniform", + type=str, + choices=["uniform", "linear", "square", "exponential"], + help="Partitioner types.", + ) + parser.add_argument( + "--seed", default=42, type=int, help="Seed used for train/test splitting." + ) + parser.add_argument( + "--test-fraction", + default=0.2, + type=float, + help="Test fraction for train/test splitting.", + ) + parser.add_argument( + "--centralised-eval-client", + action="store_true", + help="Conduct evaluation on centralised test set (True), or on hold-out data (False).", + ) + parser.add_argument( + "--scaled-lr", + action="store_true", + help="Perform scaled learning rate based on the number of clients (True).", + ) + + args = parser.parse_args() + return args + +This integrates all arguments for both client and server sides. Example commands ~~~~~~~~~~~~~~~~~~~~~ -To run a centralised evaluated experiment on 5 clients with exponential distribution for 50 rounds, +To run a centralised evaluated experiment with bagging strategy on 5 clients with exponential distribution for 50 rounds, we first start the server as below: .. 
code-block:: shell - $ python3 server.py --pool-size=5 --num-rounds=50 --num-clients-per-round=5 --centralised-eval + $ python3 server.py --train-method=bagging --pool-size=5 --num-rounds=50 --num-clients-per-round=5 --centralised-eval Then, on each client terminal, we start the clients: .. code-block:: shell - $ python3 clients.py --num-partitions=5 --partitioner-type=exponential --node-id=NODE_ID + $ python3 clients.py --train-method=bagging --num-partitions=5 --partitioner-type=exponential --node-id=NODE_ID + +To run the same experiment with Flower simulation: + +.. code-block:: shell + + $ python3 sim.py --train-method=bagging --pool-size=5 --num-rounds=50 --num-clients-per-round=5 --partitioner-type=exponential --centralised-eval The full `code `_ for this comprehensive example can be found in :code:`examples/xgboost-comprehensive`. diff --git a/doc/source/tutorial-series-customize-the-client-pytorch.ipynb b/doc/source/tutorial-series-customize-the-client-pytorch.ipynb index 0ff67de6f51d..bcfdeb30d3c7 100644 --- a/doc/source/tutorial-series-customize-the-client-pytorch.ipynb +++ b/doc/source/tutorial-series-customize-the-client-pytorch.ipynb @@ -7,11 +7,11 @@ "source": [ "# Customize the client\n", "\n", - "Welcome to the fourth part of the Flower federated learning tutorial. In the previous parts of this tutorial, we introduced federated learning with PyTorch and Flower ([part 1](https://flower.dev/docs/framework/tutorial-get-started-with-flower-pytorch.html)), we learned how strategies can be used to customize the execution on both the server and the clients ([part 2](https://flower.dev/docs/framework/tutorial-use-a-federated-learning-strategy-pytorch.html)), and we built our own custom strategy from scratch ([part 3](https://flower.dev/docs/framework/tutorial-build-a-strategy-from-scratch-pytorch.html)).\n", + "Welcome to the fourth part of the Flower federated learning tutorial. 
In the previous parts of this tutorial, we introduced federated learning with PyTorch and Flower ([part 1](https://flower.ai/docs/framework/tutorial-get-started-with-flower-pytorch.html)), we learned how strategies can be used to customize the execution on both the server and the clients ([part 2](https://flower.ai/docs/framework/tutorial-use-a-federated-learning-strategy-pytorch.html)), and we built our own custom strategy from scratch ([part 3](https://flower.ai/docs/framework/tutorial-build-a-strategy-from-scratch-pytorch.html)).\n", "\n", "In this notebook, we revisit `NumPyClient` and introduce a new baseclass for building clients, simply named `Client`. In previous parts of this tutorial, we've based our client on `NumPyClient`, a convenience class which makes it easy to work with machine learning libraries that have good NumPy interoperability. With `Client`, we gain a lot of flexibility that we didn't have before, but we'll also have to do a few things the we didn't have to do before.\n", "\n", - "> [Star Flower on GitHub](https://github.com/adap/flower) ⭐️ and join the Flower community on Slack to connect, ask questions, and get help: [Join Slack](https://flower.dev/join-slack) 🌼 We'd love to hear from you in the `#introductions` channel! And if anything is unclear, head over to the `#questions` channel.\n", + "> [Star Flower on GitHub](https://github.com/adap/flower) ⭐️ and join the Flower community on Slack to connect, ask questions, and get help: [Join Slack](https://flower.ai/join-slack) 🌼 We'd love to hear from you in the `#introductions` channel! And if anything is unclear, head over to the `#questions` channel.\n", "\n", "Let's go deeper and see what it takes to move from `NumPyClient` to `Client`!" 
] diff --git a/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb b/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb index bbd916b32375..f4b8acaa5bb8 100644 --- a/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb +++ b/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb @@ -9,9 +9,9 @@ "\n", "Welcome to the Flower federated learning tutorial!\n", "\n", - "In this notebook, we'll build a federated learning system using Flower, [Flower Datasets](https://flower.dev/docs/datasets/) and PyTorch. In part 1, we use PyTorch for the model training pipeline and data loading. In part 2, we continue to federate the PyTorch-based pipeline using Flower.\n", + "In this notebook, we'll build a federated learning system using Flower, [Flower Datasets](https://flower.ai/docs/datasets/) and PyTorch. In part 1, we use PyTorch for the model training pipeline and data loading. In part 2, we continue to federate the PyTorch-based pipeline using Flower.\n", "\n", - "> [Star Flower on GitHub](https://github.com/adap/flower) ⭐️ and join the Flower community on Slack to connect, ask questions, and get help: [Join Slack](https://flower.dev/join-slack) 🌼 We'd love to hear from you in the `#introductions` channel! And if anything is unclear, head over to the `#questions` channel.\n", + "> [Star Flower on GitHub](https://github.com/adap/flower) ⭐️ and join the Flower community on Slack to connect, ask questions, and get help: [Join Slack](https://flower.ai/join-slack) 🌼 We'd love to hear from you in the `#introductions` channel! And if anything is unclear, head over to the `#questions` channel.\n", "\n", "Let's get stated!" ] @@ -83,7 +83,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "It is possible to switch to a runtime that has GPU acceleration enabled (on Google Colab: `Runtime > Change runtime type > Hardware acclerator: GPU > Save`). Note, however, that Google Colab is not always able to offer GPU acceleration. 
If you see an error related to GPU availability in one of the following sections, consider switching back to CPU-based execution by setting `DEVICE = torch.device(\"cpu\")`. If the runtime has GPU acceleration enabled, you should see the output `Training on cuda`, otherwise it'll say `Training on cpu`." + "It is possible to switch to a runtime that has GPU acceleration enabled (on Google Colab: `Runtime > Change runtime type > Hardware accelerator: GPU > Save`). Note, however, that Google Colab is not always able to offer GPU acceleration. If you see an error related to GPU availability in one of the following sections, consider switching back to CPU-based execution by setting `DEVICE = torch.device(\"cpu\")`. If the runtime has GPU acceleration enabled, you should see the output `Training on cuda`, otherwise it'll say `Training on cpu`." ] }, { @@ -368,14 +368,14 @@ "metadata": {}, "outputs": [], "source": [ - "def get_parameters(net) -> List[np.ndarray]:\n", - " return [val.cpu().numpy() for _, val in net.state_dict().items()]\n", - "\n", - "\n", "def set_parameters(net, parameters: List[np.ndarray]):\n", " params_dict = zip(net.state_dict().keys(), parameters)\n", " state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict})\n", - " net.load_state_dict(state_dict, strict=True)" + " net.load_state_dict(state_dict, strict=True)\n", + "\n", + "\n", + "def get_parameters(net) -> List[np.ndarray]:\n", + " return [val.cpu().numpy() for _, val in net.state_dict().items()]" ] }, { @@ -485,7 +485,7 @@ ")\n", "\n", "# Specify the resources each of your clients need. 
By default, each\n", - "# client will be allocated 1x CPU and 0x CPUs\n", + "# client will be allocated 1x CPU and 0x GPUs\n", "client_resources = {\"num_cpus\": 1, \"num_gpus\": 0.0}\n", "if DEVICE.type == \"cuda\":\n", " # here we are asigning an entire GPU for each client.\n", diff --git a/doc/source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb b/doc/source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb index 06f53cd8e1b1..c758b8f637b0 100644 --- a/doc/source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb +++ b/doc/source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb @@ -7,11 +7,11 @@ "source": [ "# Use a federated learning strategy\n", "\n", - "Welcome to the next part of the federated learning tutorial. In previous parts of this tutorial, we introduced federated learning with PyTorch and Flower ([part 1](https://flower.dev/docs/framework/tutorial-get-started-with-flower-pytorch.html)).\n", + "Welcome to the next part of the federated learning tutorial. In previous parts of this tutorial, we introduced federated learning with PyTorch and Flower ([part 1](https://flower.ai/docs/framework/tutorial-get-started-with-flower-pytorch.html)).\n", "\n", - "In this notebook, we'll begin to customize the federated learning system we built in the introductory notebook (again, using [Flower](https://flower.dev/) and [PyTorch](https://pytorch.org/)).\n", + "In this notebook, we'll begin to customize the federated learning system we built in the introductory notebook (again, using [Flower](https://flower.ai/) and [PyTorch](https://pytorch.org/)).\n", "\n", - "> [Star Flower on GitHub](https://github.com/adap/flower) ⭐️ and join the Flower community on Slack to connect, ask questions, and get help: [Join Slack](https://flower.dev/join-slack) 🌼 We'd love to hear from you in the `#introductions` channel! 
And if anything is unclear, head over to the `#questions` channel.\n", + "> [Star Flower on GitHub](https://github.com/adap/flower) ⭐️ and join the Flower community on Slack to connect, ask questions, and get help: [Join Slack](https://flower.ai/join-slack) 🌼 We'd love to hear from you in the `#introductions` channel! And if anything is unclear, head over to the `#questions` channel.\n", "\n", "Let's move beyond FedAvg with Flower strategies!" ] diff --git a/e2e/bare-https/client.py b/e2e/bare-https/client.py index 20a5b4875ddf..b4570b36512d 100644 --- a/e2e/bare-https/client.py +++ b/e2e/bare-https/client.py @@ -25,15 +25,15 @@ def evaluate(self, parameters, config): def client_fn(cid): return FlowerClient().to_client() -flower = fl.flower.Flower( +app = fl.client.ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_numpy_client( + fl.client.start_client( server_address="127.0.0.1:8080", - client=FlowerClient(), + client=FlowerClient().to_client(), root_certificates=Path("certificates/ca.crt").read_bytes(), insecure=False, ) diff --git a/e2e/bare-https/driver.py b/e2e/bare-https/driver.py index 5c44e4c641ae..f7bfeb613f6a 100644 --- a/e2e/bare-https/driver.py +++ b/e2e/bare-https/driver.py @@ -3,7 +3,7 @@ # Start Flower server -hist = fl.driver.start_driver( +hist = fl.server.start_driver( server_address="127.0.0.1:9091", config=fl.server.ServerConfig(num_rounds=3), root_certificates=Path("certificates/ca.crt").read_bytes(), diff --git a/e2e/bare/client.py b/e2e/bare/client.py index 8e5c3adff5e6..a9425b39778a 100644 --- a/e2e/bare/client.py +++ b/e2e/bare/client.py @@ -3,6 +3,8 @@ import flwr as fl import numpy as np +from flwr.common.configsrecord import ConfigsRecord + SUBSET_SIZE = 1000 STATE_VAR = 'timestamp' @@ -18,13 +20,15 @@ def get_parameters(self, config): def _record_timestamp_to_state(self): """Record timestamp to client's state.""" t_stamp = datetime.now().timestamp() - if STATE_VAR in self.state.state: - 
self.state.state[STATE_VAR] += f",{t_stamp}" - else: - self.state.state[STATE_VAR] = str(t_stamp) + value = str(t_stamp) + if STATE_VAR in self.context.state.configs.keys(): + value = self.context.state.get_configs(STATE_VAR)[STATE_VAR] # type: ignore + value += f",{t_stamp}" + + self.context.state.set_configs(name=STATE_VAR, record=ConfigsRecord({STATE_VAR: value})) def _retrieve_timestamp_from_state(self): - return self.state.state[STATE_VAR] + return self.context.state.get_configs(STATE_VAR)[STATE_VAR] def fit(self, parameters, config): model_params = parameters @@ -42,10 +46,10 @@ def evaluate(self, parameters, config): def client_fn(cid): return FlowerClient().to_client() -flower = fl.flower.Flower( +app = fl.client.ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_numpy_client(server_address="127.0.0.1:8080", client=FlowerClient()) + fl.client.start_client(server_address="127.0.0.1:8080", client=FlowerClient().to_client()) diff --git a/e2e/bare/driver.py b/e2e/bare/driver.py index 6bd61e344ad1..defc2ad56213 100644 --- a/e2e/bare/driver.py +++ b/e2e/bare/driver.py @@ -2,7 +2,7 @@ # Start Flower server -hist = fl.driver.start_driver( +hist = fl.server.start_driver( server_address="0.0.0.0:9091", config=fl.server.ServerConfig(num_rounds=3), ) diff --git a/e2e/bare/pyproject.toml b/e2e/bare/pyproject.toml index b9a4028806c3..cde8728f5c34 100644 --- a/e2e/bare/pyproject.toml +++ b/e2e/bare/pyproject.toml @@ -10,4 +10,4 @@ authors = ["The Flower Authors "] [tool.poetry.dependencies] python = "^3.8" -flwr = { path = "../../", develop = true, extras = ["simulation"] } +flwr = { path = "../../", develop = true, extras = ["simulation", "rest"] } diff --git a/e2e/fastai/client.py b/e2e/fastai/client.py index 4425fed25277..c4bfb89c2dde 100644 --- a/e2e/fastai/client.py +++ b/e2e/fastai/client.py @@ -53,14 +53,14 @@ def client_fn(cid): return FlowerClient().to_client() -flower = fl.flower.Flower( +app = 
fl.client.ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_numpy_client( + fl.client.start_client( server_address="127.0.0.1:8080", - client=FlowerClient(), + client=FlowerClient().to_client(), ) diff --git a/e2e/fastai/driver.py b/e2e/fastai/driver.py index 2b1b35d9e89c..cc452ea523ca 100644 --- a/e2e/fastai/driver.py +++ b/e2e/fastai/driver.py @@ -1,6 +1,6 @@ import flwr as fl -hist = fl.driver.start_driver( +hist = fl.server.start_driver( server_address="0.0.0.0:9091", config=fl.server.ServerConfig(num_rounds=3), ) diff --git a/e2e/jax/client.py b/e2e/jax/client.py index 495d6a671981..a4e4d1f55117 100644 --- a/e2e/jax/client.py +++ b/e2e/jax/client.py @@ -53,10 +53,10 @@ def evaluate( def client_fn(cid): return FlowerClient().to_client() -flower = fl.flower.Flower( +app = fl.client.ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_numpy_client(server_address="127.0.0.1:8080", client=FlowerClient()) + fl.client.start_client(server_address="127.0.0.1:8080", client=FlowerClient().to_client()) diff --git a/e2e/jax/driver.py b/e2e/jax/driver.py index 2b1b35d9e89c..cc452ea523ca 100644 --- a/e2e/jax/driver.py +++ b/e2e/jax/driver.py @@ -1,6 +1,6 @@ import flwr as fl -hist = fl.driver.start_driver( +hist = fl.server.start_driver( server_address="0.0.0.0:9091", config=fl.server.ServerConfig(num_rounds=3), ) diff --git a/e2e/mxnet/.gitignore b/e2e/mxnet/.gitignore deleted file mode 100644 index 10d00b5797e2..000000000000 --- a/e2e/mxnet/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.gz diff --git a/e2e/mxnet/README.md b/e2e/mxnet/README.md deleted file mode 100644 index 3fa76bac5ce0..000000000000 --- a/e2e/mxnet/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Flower with MXNet testing - -This directory is used for testing Flower with MXNet by using a simple NN with MNIST data. - -It uses the `FedAvg` strategy. 
\ No newline at end of file diff --git a/e2e/mxnet/client.py b/e2e/mxnet/client.py deleted file mode 100644 index 2f0b714e708c..000000000000 --- a/e2e/mxnet/client.py +++ /dev/null @@ -1,141 +0,0 @@ -"""Flower client example using MXNet for MNIST classification. - -The code is generally adapted from: - -https://mxnet.apache.org/api/python/docs/tutorials/packages/gluon/image/mnist.html -""" - - -import flwr as fl -import numpy as np -import mxnet as mx -from mxnet import nd -from mxnet import gluon -from mxnet.gluon import nn -from mxnet import autograd as ag -import mxnet.ndarray as F - -SUBSET_SIZE = 50 - -# Fixing the random seed -mx.random.seed(42) - -# Setup context to GPU or CPU -DEVICE = [mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()] - -def model(): - net = nn.Sequential() - net.add(nn.Dense(256, activation="relu")) - net.add(nn.Dense(64, activation="relu")) - net.add(nn.Dense(10)) - net.collect_params().initialize() - return net - -def load_data(): - print("Download Dataset") - mnist = mx.test_utils.get_mnist() - batch_size = 100 - train_data = mx.io.NDArrayIter( - mnist["train_data"][:SUBSET_SIZE], mnist["train_label"][:SUBSET_SIZE], batch_size, shuffle=True - ) - val_data = mx.io.NDArrayIter(mnist["test_data"][:10], mnist["test_label"][:10], batch_size) - return train_data, val_data - - -def train(net, train_data, epoch): - trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.01}) - accuracy_metric = mx.metric.Accuracy() - loss_metric = mx.metric.CrossEntropy() - metrics = mx.metric.CompositeEvalMetric() - for child_metric in [accuracy_metric, loss_metric]: - metrics.add(child_metric) - softmax_cross_entropy_loss = gluon.loss.SoftmaxCrossEntropyLoss() - for i in range(epoch): - train_data.reset() - num_examples = 0 - for batch in train_data: - data = gluon.utils.split_and_load( - batch.data[0], ctx_list=DEVICE, batch_axis=0 - ) - label = gluon.utils.split_and_load( - batch.label[0], ctx_list=DEVICE, batch_axis=0 - ) - outputs = 
[] - with ag.record(): - for x, y in zip(data, label): - z = net(x) - loss = softmax_cross_entropy_loss(z, y) - loss.backward() - outputs.append(z.softmax()) - num_examples += len(x) - metrics.update(label, outputs) - trainer.step(batch.data[0].shape[0]) - trainings_metric = metrics.get_name_value() - print("Accuracy & loss at epoch %d: %s" % (i, trainings_metric)) - return trainings_metric, num_examples - - -def test(net, val_data): - accuracy_metric = mx.metric.Accuracy() - loss_metric = mx.metric.CrossEntropy() - metrics = mx.metric.CompositeEvalMetric() - for child_metric in [accuracy_metric, loss_metric]: - metrics.add(child_metric) - val_data.reset() - num_examples = 0 - for batch in val_data: - data = gluon.utils.split_and_load(batch.data[0], ctx_list=DEVICE, batch_axis=0) - label = gluon.utils.split_and_load( - batch.label[0], ctx_list=DEVICE, batch_axis=0 - ) - outputs = [] - for x in data: - outputs.append(net(x).softmax()) - num_examples += len(x) - metrics.update(label, outputs) - metrics.update(label, outputs) - return metrics.get_name_value(), num_examples - -train_data, val_data = load_data() - -model = model() -init = nd.random.uniform(shape=(2, 784)) -model(init) - -# Flower Client -class FlowerClient(fl.client.NumPyClient): - def get_parameters(self, config): - param = [] - for val in model.collect_params(".*weight").values(): - p = val.data() - param.append(p.asnumpy()) - return param - - def set_parameters(self, parameters): - params = zip(model.collect_params(".*weight").keys(), parameters) - for key, value in params: - model.collect_params().setattr(key, value) - - def fit(self, parameters, config): - self.set_parameters(parameters) - [accuracy, loss], num_examples = train(model, train_data, epoch=2) - results = {"accuracy": float(accuracy[1]), "loss": float(loss[1])} - return self.get_parameters(config={}), num_examples, results - - def evaluate(self, parameters, config): - self.set_parameters(parameters) - [accuracy, loss], num_examples = 
test(model, val_data) - print("Evaluation accuracy & loss", accuracy, loss) - return float(loss[1]), num_examples, {"accuracy": float(accuracy[1])} - - -def client_fn(cid): - return FlowerClient().to_client() - -flower = fl.flower.Flower( - client_fn=client_fn, -) - -if __name__ == "__main__": - # Start Flower client - fl.client.start_numpy_client(server_address="127.0.0.1:8080", client=FlowerClient()) diff --git a/e2e/mxnet/driver.py b/e2e/mxnet/driver.py deleted file mode 100644 index 2b1b35d9e89c..000000000000 --- a/e2e/mxnet/driver.py +++ /dev/null @@ -1,7 +0,0 @@ -import flwr as fl - -hist = fl.driver.start_driver( - server_address="0.0.0.0:9091", - config=fl.server.ServerConfig(num_rounds=3), -) -assert (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 1 diff --git a/e2e/mxnet/pyproject.toml b/e2e/mxnet/pyproject.toml deleted file mode 100644 index 71bd0e6374bd..000000000000 --- a/e2e/mxnet/pyproject.toml +++ /dev/null @@ -1,15 +0,0 @@ -[build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" - -[tool.poetry] -name = "mxnet_example" -version = "0.1.0" -description = "MXNet example with MNIST and CNN" -authors = ["The Flower Authors "] - -[tool.poetry.dependencies] -python = "^3.8" -flwr = { path = "../../", develop = true, extras = ["simulation"] } -mxnet = "^1.7.0" -numpy = "1.23.1" diff --git a/e2e/mxnet/simulation.py b/e2e/mxnet/simulation.py deleted file mode 100644 index 5f0e5334bd08..000000000000 --- a/e2e/mxnet/simulation.py +++ /dev/null @@ -1,11 +0,0 @@ -import flwr as fl - -from client import client_fn - -hist = fl.simulation.start_simulation( - client_fn=client_fn, - num_clients=2, - config=fl.server.ServerConfig(num_rounds=3), -) - -assert hist.losses_distributed[-1][1] == 0 or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 diff --git a/e2e/opacus/client.py b/e2e/opacus/client.py index 2e5c363381fa..00437a31233c 100644 --- a/e2e/opacus/client.py +++ 
b/e2e/opacus/client.py @@ -137,12 +137,12 @@ def client_fn(cid): model = Net() return FlowerClient(model).to_client() -flower = fl.flower.Flower( +app = fl.client.ClientApp( client_fn=client_fn, ) if __name__ == "__main__": - fl.client.start_numpy_client( + fl.client.start_client( server_address="127.0.0.1:8080", - client=FlowerClient(model) + client=FlowerClient(model).to_client() ) diff --git a/e2e/opacus/driver.py b/e2e/opacus/driver.py index 5a0309914ee0..75acd9ccea24 100644 --- a/e2e/opacus/driver.py +++ b/e2e/opacus/driver.py @@ -1,6 +1,6 @@ import flwr as fl -hist = fl.driver.start_driver( +hist = fl.server.start_driver( server_address="0.0.0.0:9091", config=fl.server.ServerConfig(num_rounds=3), ) diff --git a/e2e/pandas/client.py b/e2e/pandas/client.py index 5b8670091cb3..0ecd75df3ae8 100644 --- a/e2e/pandas/client.py +++ b/e2e/pandas/client.py @@ -1,4 +1,3 @@ -import warnings from typing import Dict, List, Tuple import numpy as np @@ -36,13 +35,13 @@ def fit( def client_fn(cid): return FlowerClient().to_client() -flower = fl.flower.Flower( +app = fl.client.ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_numpy_client( + fl.client.start_client( server_address="127.0.0.1:8080", - client=FlowerClient(), + client=FlowerClient().to_client(), ) diff --git a/e2e/pandas/driver.py b/e2e/pandas/driver.py index b33e1e54f4a0..f5dc74c9f3f8 100644 --- a/e2e/pandas/driver.py +++ b/e2e/pandas/driver.py @@ -3,7 +3,7 @@ from strategy import FedAnalytics # Start Flower server -hist = fl.driver.start_driver( +hist = fl.server.start_driver( server_address="0.0.0.0:9091", config=fl.server.ServerConfig(num_rounds=1), strategy=FedAnalytics(), diff --git a/e2e/pytorch-lightning/client.py b/e2e/pytorch-lightning/client.py index 71b178eca8c3..fde550e31c08 100644 --- a/e2e/pytorch-lightning/client.py +++ b/e2e/pytorch-lightning/client.py @@ -55,7 +55,7 @@ def client_fn(cid): # Flower client return FlowerClient(model, train_loader, 
val_loader, test_loader).to_client() -flower = fl.flower.Flower( +app = fl.client.ClientApp( client_fn=client_fn, ) @@ -65,8 +65,8 @@ def main() -> None: train_loader, val_loader, test_loader = mnist.load_data() # Flower client - client = FlowerClient(model, train_loader, val_loader, test_loader) - fl.client.start_numpy_client(server_address="127.0.0.1:8080", client=client) + client = FlowerClient(model, train_loader, val_loader, test_loader).to_client() + fl.client.start_client(server_address="127.0.0.1:8080", client=client) if __name__ == "__main__": diff --git a/e2e/pytorch-lightning/driver.py b/e2e/pytorch-lightning/driver.py index 2b1b35d9e89c..cc452ea523ca 100644 --- a/e2e/pytorch-lightning/driver.py +++ b/e2e/pytorch-lightning/driver.py @@ -1,6 +1,6 @@ import flwr as fl -hist = fl.driver.start_driver( +hist = fl.server.start_driver( server_address="0.0.0.0:9091", config=fl.server.ServerConfig(num_rounds=3), ) diff --git a/e2e/pytorch-lightning/pyproject.toml b/e2e/pytorch-lightning/pyproject.toml index e79eb72a56df..951349c03a04 100644 --- a/e2e/pytorch-lightning/pyproject.toml +++ b/e2e/pytorch-lightning/pyproject.toml @@ -11,5 +11,5 @@ authors = ["The Flower Authors "] [tool.poetry.dependencies] python = "^3.8" flwr = { path = "../../", develop = true, extras = ["simulation"] } -pytorch-lightning = "1.6.0" +pytorch-lightning = "2.1.3" torchvision = "0.14.1" diff --git a/e2e/pytorch/client.py b/e2e/pytorch/client.py index d180ad5d4eca..0f1b8e159f7d 100644 --- a/e2e/pytorch/client.py +++ b/e2e/pytorch/client.py @@ -11,6 +11,7 @@ from tqdm import tqdm import flwr as fl +from flwr.common.configsrecord import ConfigsRecord # ############################################################################# # 1. 
Regular PyTorch pipeline: nn.Module, train, test, and DataLoader @@ -95,14 +96,15 @@ def get_parameters(self, config): def _record_timestamp_to_state(self): """Record timestamp to client's state.""" t_stamp = datetime.now().timestamp() - if STATE_VAR in self.state.state: - self.state.state[STATE_VAR] += f",{t_stamp}" - else: - self.state.state[STATE_VAR] = str(t_stamp) + value = str(t_stamp) + if STATE_VAR in self.context.state.configs.keys(): + value = self.context.state.get_configs(STATE_VAR)[STATE_VAR] # type: ignore + value += f",{t_stamp}" + + self.context.state.set_configs(name=STATE_VAR, record=ConfigsRecord({STATE_VAR: value})) def _retrieve_timestamp_from_state(self): - return self.state.state[STATE_VAR] - + return self.context.state.get_configs(STATE_VAR)[STATE_VAR] def fit(self, parameters, config): set_parameters(net, parameters) train(net, trainloader, epochs=1) @@ -124,14 +126,14 @@ def set_parameters(model, parameters): def client_fn(cid): return FlowerClient().to_client() -flower = fl.flower.Flower( +app = fl.client.ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_numpy_client( + fl.client.start_client( server_address="127.0.0.1:8080", - client=FlowerClient(), + client=FlowerClient().to_client(), ) diff --git a/e2e/pytorch/driver.py b/e2e/pytorch/driver.py index ca860ea47b2d..2ea4de69a62b 100644 --- a/e2e/pytorch/driver.py +++ b/e2e/pytorch/driver.py @@ -18,7 +18,7 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: strategy = fl.server.strategy.FedAvg(evaluate_metrics_aggregation_fn=weighted_average) # Start Flower server -hist = fl.driver.start_driver( +hist = fl.server.start_driver( server_address="0.0.0.0:9091", config=fl.server.ServerConfig(num_rounds=3), strategy=strategy, diff --git a/e2e/scikit-learn/client.py b/e2e/scikit-learn/client.py index fdca96c1697a..e073d3cb2748 100644 --- a/e2e/scikit-learn/client.py +++ b/e2e/scikit-learn/client.py @@ -46,10 +46,10 @@ def 
evaluate(self, parameters, config): # type: ignore def client_fn(cid): return FlowerClient().to_client() -flower = fl.flower.Flower( +app = fl.client.ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_numpy_client(server_address="0.0.0.0:8080", client=FlowerClient()) + fl.client.start_client(server_address="0.0.0.0:8080", client=FlowerClient().to_client()) diff --git a/e2e/scikit-learn/driver.py b/e2e/scikit-learn/driver.py index 032a2f7a0dc6..29051d02c6b6 100644 --- a/e2e/scikit-learn/driver.py +++ b/e2e/scikit-learn/driver.py @@ -36,7 +36,7 @@ def evaluate(server_round, parameters: fl.common.NDArrays, config): evaluate_fn=get_evaluate_fn(model), on_fit_config_fn=fit_round, ) - hist = fl.driver.start_driver( + hist = fl.server.start_driver( server_address="0.0.0.0:9091", strategy=strategy, config=fl.server.ServerConfig(num_rounds=3), diff --git a/e2e/strategies/client.py b/e2e/strategies/client.py index eb4598cb5439..3b49f770dc6b 100644 --- a/e2e/strategies/client.py +++ b/e2e/strategies/client.py @@ -47,11 +47,11 @@ def client_fn(cid): return FlowerClient().to_client() -flower = fl.flower.Flower( +app = fl.client.ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_numpy_client(server_address="127.0.0.1:8080", client=FlowerClient()) + fl.client.start_client(server_address="127.0.0.1:8080", client=FlowerClient().to_client()) diff --git a/e2e/tabnet/client.py b/e2e/tabnet/client.py index 3c10df0c79f1..0290ba4629de 100644 --- a/e2e/tabnet/client.py +++ b/e2e/tabnet/client.py @@ -81,10 +81,10 @@ def evaluate(self, parameters, config): def client_fn(cid): return FlowerClient().to_client() -flower = fl.flower.Flower( +app = fl.client.ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_numpy_client(server_address="127.0.0.1:8080", client=FlowerClient()) + fl.client.start_client(server_address="127.0.0.1:8080", 
client=FlowerClient().to_client()) diff --git a/e2e/tabnet/driver.py b/e2e/tabnet/driver.py index 2b1b35d9e89c..cc452ea523ca 100644 --- a/e2e/tabnet/driver.py +++ b/e2e/tabnet/driver.py @@ -1,6 +1,6 @@ import flwr as fl -hist = fl.driver.start_driver( +hist = fl.server.start_driver( server_address="0.0.0.0:9091", config=fl.server.ServerConfig(num_rounds=3), ) diff --git a/e2e/tensorflow/client.py b/e2e/tensorflow/client.py index 4ad2d5ebda57..10ee91136241 100644 --- a/e2e/tensorflow/client.py +++ b/e2e/tensorflow/client.py @@ -34,10 +34,10 @@ def evaluate(self, parameters, config): def client_fn(cid): return FlowerClient().to_client() -flower = fl.flower.Flower( +app = fl.client.ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_numpy_client(server_address="127.0.0.1:8080", client=FlowerClient()) + fl.client.start_client(server_address="127.0.0.1:8080", client=FlowerClient().to_client()) diff --git a/e2e/tensorflow/driver.py b/e2e/tensorflow/driver.py index ca860ea47b2d..2ea4de69a62b 100644 --- a/e2e/tensorflow/driver.py +++ b/e2e/tensorflow/driver.py @@ -18,7 +18,7 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: strategy = fl.server.strategy.FedAvg(evaluate_metrics_aggregation_fn=weighted_average) # Start Flower server -hist = fl.driver.start_driver( +hist = fl.server.start_driver( server_address="0.0.0.0:9091", config=fl.server.ServerConfig(num_rounds=3), strategy=strategy, diff --git a/e2e/test_driver.sh b/e2e/test_driver.sh index 32314bd22533..3d4864a1b0fb 100755 --- a/e2e/test_driver.sh +++ b/e2e/test_driver.sh @@ -13,13 +13,34 @@ case "$1" in ;; esac -timeout 2m flower-server $server_arg & +case "$2" in + rest) + rest_arg="--rest" + server_address="http://localhost:9093" + db_arg="--database :flwr-in-memory-state:" + ;; + sqlite) + rest_arg="" + server_address="127.0.0.1:9092" + db_arg="--database $(date +%s).db" + ;; + *) + rest_arg="" + server_address="127.0.0.1:9092" + 
db_arg="--database :flwr-in-memory-state:" + ;; +esac + +timeout 2m flower-superlink $server_arg $db_arg $rest_arg & +sl_pid=$! sleep 3 -timeout 2m flower-client client:flower $client_arg --server 127.0.0.1:9092 & +timeout 2m flower-client-app client:app $client_arg $rest_arg --server $server_address & +cl1_pid=$! sleep 3 -timeout 2m flower-client client:flower $client_arg --server 127.0.0.1:9092 & +timeout 2m flower-client-app client:app $client_arg $rest_arg --server $server_address & +cl2_pid=$! sleep 3 timeout 2m python driver.py & @@ -29,7 +50,7 @@ wait $pid res=$? if [[ "$res" = "0" ]]; - then echo "Training worked correctly" && pkill flower-client && pkill flower-server; + then echo "Training worked correctly"; kill $cl1_pid; kill $cl2_pid; kill $sl_pid; else echo "Training had an issue" && exit 1; fi diff --git a/examples/advanced-pytorch/README.md b/examples/advanced-pytorch/README.md index db0245e41453..c1ba85b95879 100644 --- a/examples/advanced-pytorch/README.md +++ b/examples/advanced-pytorch/README.md @@ -1,6 +1,6 @@ # Advanced Flower Example (PyTorch) -This example demonstrates an advanced federated learning setup using Flower with PyTorch. It differs from the quickstart example in the following ways: +This example demonstrates an advanced federated learning setup using Flower with PyTorch. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) and it differs from the quickstart example in the following ways: - 10 clients (instead of just 2) - Each client holds a local dataset of 5000 training examples and 1000 test examples (note that using the `run.sh` script will only select 10 data samples by default, as the `--toy` argument is set). 
@@ -59,12 +59,13 @@ pip install -r requirements.txt The included `run.sh` will start the Flower server (using `server.py`), sleep for 2 seconds to ensure that the server is up, and then start 10 Flower clients (using `client.py`) with only a small subset of the data (in order to run on any machine), -but this can be changed by removing the `--toy True` argument in the script. You can simply start everything in a terminal as follows: +but this can be changed by removing the `--toy` argument in the script. You can simply start everything in a terminal as follows: ```shell -poetry run ./run.sh +# After activating your environment +./run.sh ``` The `run.sh` script starts processes in the background so that you don't have to open eleven terminal windows. If you experiment with the code example and something goes wrong, simply using `CTRL + C` on Linux (or `CMD + C` on macOS) wouldn't normally kill all these processes, which is why the script ends with `trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT` and `wait`. This simply allows you to stop the experiment using `CTRL + C` (or `CMD + C`). If you change the script and anything goes wrong you can still use `killall python` (or `killall python3`) to kill all background processes (or a more specific command if you have other Python processes running that you don't want to kill). -You can also manually run `poetry run python3 server.py` and `poetry run python3 client.py` for as many clients as you want but you have to make sure that each command is ran in a different terminal window (or a different computer on the network). +You can also manually run `python3 server.py` and `python3 client.py --client-id ` for as many clients as you want but you have to make sure that each command is run in a different terminal window (or a different computer on the network). In addition, you can make your clients use either `EfficienNet` (default) or `AlexNet` (but all clients in the experiment should use the same). 
Switch between models using the `--model` flag when launching `client.py` and `server.py`. diff --git a/examples/advanced-pytorch/client.py b/examples/advanced-pytorch/client.py index f9ffb6181fd8..d4c8abe3d404 100644 --- a/examples/advanced-pytorch/client.py +++ b/examples/advanced-pytorch/client.py @@ -1,11 +1,11 @@ import utils from torch.utils.data import DataLoader -import torchvision.datasets import torch import flwr as fl import argparse from collections import OrderedDict import warnings +import datasets warnings.filterwarnings("ignore") @@ -13,47 +13,49 @@ class CifarClient(fl.client.NumPyClient): def __init__( self, - trainset: torchvision.datasets, - testset: torchvision.datasets, - device: str, + trainset: datasets.Dataset, + testset: datasets.Dataset, + device: torch.device, + model_str: str, validation_split: int = 0.1, ): self.device = device self.trainset = trainset self.testset = testset self.validation_split = validation_split + if model_str == "alexnet": + self.model = utils.load_alexnet(classes=10) + else: + self.model = utils.load_efficientnet(classes=10) def set_parameters(self, parameters): - """Loads a efficientnet model and replaces it parameters with the ones given.""" - model = utils.load_efficientnet(classes=10) - params_dict = zip(model.state_dict().keys(), parameters) + """Loads a alexnet or efficientnet model and replaces it parameters with the + ones given.""" + + params_dict = zip(self.model.state_dict().keys(), parameters) state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - model.load_state_dict(state_dict, strict=True) - return model + self.model.load_state_dict(state_dict, strict=True) def fit(self, parameters, config): """Train parameters on the locally held training set.""" # Update local model parameters - model = self.set_parameters(parameters) + self.set_parameters(parameters) # Get hyperparameters for this round batch_size: int = config["batch_size"] epochs: int = config["local_epochs"] - n_valset = 
int(len(self.trainset) * self.validation_split) - - valset = torch.utils.data.Subset(self.trainset, range(0, n_valset)) - trainset = torch.utils.data.Subset( - self.trainset, range(n_valset, len(self.trainset)) - ) + train_valid = self.trainset.train_test_split(self.validation_split) + trainset = train_valid["train"] + valset = train_valid["test"] - trainLoader = DataLoader(trainset, batch_size=batch_size, shuffle=True) - valLoader = DataLoader(valset, batch_size=batch_size) + train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True) + val_loader = DataLoader(valset, batch_size=batch_size) - results = utils.train(model, trainLoader, valLoader, epochs, self.device) + results = utils.train(self.model, train_loader, val_loader, epochs, self.device) - parameters_prime = utils.get_model_params(model) + parameters_prime = utils.get_model_params(self.model) num_examples_train = len(trainset) return parameters_prime, num_examples_train, results @@ -61,7 +63,7 @@ def fit(self, parameters, config): def evaluate(self, parameters, config): """Evaluate parameters on the locally held test set.""" # Update local model parameters - model = self.set_parameters(parameters) + self.set_parameters(parameters) # Get config values steps: int = config["val_steps"] @@ -69,17 +71,17 @@ def evaluate(self, parameters, config): # Evaluate global model parameters on the local test data and return results testloader = DataLoader(self.testset, batch_size=16) - loss, accuracy = utils.test(model, testloader, steps, self.device) + loss, accuracy = utils.test(self.model, testloader, steps, self.device) return float(loss), len(self.testset), {"accuracy": float(accuracy)} -def client_dry_run(device: str = "cpu"): +def client_dry_run(device: torch.device = "cpu"): """Weak tests to check whether all client methods are working as expected.""" model = utils.load_efficientnet(classes=10) trainset, testset = utils.load_partition(0) - trainset = torch.utils.data.Subset(trainset, range(10)) - 
testset = torch.utils.data.Subset(testset, range(10)) + trainset = trainset.select(range(10)) + testset = testset.select(range(10)) client = CifarClient(trainset, testset, device) client.fit( utils.get_model_params(model), @@ -102,7 +104,7 @@ def main() -> None: help="Do a dry-run to check the client", ) parser.add_argument( - "--partition", + "--client-id", type=int, default=0, choices=range(0, 10), @@ -112,9 +114,7 @@ def main() -> None: ) parser.add_argument( "--toy", - type=bool, - default=False, - required=False, + action="store_true", help="Set to true to quicky run the client using only 10 datasamples. \ Useful for testing purposes. Default: False", ) @@ -125,6 +125,14 @@ def main() -> None: required=False, help="Set to true to use GPU. Default: False", ) + parser.add_argument( + "--model", + type=str, + default="efficientnet", + choices=["efficientnet", "alexnet"], + help="Use either Efficientnet or Alexnet models. \ + If you want to achieve differential privacy, please use the Alexnet model", + ) args = parser.parse_args() @@ -136,16 +144,14 @@ def main() -> None: client_dry_run(device) else: # Load a subset of CIFAR-10 to simulate the local data partition - trainset, testset = utils.load_partition(args.partition) + trainset, testset = utils.load_partition(args.client_id) if args.toy: - trainset = torch.utils.data.Subset(trainset, range(10)) - testset = torch.utils.data.Subset(testset, range(10)) - + trainset = trainset.select(range(10)) + testset = testset.select(range(10)) # Start Flower client - client = CifarClient(trainset, testset, device) - - fl.client.start_numpy_client(server_address="127.0.0.1:8080", client=client) + client = CifarClient(trainset, testset, device, args.model).to_client() + fl.client.start_client(server_address="127.0.0.1:8080", client=client) if __name__ == "__main__": diff --git a/examples/advanced-pytorch/pyproject.toml b/examples/advanced-pytorch/pyproject.toml index a12f3c47de70..89fd5a32a89e 100644 --- 
a/examples/advanced-pytorch/pyproject.toml +++ b/examples/advanced-pytorch/pyproject.toml @@ -14,6 +14,7 @@ authors = [ [tool.poetry.dependencies] python = ">=3.8,<3.11" flwr = ">=1.0,<2.0" +flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } torch = "1.13.1" torchvision = "0.14.1" validators = "0.18.2" diff --git a/examples/advanced-pytorch/requirements.txt b/examples/advanced-pytorch/requirements.txt index ba7b284df90e..f4d6a0774162 100644 --- a/examples/advanced-pytorch/requirements.txt +++ b/examples/advanced-pytorch/requirements.txt @@ -1,4 +1,5 @@ flwr>=1.0, <2.0 +flwr-datasets[vision]>=0.0.2, <1.0.0 torch==1.13.1 torchvision==0.14.1 validators==0.18.2 diff --git a/examples/advanced-pytorch/run.sh b/examples/advanced-pytorch/run.sh index 212285f504f9..c3d52491b987 100755 --- a/examples/advanced-pytorch/run.sh +++ b/examples/advanced-pytorch/run.sh @@ -2,20 +2,12 @@ set -e cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ -# Download the CIFAR-10 dataset -python -c "from torchvision.datasets import CIFAR10; CIFAR10('./dataset', download=True)" - -# Download the EfficientNetB0 model -python -c "import torch; torch.hub.load( \ - 'NVIDIA/DeepLearningExamples:torchhub', \ - 'nvidia_efficientnet_b0', pretrained=True)" - -python server.py & -sleep 3 # Sleep for 3s to give the server enough time to start +python server.py --toy & +sleep 10 # Sleep for 10s to give the server enough time to start and dowload the dataset for i in `seq 0 9`; do echo "Starting client $i" - python client.py --partition=${i} --toy True & + python client.py --client-id=${i} --toy & done # Enable CTRL+C to stop all background processes diff --git a/examples/advanced-pytorch/server.py b/examples/advanced-pytorch/server.py index 8343e62da69f..489694ab1ea1 100644 --- a/examples/advanced-pytorch/server.py +++ b/examples/advanced-pytorch/server.py @@ -10,6 +10,8 @@ import warnings +from flwr_datasets import FederatedDataset + warnings.filterwarnings("ignore") 
@@ -39,18 +41,13 @@ def evaluate_config(server_round: int): def get_evaluate_fn(model: torch.nn.Module, toy: bool): """Return an evaluation function for server-side evaluation.""" - # Load data and model here to avoid the overhead of doing it in `evaluate` itself - trainset, _, _ = utils.load_data() - - n_train = len(trainset) + # Load data here to avoid the overhead of doing it in `evaluate` itself + centralized_data = utils.load_centralized_data() if toy: # use only 10 samples as validation set - valset = torch.utils.data.Subset(trainset, range(n_train - 10, n_train)) - else: - # Use the last 5k training examples as a validation set - valset = torch.utils.data.Subset(trainset, range(n_train - 5000, n_train)) + centralized_data = centralized_data.select(range(10)) - valLoader = DataLoader(valset, batch_size=16) + val_loader = DataLoader(centralized_data, batch_size=16) # The `evaluate` function will be called after every round def evaluate( @@ -63,7 +60,7 @@ def evaluate( state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) model.load_state_dict(state_dict, strict=True) - loss, accuracy = utils.test(model, valLoader) + loss, accuracy = utils.test(model, val_loader) return loss, {"accuracy": accuracy} return evaluate @@ -79,23 +76,32 @@ def main(): parser = argparse.ArgumentParser(description="Flower") parser.add_argument( "--toy", - type=bool, - default=False, - required=False, + action="store_true", help="Set to true to use only 10 datasamples for validation. \ Useful for testing purposes. Default: False", ) + parser.add_argument( + "--model", + type=str, + default="efficientnet", + choices=["efficientnet", "alexnet"], + help="Use either Efficientnet or Alexnet models. 
\ + If you want to achieve differential privacy, please use the Alexnet model", + ) args = parser.parse_args() - model = utils.load_efficientnet(classes=10) + if args.model == "alexnet": + model = utils.load_alexnet(classes=10) + else: + model = utils.load_efficientnet(classes=10) model_parameters = [val.cpu().numpy() for _, val in model.state_dict().items()] # Create strategy strategy = fl.server.strategy.FedAvg( - fraction_fit=0.2, - fraction_evaluate=0.2, + fraction_fit=1.0, + fraction_evaluate=1.0, min_fit_clients=2, min_evaluate_clients=2, min_available_clients=10, diff --git a/examples/advanced-pytorch/utils.py b/examples/advanced-pytorch/utils.py index 8788ead90dee..186f079010dc 100644 --- a/examples/advanced-pytorch/utils.py +++ b/examples/advanced-pytorch/utils.py @@ -1,59 +1,59 @@ import torch -import torchvision.transforms as transforms -from torchvision.datasets import CIFAR10 - +from torchvision.transforms import Compose, ToTensor, Normalize, Resize, CenterCrop +from torchvision.models import efficientnet_b0, AlexNet import warnings -warnings.filterwarnings("ignore") +from flwr_datasets import FederatedDataset -# DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") +warnings.filterwarnings("ignore") -def load_data(): - """Load CIFAR-10 (training and test set).""" - transform = transforms.Compose( - [ - transforms.Resize(256), - transforms.CenterCrop(224), - transforms.ToTensor(), - transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), - ] - ) - trainset = CIFAR10("./dataset", train=True, download=True, transform=transform) - testset = CIFAR10("./dataset", train=False, download=True, transform=transform) +def load_partition(node_id, toy: bool = False): + """Load partition CIFAR10 data.""" + fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) + partition = fds.load_partition(node_id) + # Divide data on each node: 80% train, 20% test + partition_train_test = 
partition.train_test_split(test_size=0.2) + partition_train_test = partition_train_test.with_transform(apply_transforms) + return partition_train_test["train"], partition_train_test["test"] - num_examples = {"trainset": len(trainset), "testset": len(testset)} - return trainset, testset, num_examples +def load_centralized_data(): + fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) + centralized_data = fds.load_full("test") + centralized_data = centralized_data.with_transform(apply_transforms) + return centralized_data -def load_partition(idx: int): - """Load 1/10th of the training and test data to simulate a partition.""" - assert idx in range(10) - trainset, testset, num_examples = load_data() - n_train = int(num_examples["trainset"] / 10) - n_test = int(num_examples["testset"] / 10) - train_parition = torch.utils.data.Subset( - trainset, range(idx * n_train, (idx + 1) * n_train) - ) - test_parition = torch.utils.data.Subset( - testset, range(idx * n_test, (idx + 1) * n_test) +def apply_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + pytorch_transforms = Compose( + [ + Resize(256), + CenterCrop(224), + ToTensor(), + Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ] ) - return (train_parition, test_parition) + batch["img"] = [pytorch_transforms(img) for img in batch["img"]] + return batch -def train(net, trainloader, valloader, epochs, device: str = "cpu"): +def train( + net, trainloader, valloader, epochs, device: torch.device = torch.device("cpu") +): """Train the network on the training set.""" print("Starting training...") net.to(device) # move model to GPU if available criterion = torch.nn.CrossEntropyLoss().to(device) optimizer = torch.optim.SGD( - net.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4 + net.parameters(), lr=0.001, momentum=0.9, weight_decay=1e-4 ) net.train() for _ in range(epochs): - for images, labels in trainloader: + for batch in trainloader: + images, labels 
= batch["img"], batch["label"] images, labels = images.to(device), labels.to(device) optimizer.zero_grad() loss = criterion(net(images), labels) @@ -74,7 +74,9 @@ def train(net, trainloader, valloader, epochs, device: str = "cpu"): return results -def test(net, testloader, steps: int = None, device: str = "cpu"): +def test( + net, testloader, steps: int = None, device: torch.device = torch.device("cpu") +): """Validate the network on the entire test set.""" print("Starting evalutation...") net.to(device) # move model to GPU if available @@ -82,7 +84,8 @@ def test(net, testloader, steps: int = None, device: str = "cpu"): correct, loss = 0, 0.0 net.eval() with torch.no_grad(): - for batch_idx, (images, labels) in enumerate(testloader): + for batch_idx, batch in enumerate(testloader): + images, labels = batch["img"], batch["label"] images, labels = images.to(device), labels.to(device) outputs = net(images) loss += criterion(outputs, labels).item() @@ -95,36 +98,21 @@ def test(net, testloader, steps: int = None, device: str = "cpu"): return loss, accuracy -def replace_classifying_layer(efficientnet_model, num_classes: int = 10): - """Replaces the final layer of the classifier.""" - num_features = efficientnet_model.classifier.fc.in_features - efficientnet_model.classifier.fc = torch.nn.Linear(num_features, num_classes) - - -def load_efficientnet(entrypoint: str = "nvidia_efficientnet_b0", classes: int = None): - """Loads pretrained efficientnet model from torch hub. Replaces final classifying - layer if classes is specified. - - Args: - entrypoint: EfficientNet model to download. - For supported entrypoints, please refer - https://pytorch.org/hub/nvidia_deeplearningexamples_efficientnet/ - classes: Number of classes in final classifying layer. Leave as None to get the downloaded - model untouched. 
- Returns: - EfficientNet Model - - Note: One alternative implementation can be found at https://github.com/lukemelas/EfficientNet-PyTorch - """ - efficientnet = torch.hub.load( - "NVIDIA/DeepLearningExamples:torchhub", entrypoint, pretrained=True - ) - - if classes is not None: - replace_classifying_layer(efficientnet, classes) +def load_efficientnet(classes: int = 10): + """Loads EfficienNetB0 from TorchVision.""" + efficientnet = efficientnet_b0(pretrained=True) + # Re-init output linear layer with the right number of classes + model_classes = efficientnet.classifier[1].in_features + if classes != model_classes: + efficientnet.classifier[1] = torch.nn.Linear(model_classes, classes) return efficientnet def get_model_params(model): """Returns a model's parameters.""" return [val.cpu().numpy() for _, val in model.state_dict().items()] + + +def load_alexnet(classes): + """Load AlexNet model from TorchVision.""" + return AlexNet(num_classes=classes) diff --git a/examples/advanced-tensorflow/README.md b/examples/advanced-tensorflow/README.md index 31bf5edb64c6..59866fd99a06 100644 --- a/examples/advanced-tensorflow/README.md +++ b/examples/advanced-tensorflow/README.md @@ -1,9 +1,9 @@ # Advanced Flower Example (TensorFlow/Keras) -This example demonstrates an advanced federated learning setup using Flower with TensorFlow/Keras. It differs from the quickstart example in the following ways: +This example demonstrates an advanced federated learning setup using Flower with TensorFlow/Keras. 
This example uses [Flower Datasets](https://flower.ai/docs/datasets/) and it differs from the quickstart example in the following ways: - 10 clients (instead of just 2) -- Each client holds a local dataset of 5000 training examples and 1000 test examples (note that by default only a small subset of this data is used when running the `run.sh` script) +- Each client holds a local dataset of 1/10 of the train datasets and 80% is training examples and 20% as test examples (note that by default only a small subset of this data is used when running the `run.sh` script) - Server-side model evaluation after parameter aggregation - Hyperparameter schedule using config functions - Custom return values @@ -57,10 +57,11 @@ pip install -r requirements.txt ## Run Federated Learning with TensorFlow/Keras and Flower -The included `run.sh` will call a script to generate certificates (which will be used by server and clients), start the Flower server (using `server.py`), sleep for 2 seconds to ensure the the server is up, and then start 10 Flower clients (using `client.py`). You can simply start everything in a terminal as follows: +The included `run.sh` will call a script to generate certificates (which will be used by server and clients), start the Flower server (using `server.py`), sleep for 10 seconds to ensure the the server is up, and then start 10 Flower clients (using `client.py`). You can simply start everything in a terminal as follows: ```shell -poetry run ./run.sh +# Once you have activated your environment +./run.sh ``` The `run.sh` script starts processes in the background so that you don't have to open eleven terminal windows. If you experiment with the code example and something goes wrong, simply using `CTRL + C` on Linux (or `CMD + C` on macOS) wouldn't normally kill all these processes, which is why the script ends with `trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT` and `wait`. 
This simply allows you to stop the experiment using `CTRL + C` (or `CMD + C`). If you change the script and anything goes wrong you can still use `killall python` (or `killall python3`) to kill all background processes (or a more specific command if you have other Python processes running that you don't want to kill). diff --git a/examples/advanced-tensorflow/client.py b/examples/advanced-tensorflow/client.py index 1c0b61575635..17d1d2306270 100644 --- a/examples/advanced-tensorflow/client.py +++ b/examples/advanced-tensorflow/client.py @@ -6,6 +6,8 @@ import flwr as fl +from flwr_datasets import FederatedDataset + # Make TensorFlow logs less verbose os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" @@ -74,7 +76,7 @@ def main() -> None: # Parse command line argument `partition` parser = argparse.ArgumentParser(description="Flower") parser.add_argument( - "--partition", + "--client-id", type=int, default=0, choices=range(0, 10), @@ -84,9 +86,7 @@ def main() -> None: ) parser.add_argument( "--toy", - type=bool, - default=False, - required=False, + action="store_true", help="Set to true to quicky run the client using only 10 datasamples. " "Useful for testing purposes. 
Default: False", ) @@ -99,16 +99,16 @@ def main() -> None: model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"]) # Load a subset of CIFAR-10 to simulate the local data partition - (x_train, y_train), (x_test, y_test) = load_partition(args.partition) + x_train, y_train, x_test, y_test = load_partition(args.client_id) if args.toy: x_train, y_train = x_train[:10], y_train[:10] x_test, y_test = x_test[:10], y_test[:10] # Start Flower client - client = CifarClient(model, x_train, y_train, x_test, y_test) + client = CifarClient(model, x_train, y_train, x_test, y_test).to_client() - fl.client.start_numpy_client( + fl.client.start_client( server_address="127.0.0.1:8080", client=client, root_certificates=Path(".cache/certificates/ca.crt").read_bytes(), @@ -117,15 +117,16 @@ def main() -> None: def load_partition(idx: int): """Load 1/10th of the training and test data to simulate a partition.""" - assert idx in range(10) - (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data() - return ( - x_train[idx * 5000 : (idx + 1) * 5000], - y_train[idx * 5000 : (idx + 1) * 5000], - ), ( - x_test[idx * 1000 : (idx + 1) * 1000], - y_test[idx * 1000 : (idx + 1) * 1000], - ) + # Download and partition dataset + fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) + partition = fds.load_partition(idx) + partition.set_format("numpy") + + # Divide data on each node: 80% train, 20% test + partition = partition.train_test_split(test_size=0.2) + x_train, y_train = partition["train"]["img"] / 255.0, partition["train"]["label"] + x_test, y_test = partition["test"]["img"] / 255.0, partition["test"]["label"] + return x_train, y_train, x_test, y_test if __name__ == "__main__": diff --git a/examples/advanced-tensorflow/pyproject.toml b/examples/advanced-tensorflow/pyproject.toml index 293ba64b3f43..2f16d8a15584 100644 --- a/examples/advanced-tensorflow/pyproject.toml +++ b/examples/advanced-tensorflow/pyproject.toml @@ -11,5 +11,6 @@ 
authors = ["The Flower Authors "] [tool.poetry.dependencies] python = ">=3.8,<3.11" flwr = ">=1.0,<2.0" +flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } tensorflow-cpu = {version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "platform_machine == \"x86_64\""} tensorflow-macos = {version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "sys_platform == \"darwin\" and platform_machine == \"arm64\""} diff --git a/examples/advanced-tensorflow/requirements.txt b/examples/advanced-tensorflow/requirements.txt index 7a70c46a8128..0cb5fe8c07af 100644 --- a/examples/advanced-tensorflow/requirements.txt +++ b/examples/advanced-tensorflow/requirements.txt @@ -1,3 +1,4 @@ flwr>=1.0, <2.0 +flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } tensorflow-cpu>=2.9.1, != 2.11.1 ; platform_machine == "x86_64" tensorflow-macos>=2.9.1, != 2.11.1 ; sys_platform == "darwin" and platform_machine == "arm64" diff --git a/examples/advanced-tensorflow/run.sh b/examples/advanced-tensorflow/run.sh index 8ddb6a252b52..4acef1371571 100755 --- a/examples/advanced-tensorflow/run.sh +++ b/examples/advanced-tensorflow/run.sh @@ -5,14 +5,11 @@ echo "Starting server" python server.py & -sleep 3 # Sleep for 3s to give the server enough time to start +sleep 10 # Sleep for 10s to give the server enough time to start and download the dataset -# Ensure that the Keras dataset used in client.py is already cached. 
-python -c "import tensorflow as tf; tf.keras.datasets.cifar10.load_data()" - -for i in `seq 0 9`; do +for i in $(seq 0 9); do echo "Starting client $i" - python client.py --partition=${i} --toy True & + python client.py --client-id=${i} --toy & done # This will allow you to use CTRL+C to stop all background processes diff --git a/examples/advanced-tensorflow/server.py b/examples/advanced-tensorflow/server.py index e1eb3d4fd8f7..26dde312bee5 100644 --- a/examples/advanced-tensorflow/server.py +++ b/examples/advanced-tensorflow/server.py @@ -4,6 +4,8 @@ import flwr as fl import tensorflow as tf +from flwr_datasets import FederatedDataset + def main() -> None: # Load and compile model for @@ -43,11 +45,11 @@ def main() -> None: def get_evaluate_fn(model): """Return an evaluation function for server-side evaluation.""" - # Load data and model here to avoid the overhead of doing it in `evaluate` itself - (x_train, y_train), _ = tf.keras.datasets.cifar10.load_data() - - # Use the last 5k training examples as a validation set - x_val, y_val = x_train[45000:50000], y_train[45000:50000] + # Load data here to avoid the overhead of doing it in `evaluate` itself + fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) + test = fds.load_full("test") + test.set_format("numpy") + x_test, y_test = test["img"] / 255.0, test["label"] # The `evaluate` function will be called after every round def evaluate( @@ -56,7 +58,7 @@ def evaluate( config: Dict[str, fl.common.Scalar], ) -> Optional[Tuple[float, Dict[str, fl.common.Scalar]]]: model.set_weights(parameters) # Update model with the latest parameters - loss, accuracy = model.evaluate(x_val, y_val) + loss, accuracy = model.evaluate(x_test, y_test) return loss, {"accuracy": accuracy} return evaluate diff --git a/examples/custom-metrics/README.md b/examples/custom-metrics/README.md new file mode 100644 index 000000000000..317fb6336106 --- /dev/null +++ b/examples/custom-metrics/README.md @@ -0,0 +1,106 @@ +# Flower 
Example using Custom Metrics + +This simple example demonstrates how to calculate custom metrics over multiple clients beyond the traditional ones available in the ML frameworks. In this case, it demonstrates the use of ready-available `scikit-learn` metrics: accuracy, recall, precision, and f1-score. + +Once both the test values (`y_test`) and the predictions (`y_pred`) are available on the client side (`client.py`), other metrics or custom ones are possible to be calculated. + +The main takeaways of this implementation are: + +- the use of the `output_dict` on the client side - inside `evaluate` method on `client.py` +- the use of the `evaluate_metrics_aggregation_fn` - to aggregate the metrics on the server side, part of the `strategy` on `server.py` + +This example is based on the `quickstart-tensorflow` with CIFAR-10, source [here](https://flower.ai/docs/quickstart-tensorflow.html), with the addition of [Flower Datasets](https://flower.ai/docs/datasets/index.html) to retrieve the CIFAR-10. + +Using the CIFAR-10 dataset for classification, this is a multi-class classification problem, thus some changes on how to calculate the metrics using `average='micro'` and `np.argmax` is required. For binary classification, this is not required. Also, for unsupervised learning tasks, such as using a deep autoencoder, a custom metric based on reconstruction error could be implemented on client side. + +## Project Setup + +Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: + +```shell +git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/custom-metrics . 
&& rm -rf flower && cd custom-metrics +``` + +This will create a new directory called `custom-metrics` containing the following files: + +```shell +-- pyproject.toml +-- requirements.txt +-- client.py +-- server.py +-- run.sh +-- README.md +``` + +### Installing Dependencies + +Project dependencies (such as `scikit-learn`, `tensorflow` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. + +#### Poetry + +```shell +poetry install +poetry shell +``` + +Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: + +```shell +poetry run python3 -c "import flwr" +``` + +If you don't see any errors you're good to go! + +#### pip + +Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. + +```shell +python -m venv venv +source venv/bin/activate +pip install -r requirements.txt +``` + +## Run Federated Learning with Custom Metrics + +Afterwards you are ready to start the Flower server as well as the clients. You can simply start the server in a terminal as follows: + +```shell +python server.py +``` + +Now you are ready to start the Flower clients which will participate in the learning. 
To do so simply open two more terminals and run the following command in each: + +```shell +python client.py +``` + +Alternatively you can run all of it in one shell as follows: + +```shell +python server.py & +# Wait for a few seconds to give the server enough time to start, then: +python client.py & +python client.py +``` + +or + +```shell +chmod +x run.sh +./run.sh +``` + +You will see that Keras is starting a federated training. Have a look to the [Flower Quickstarter documentation](https://flower.ai/docs/quickstart-tensorflow.html) for a detailed explanation. You can add `steps_per_epoch=3` to `model.fit()` if you just want to evaluate that everything works without having to wait for the client-side training to finish (this will save you a lot of time during development). + +Running `run.sh` will result in the following output (after 3 rounds): + +```shell +INFO flwr 2024-01-17 17:45:23,794 | app.py:228 | app_fit: metrics_distributed { + 'accuracy': [(1, 0.10000000149011612), (2, 0.10000000149011612), (3, 0.3393000066280365)], + 'acc': [(1, 0.1), (2, 0.1), (3, 0.3393)], + 'rec': [(1, 0.1), (2, 0.1), (3, 0.3393)], + 'prec': [(1, 0.1), (2, 0.1), (3, 0.3393)], + 'f1': [(1, 0.10000000000000002), (2, 0.10000000000000002), (3, 0.3393)] +} +``` diff --git a/examples/custom-metrics/client.py b/examples/custom-metrics/client.py new file mode 100644 index 000000000000..d0230e455477 --- /dev/null +++ b/examples/custom-metrics/client.py @@ -0,0 +1,73 @@ +import os + +import flwr as fl +import numpy as np +import tensorflow as tf +from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score +from flwr_datasets import FederatedDataset + + +# Make TensorFlow log less verbose +os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" + + +# Load model (MobileNetV2) +model = tf.keras.applications.MobileNetV2((32, 32, 3), classes=10, weights=None) +model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"]) + +# Load data with Flower Datasets (CIFAR-10) 
+fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) +train = fds.load_full("train") +test = fds.load_full("test") + +# Using Numpy format +train_np = train.with_format("numpy") +test_np = test.with_format("numpy") +x_train, y_train = train_np["img"], train_np["label"] +x_test, y_test = test_np["img"], test_np["label"] + + +# Method for extra learning metrics calculation +def eval_learning(y_test, y_pred): + acc = accuracy_score(y_test, y_pred) + rec = recall_score( + y_test, y_pred, average="micro" + ) # average argument required for multi-class + prec = precision_score(y_test, y_pred, average="micro") + f1 = f1_score(y_test, y_pred, average="micro") + return acc, rec, prec, f1 + + +# Define Flower client +class FlowerClient(fl.client.NumPyClient): + def get_parameters(self, config): + return model.get_weights() + + def fit(self, parameters, config): + model.set_weights(parameters) + model.fit(x_train, y_train, epochs=1, batch_size=32) + return model.get_weights(), len(x_train), {} + + def evaluate(self, parameters, config): + model.set_weights(parameters) + loss, accuracy = model.evaluate(x_test, y_test) + y_pred = model.predict(x_test) + y_pred = np.argmax(y_pred, axis=1).reshape( + -1, 1 + ) # MobileNetV2 outputs 10 possible classes, argmax returns just the most probable + + acc, rec, prec, f1 = eval_learning(y_test, y_pred) + output_dict = { + "accuracy": accuracy, # accuracy from tensorflow model.evaluate + "acc": acc, + "rec": rec, + "prec": prec, + "f1": f1, + } + return loss, len(x_test), output_dict + + +# Start Flower client +fl.client.start_client( + server_address="127.0.0.1:8080", client=FlowerClient().to_client() +) diff --git a/examples/custom-metrics/pyproject.toml b/examples/custom-metrics/pyproject.toml new file mode 100644 index 000000000000..8a2da6562018 --- /dev/null +++ b/examples/custom-metrics/pyproject.toml @@ -0,0 +1,19 @@ +[build-system] +requires = ["poetry-core>=1.4.0"] +build-backend = "poetry.core.masonry.api" + 
+[tool.poetry] +name = "custom-metrics" +version = "0.1.0" +description = "Federated Learning with Flower and Custom Metrics" +authors = [ + "The Flower Authors ", + "Gustavo Bertoli " +] + +[tool.poetry.dependencies] +python = ">=3.8,<3.11" +flwr = ">=1.0,<2.0" +flwr-datasets = { version = "*", extras = ["vision"] } +scikit-learn = "^1.2.2" +tensorflow = "==2.12.0" \ No newline at end of file diff --git a/examples/custom-metrics/requirements.txt b/examples/custom-metrics/requirements.txt new file mode 100644 index 000000000000..69d867c5f287 --- /dev/null +++ b/examples/custom-metrics/requirements.txt @@ -0,0 +1,4 @@ +flwr>=1.0,<2.0 +flwr-datasets[vision] +scikit-learn>=1.2.2 +tensorflow==2.12.0 diff --git a/examples/mt-pytorch-callable/run.sh b/examples/custom-metrics/run.sh similarity index 55% rename from examples/mt-pytorch-callable/run.sh rename to examples/custom-metrics/run.sh index d2bf34f834b1..c64f362086aa 100755 --- a/examples/mt-pytorch-callable/run.sh +++ b/examples/custom-metrics/run.sh @@ -1,9 +1,4 @@ #!/bin/bash -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ - -# Download the CIFAR-10 dataset -python -c "from torchvision.datasets import CIFAR10; CIFAR10('./data', download=True)" echo "Starting server" python server.py & @@ -14,7 +9,7 @@ for i in `seq 0 1`; do python client.py & done -# Enable CTRL+C to stop all background processes +# This will allow you to use CTRL+C to stop all background processes trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM # Wait for all background processes to complete wait diff --git a/examples/custom-metrics/server.py b/examples/custom-metrics/server.py new file mode 100644 index 000000000000..f8420bf51f16 --- /dev/null +++ b/examples/custom-metrics/server.py @@ -0,0 +1,58 @@ +import flwr as fl +import numpy as np + + +# Define metrics aggregation function +def average_metrics(metrics): + """Aggregate metrics from multiple clients by calculating mean averages. 
+ + Parameters: + - metrics (list): A list containing tuples, where each tuple represents metrics for a client. + Each tuple is structured as (num_examples, metric), where: + - num_examples (int): The number of examples used to compute the metrics. + - metric (dict): A dictionary containing custom metrics provided as `output_dict` + in the `evaluate` method from `client.py`. + + Returns: + A dictionary with the aggregated metrics, calculating mean averages. The keys of the + dictionary represent different metrics, including: + - 'accuracy': Mean accuracy calculated by TensorFlow. + - 'acc': Mean accuracy from scikit-learn. + - 'rec': Mean recall from scikit-learn. + - 'prec': Mean precision from scikit-learn. + - 'f1': Mean F1 score from scikit-learn. + + Note: If a weighted average is required, the `num_examples` parameter can be leveraged. + + Example: + Example `metrics` list for two clients after the last round: + [(10000, {'prec': 0.108, 'acc': 0.108, 'f1': 0.108, 'accuracy': 0.1080000028014183, 'rec': 0.108}), + (10000, {'f1': 0.108, 'rec': 0.108, 'accuracy': 0.1080000028014183, 'prec': 0.108, 'acc': 0.108})] + """ + + # Here num_examples are not taken into account by using _ + accuracies_tf = np.mean([metric["accuracy"] for _, metric in metrics]) + accuracies = np.mean([metric["acc"] for _, metric in metrics]) + recalls = np.mean([metric["rec"] for _, metric in metrics]) + precisions = np.mean([metric["prec"] for _, metric in metrics]) + f1s = np.mean([metric["f1"] for _, metric in metrics]) + + return { + "accuracy": accuracies_tf, + "acc": accuracies, + "rec": recalls, + "prec": precisions, + "f1": f1s, + } + + +# Define strategy and the custom aggregation function for the evaluation metrics +strategy = fl.server.strategy.FedAvg(evaluate_metrics_aggregation_fn=average_metrics) + + +# Start Flower server +fl.server.start_server( + server_address="0.0.0.0:8080", + config=fl.server.ServerConfig(num_rounds=3), + strategy=strategy, +) diff --git 
a/examples/doc/source/_templates/base.html b/examples/doc/source/_templates/base.html index e4fe80720b74..08030fb08c15 100644 --- a/examples/doc/source/_templates/base.html +++ b/examples/doc/source/_templates/base.html @@ -5,7 +5,7 @@ - + {%- if metatags %}{{ metatags }}{% endif -%} @@ -99,6 +99,6 @@ {%- endblock -%} {%- endblock scripts -%} - + diff --git a/examples/doc/source/conf.py b/examples/doc/source/conf.py index 3d629c39c7ea..bf177aa5ae24 100644 --- a/examples/doc/source/conf.py +++ b/examples/doc/source/conf.py @@ -30,7 +30,7 @@ author = "The Flower Authors" # The full version, including alpha/beta/rc tags -release = "1.7.0" +release = "1.8.0" # -- General configuration --------------------------------------------------- @@ -76,7 +76,7 @@ html_title = f"Flower Examples {release}" html_logo = "_static/flower-logo.png" html_favicon = "_static/favicon.ico" -html_baseurl = "https://flower.dev/docs/examples/" +html_baseurl = "https://flower.ai/docs/examples/" html_theme_options = { # diff --git a/examples/embedded-devices/Dockerfile b/examples/embedded-devices/Dockerfile index ea63839bc9d6..a85c05c4bb7a 100644 --- a/examples/embedded-devices/Dockerfile +++ b/examples/embedded-devices/Dockerfile @@ -8,6 +8,7 @@ RUN pip3 install --upgrade pip # Install flower RUN pip3 install flwr>=1.0 +RUN pip3 install flwr-datasets>=0.0.2 RUN pip3 install tqdm==4.65.0 WORKDIR /client diff --git a/examples/embedded-devices/README.md b/examples/embedded-devices/README.md index 4c79eafbbf84..5afe3965bd5b 100644 --- a/examples/embedded-devices/README.md +++ b/examples/embedded-devices/README.md @@ -192,7 +192,8 @@ On the machine of your choice, launch the server: # Launch your server.
# Will wait for at least 2 clients to be connected, then will train for 3 FL rounds # The command below will sample all clients connected (since sample_fraction=1.0) -python server.py --rounds 3 --min_num_clients 2 --sample_fraction 1.0 # append `--mnist` if you want to use that dataset/model setting +# The server is dataset agnostic (use the same command for MNIST and CIFAR10) +python server.py --rounds 3 --min_num_clients 2 --sample_fraction 1.0 ``` > If you are on macOS with Apple Silicon (i.e. M1, M2 chips), you might encounter a `grpcio`-related issue when launching your server. If you are in a conda environment you can solve this easily by doing: `pip uninstall grpcio` and then `conda install grpcio`. diff --git a/examples/embedded-devices/client_pytorch.py b/examples/embedded-devices/client_pytorch.py index 5d236c9e9389..f326db7c678c 100644 --- a/examples/embedded-devices/client_pytorch.py +++ b/examples/embedded-devices/client_pytorch.py @@ -6,18 +6,19 @@ import torch import torch.nn as nn import torch.nn.functional as F -from torch.utils.data import DataLoader, random_split -from torchvision.datasets import CIFAR10, MNIST +from torch.utils.data import DataLoader from torchvision.transforms import Compose, Normalize, ToTensor from torchvision.models import mobilenet_v3_small from tqdm import tqdm +from flwr_datasets import FederatedDataset + parser = argparse.ArgumentParser(description="Flower Embedded devices") parser.add_argument( "--server_address", type=str, default="0.0.0.0:8080", - help=f"gRPC server address (deafault '0.0.0.0:8080')", + help=f"gRPC server address (default '0.0.0.0:8080')", ) parser.add_argument( "--cid", @@ -28,25 +29,13 @@ parser.add_argument( "--mnist", action="store_true", - help="If you use Raspberry Pi Zero clients (which just have 512MB or RAM) use MNIST", + help="If you use Raspberry Pi Zero clients (which just have 512MB of RAM) use " + "MNIST", ) - warnings.filterwarnings("ignore", category=UserWarning) NUM_CLIENTS = 50
-# a config for mobilenetv2 that works for -# small input sizes (i.e. 32x32 as in CIFAR) -mb2_cfg = [ - (1, 16, 1, 1), - (6, 24, 2, 1), - (6, 32, 3, 2), - (6, 64, 4, 2), - (6, 96, 3, 1), - (6, 160, 3, 2), - (6, 320, 1, 1), -] - class Net(nn.Module): """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz').""" @@ -73,7 +62,9 @@ def train(net, trainloader, optimizer, epochs, device): """Train the model on the training set.""" criterion = torch.nn.CrossEntropyLoss() for _ in range(epochs): - for images, labels in tqdm(trainloader): + for batch in tqdm(trainloader): + batch = list(batch.values()) + images, labels = batch[0], batch[1] optimizer.zero_grad() criterion(net(images.to(device)), labels.to(device)).backward() optimizer.step() @@ -84,7 +75,9 @@ def test(net, testloader, device): criterion = torch.nn.CrossEntropyLoss() correct, loss = 0, 0.0 with torch.no_grad(): - for images, labels in tqdm(testloader): + for batch in tqdm(testloader): + batch = list(batch.values()) + images, labels = batch[0], batch[1] outputs = net(images.to(device)) labels = labels.to(device) loss += criterion(outputs, labels).item() @@ -95,44 +88,33 @@ def test(net, testloader, device): def prepare_dataset(use_mnist: bool): """Get MNIST/CIFAR-10 and return client partitions and global testset.""" - dataset = MNIST if use_mnist else CIFAR10 if use_mnist: + fds = FederatedDataset(dataset="mnist", partitioners={"train": NUM_CLIENTS}) + img_key = "image" norm = Normalize((0.1307,), (0.3081,)) else: + fds = FederatedDataset(dataset="cifar10", partitioners={"train": NUM_CLIENTS}) + img_key = "img" norm = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - - trf = Compose([ToTensor(), norm]) - trainset = dataset("./data", train=True, download=True, transform=trf) - testset = dataset("./data", train=False, download=True, transform=trf) - - print("Partitioning dataset (IID)...") - - # Split trainset into `num_partitions` trainsets - num_images = len(trainset) // NUM_CLIENTS - 
partition_len = [num_images] * NUM_CLIENTS - - trainsets = random_split( - trainset, partition_len, torch.Generator().manual_seed(2023) - ) - - val_ratio = 0.1 - - # Create dataloaders with train+val support - train_partitions = [] - val_partitions = [] - for trainset_ in trainsets: - num_total = len(trainset_) - num_val = int(val_ratio * num_total) - num_train = num_total - num_val - - for_train, for_val = random_split( - trainset_, [num_train, num_val], torch.Generator().manual_seed(2023) - ) - - train_partitions.append(for_train) - val_partitions.append(for_val) - - return train_partitions, val_partitions, testset + pytorch_transforms = Compose([ToTensor(), norm]) + + def apply_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch[img_key] = [pytorch_transforms(img) for img in batch[img_key]] + return batch + + trainsets = [] + validsets = [] + for node_id in range(NUM_CLIENTS): + partition = fds.load_partition(node_id, "train") + # Divide data on each node: 90% train, 10% test + partition = partition.train_test_split(test_size=0.1) + partition = partition.with_transform(apply_transforms) + trainsets.append(partition["train"]) + validsets.append(partition["test"]) + testset = fds.load_full("test") + testset = testset.with_transform(apply_transforms) + return trainsets, validsets, testset # Flower client, adapted from Pytorch quickstart/simulation example @@ -148,8 +130,6 @@ def __init__(self, trainset, valset, use_mnist): self.model = Net() else: self.model = mobilenet_v3_small(num_classes=10) - # let's not reduce spatial resolution too early - self.model.features[0][0].stride = (1, 1) # Determine device self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") self.model.to(self.device) # send model to device @@ -200,15 +180,15 @@ def main(): assert args.cid < NUM_CLIENTS use_mnist = args.mnist - # Download CIFAR-10 dataset and partition it + # Download dataset and partition it trainsets, valsets, _ = 
prepare_dataset(use_mnist) # Start Flower client setting its associated data partition - fl.client.start_numpy_client( + fl.client.start_client( server_address=args.server_address, client=FlowerClient( trainset=trainsets[args.cid], valset=valsets[args.cid], use_mnist=use_mnist - ), + ).to_client(), ) diff --git a/examples/embedded-devices/client_tf.py b/examples/embedded-devices/client_tf.py index 3457af1c7a66..ae793ecd81e0 100644 --- a/examples/embedded-devices/client_tf.py +++ b/examples/embedded-devices/client_tf.py @@ -6,6 +6,8 @@ import tensorflow as tf from tensorflow import keras as keras +from flwr_datasets import FederatedDataset + parser = argparse.ArgumentParser(description="Flower Embedded devices") parser.add_argument( "--server_address", @@ -32,30 +34,28 @@ def prepare_dataset(use_mnist: bool): """Download and partitions the CIFAR-10/MNIST dataset.""" if use_mnist: - (x_train, y_train), testset = tf.keras.datasets.mnist.load_data() + fds = FederatedDataset(dataset="mnist", partitioners={"train": NUM_CLIENTS}) + img_key = "image" else: - (x_train, y_train), testset = tf.keras.datasets.cifar10.load_data() + fds = FederatedDataset(dataset="cifar10", partitioners={"train": NUM_CLIENTS}) + img_key = "img" partitions = [] - # We keep all partitions equal-sized in this example - partition_size = math.floor(len(x_train) / NUM_CLIENTS) - for cid in range(NUM_CLIENTS): - # Split dataset into non-overlapping NUM_CLIENT partitions - idx_from, idx_to = int(cid) * partition_size, (int(cid) + 1) * partition_size - - x_train_cid, y_train_cid = ( - x_train[idx_from:idx_to] / 255.0, - y_train[idx_from:idx_to], + for node_id in range(NUM_CLIENTS): + partition = fds.load_partition(node_id, "train") + partition.set_format("numpy") + # Divide data on each node: 90% train, 10% test + partition = partition.train_test_split(test_size=0.1) + x_train, y_train = ( + partition["train"][img_key] / 255.0, + partition["train"]["label"], ) - - # now partition into train/validation - 
# Use 10% of the client's training data for validation - split_idx = math.floor(len(x_train_cid) * 0.9) - - client_train = (x_train_cid[:split_idx], y_train_cid[:split_idx]) - client_val = (x_train_cid[split_idx:], y_train_cid[split_idx:]) - partitions.append((client_train, client_val)) - - return partitions, testset + x_test, y_test = partition["test"][img_key] / 255.0, partition["test"]["label"] + partitions.append(((x_train, y_train), (x_test, y_test))) + data_centralized = fds.load_full("test") + data_centralized.set_format("numpy") + x_centralized = data_centralized[img_key] / 255.0 + y_centralized = data_centralized["label"] + return partitions, (x_centralized, y_centralized) class FlowerClient(fl.client.NumPyClient): @@ -68,7 +68,7 @@ def __init__(self, trainset, valset, use_mnist: bool): # Instantiate model if use_mnist: # small model for MNIST - self.model = model = keras.Sequential( + self.model = keras.Sequential( [ keras.Input(shape=(28, 28, 1)), keras.layers.Conv2D(32, kernel_size=(5, 5), activation="relu"), @@ -118,14 +118,16 @@ def main(): assert args.cid < NUM_CLIENTS use_mnist = args.mnist - # Download CIFAR-10 dataset and partition it + # Download dataset and partition it partitions, _ = prepare_dataset(use_mnist) trainset, valset = partitions[args.cid] # Start Flower client setting its associated data partition - fl.client.start_numpy_client( + fl.client.start_client( server_address=args.server_address, - client=FlowerClient(trainset=trainset, valset=valset, use_mnist=use_mnist), + client=FlowerClient( + trainset=trainset, valset=valset, use_mnist=use_mnist + ).to_client(), ) diff --git a/examples/embedded-devices/requirements_pytorch.txt b/examples/embedded-devices/requirements_pytorch.txt index 797ca6db6244..f859c4efef17 100644 --- a/examples/embedded-devices/requirements_pytorch.txt +++ b/examples/embedded-devices/requirements_pytorch.txt @@ -1,4 +1,5 @@ flwr>=1.0, <2.0 +flwr-datasets[vision]>=0.0.2, <1.0.0 torch==1.13.1 torchvision==0.14.1 
tqdm==4.65.0 diff --git a/examples/embedded-devices/requirements_tf.txt b/examples/embedded-devices/requirements_tf.txt index c7068d40b9c2..ff65b9c31648 100644 --- a/examples/embedded-devices/requirements_tf.txt +++ b/examples/embedded-devices/requirements_tf.txt @@ -1,2 +1,3 @@ flwr>=1.0, <2.0 +flwr-datasets[vision]>=0.0.2, <1.0.0 tensorflow >=2.9.1, != 2.11.1 diff --git a/examples/embedded-devices/server.py b/examples/embedded-devices/server.py index 2a15f792297e..2a6194aa5088 100644 --- a/examples/embedded-devices/server.py +++ b/examples/embedded-devices/server.py @@ -30,16 +30,11 @@ default=2, help="Minimum number of available clients required for sampling (default: 2)", ) -parser.add_argument( - "--mnist", - action="store_true", - help="If you use Raspberry Pi Zero clients (which just have 512MB or RAM) use MNIST", -) # Define metric aggregation function def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - """Thist function averages teh `accuracy` metric sent by the clients in a `evaluate` + """This function averages the `accuracy` metric sent by the clients in an `evaluate` stage (i.e.
clients received the global model and evaluate it on their local validation sets).""" # Multiply accuracy of each client by number of examples used diff --git a/examples/flower-in-30-minutes/tutorial.ipynb b/examples/flower-in-30-minutes/tutorial.ipynb index 336ec4c19644..8f9eccf65b74 100644 --- a/examples/flower-in-30-minutes/tutorial.ipynb +++ b/examples/flower-in-30-minutes/tutorial.ipynb @@ -776,7 +776,7 @@ "\n", " return FlowerClient(\n", " trainloader=trainloaders[int(cid)], vallodaer=valloaders[int(cid)]\n", - " )\n", + " ).to_client()\n", "\n", " return client_fn\n", "\n", diff --git a/examples/flower-simulation-step-by-step-pytorch/Part-I/client.py b/examples/flower-simulation-step-by-step-pytorch/Part-I/client.py index b10c32d36c42..eac831ad1932 100644 --- a/examples/flower-simulation-step-by-step-pytorch/Part-I/client.py +++ b/examples/flower-simulation-step-by-step-pytorch/Part-I/client.py @@ -98,7 +98,7 @@ def client_fn(cid: str): trainloader=trainloaders[int(cid)], vallodaer=valloaders[int(cid)], num_classes=num_classes, - ) + ).to_client() # return the function to spawn client return client_fn diff --git a/examples/flower-simulation-step-by-step-pytorch/Part-II/client.py b/examples/flower-simulation-step-by-step-pytorch/Part-II/client.py index d269d4892a0e..7da9547d7362 100644 --- a/examples/flower-simulation-step-by-step-pytorch/Part-II/client.py +++ b/examples/flower-simulation-step-by-step-pytorch/Part-II/client.py @@ -75,6 +75,6 @@ def client_fn(cid: str): trainloader=trainloaders[int(cid)], vallodaer=valloaders[int(cid)], model_cfg=model_cfg, - ) + ).to_client() return client_fn diff --git a/examples/flower-simulation-step-by-step-pytorch/README.md b/examples/flower-simulation-step-by-step-pytorch/README.md index 55b8d837b090..beb8dd7f6f95 100644 --- a/examples/flower-simulation-step-by-step-pytorch/README.md +++ b/examples/flower-simulation-step-by-step-pytorch/README.md @@ -1,5 +1,7 @@ # Flower Simulation Step-by-Step +> Since this tutorial 
(and its video series) was put together, Flower has been updated a few times. As a result, some of the steps to construct the environment (see below) have been updated. Some parts of the code have also been updated. Overall, the content of this tutorial and how things work remains the same as in the video tutorials. + This directory contains the code developed in the `Flower Simulation` tutorial series on Youtube. You can find all the videos [here](https://www.youtube.com/playlist?list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB) or clicking on the video preview below. - In `Part-I` (7 videos) we developed from scratch a complete Federated Learning pipeline for simulation using PyTorch. @@ -19,20 +21,17 @@ As presented in the video, we first need to create a Python environment. You are # I'm assuming you are running this on an Ubuntu 22.04 machine (GPU is not required) # create the environment -conda create -n flower_tutorial python=3.8 -y +conda create -n flower_tutorial python=3.9 -y # activate your environment (depending on how you installed conda you might need to use `conda activate ...` instead) source activate flower_tutorial # install PyToch (other versions would likely work) conda install pytorch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 pytorch-cuda=11.6 -c pytorch -c nvidia -y -# conda install pytorch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 -c pytorch # If you don't have a GPU +# conda install pytorch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 -c pytorch -y # If you don't have a GPU -# install flower (for FL) and hydra (for configs) -pip install flwr==1.4.0 hydra-core==1.3.2 -# install ray -# you might see some warning messages after installing it (you can ignore them) -pip install ray==1.11.1 +# Install Flower and other dependencies +pip install -r requirements.txt ``` If you are running this on macOS with Apple Silicon (i.e. M1, M2), you'll need a different `grpcio` package if you see an error when running the code. 
To fix this do: diff --git a/examples/flower-simulation-step-by-step-pytorch/requirements.txt b/examples/flower-simulation-step-by-step-pytorch/requirements.txt new file mode 100644 index 000000000000..a322192ca711 --- /dev/null +++ b/examples/flower-simulation-step-by-step-pytorch/requirements.txt @@ -0,0 +1,2 @@ +flwr[simulation]>=1.0, <2.0 +hydra-core==1.3.2 \ No newline at end of file diff --git a/examples/flower-via-docker-compose/.gitignore b/examples/flower-via-docker-compose/.gitignore new file mode 100644 index 000000000000..de5b2e7692bf --- /dev/null +++ b/examples/flower-via-docker-compose/.gitignore @@ -0,0 +1,17 @@ +# ignore __pycache__ directories +__pycache__/ + +# ignore .pyc files +*.pyc + +# ignore .vscode directory +.vscode/ + +# ignore .npz files +*.npz + +# ignore .csv files +*.csv + +# ignore docker-compose.yaml file +docker-compose.yml \ No newline at end of file diff --git a/examples/flower-via-docker-compose/Dockerfile b/examples/flower-via-docker-compose/Dockerfile new file mode 100644 index 000000000000..ee6fee3103a5 --- /dev/null +++ b/examples/flower-via-docker-compose/Dockerfile @@ -0,0 +1,19 @@ +# Use an official Python runtime as a parent image +FROM python:3.10-slim-buster + +# Set the working directory in the container to /app +WORKDIR /app + +# Copy the requirements file into the container +COPY ./requirements.txt /app/requirements.txt + +# Install gcc and other dependencies +RUN apt-get update && apt-get install -y \ + gcc \ + python3-dev && \ + rm -rf /var/lib/apt/lists/* + +# Install any needed packages specified in requirements.txt +RUN pip install -r requirements.txt + + diff --git a/examples/flower-via-docker-compose/README.md b/examples/flower-via-docker-compose/README.md new file mode 100644 index 000000000000..3ef1ac37bcda --- /dev/null +++ b/examples/flower-via-docker-compose/README.md @@ -0,0 +1,254 @@ +# Leveraging Flower and Docker for Device Heterogeneity Management in Federated Learning + +

+ Flower Website + Docker Logo +

+ +## Introduction + +In this example, we tackle device heterogeneity in federated learning, arising from differences in memory and CPU capabilities across devices. This diversity affects training efficiency and inclusivity. Our strategy includes simulating this heterogeneity by setting CPU and memory limits in a Docker setup, using a custom Docker compose generator script. This approach creates a varied training environment and enables us to develop strategies to manage these disparities effectively. + +## Handling Device Heterogeneity + +1. **System Metrics Access**: + + - Effective management of device heterogeneity begins with monitoring system metrics of each container. We integrate the following services to achieve this: + - **Cadvisor**: Collects comprehensive metrics from each Docker container. + - **Prometheus**: Using `prometheus.yaml` for configuration, it scrapes data from Cadvisor at scheduled intervals, serving as a robust time-series database. Users can access the Prometheus UI at `http://localhost:9090` to create and run queries using PromQL, allowing for detailed insight into container performance. + +2. **Mitigating Heterogeneity**: + + - In this basic use case, we address device heterogeneity by establishing rules tailored to each container's system capabilities. This involves modifying training parameters, such as batch sizes and learning rates, based on each device's memory capacity and CPU availability. These settings are specified in the `client_configs` array in the `create_docker_compose` script. 
For example: + + ```python + client_configs = [ + {"mem_limit": "3g", "batch_size": 32, "cpus": 4, "learning_rate": 0.001}, + {"mem_limit": "6g", "batch_size": 256, "cpus": 1, "learning_rate": 0.05}, + {"mem_limit": "4g", "batch_size": 64, "cpus": 3, "learning_rate": 0.02}, + {"mem_limit": "5g", "batch_size": 128, "cpus": 2.5, "learning_rate": 0.09}, + ] + ``` + +## Prerequisites + +Docker must be installed and the Docker daemon running on your server. If you don't already have Docker installed, you can get [installation instructions for your specific Linux distribution or macOS from Docker](https://docs.docker.com/engine/install/). Besides Docker, the only extra requirement is having Python installed. You don't need to create a new environment for this example since all dependencies will be installed inside Docker containers automatically. + +## Running the Example + +Running this example is easy. For a more detailed step-by-step guide, including more useful material, refer to the detailed guide in the following section. + +```bash + +# Generate docker compose file +python helpers/generate_docker_compose.py # by default will configure to use 2 clients for 100 rounds + +# Build docker images +docker-compose build + +# Launch everything +docker-compose up +``` + +On your favourite browser, go to `http://localhost:3000` to see the Grafana dashboard showing system-level and application-level metrics. + +To stop all containers, open a new terminal and `cd` into this directory, then run `docker-compose down`. Alternatively, you can do `ctrl+c` on the same terminal and then run `docker-compose down` to ensure everything is terminated. + +## Running the Example (detailed) + +### Step 1: Configure Docker Compose + +Execute the following command to run the `helpers/generate_docker_compose.py` script. This script creates the docker-compose configuration needed to set up the environment.
+ +```bash +python helpers/generate_docker_compose.py +``` + +Within the script, specify the number of clients (`total_clients`) and resource limitations for each client in the `client_configs` array. You can adjust the number of rounds by passing `--num_rounds` to the above command. + +### Step 2: Build and Launch Containers + +1. **Execute Initialization Script**: + + - To build the Docker images and start the containers, use the following command: + + ```bash + # this is the only command you need to execute to run the entire example + docker-compose up + ``` + + - If you make any changes to the Dockerfile or other configuration files, you should rebuild the images to reflect these changes. This can be done by adding the `--build` flag to the command: + + ```bash + docker-compose up --build + ``` + + - The `--build` flag instructs Docker Compose to rebuild the images before starting the containers, ensuring that any code or configuration changes are included. + + - To stop all services, you have two options: + + - Run `docker-compose down` in another terminal if you are in the same directory. This command will stop and remove the containers, networks, and volumes created by `docker-compose up`. + - Press `Ctrl+C` once in the terminal where `docker-compose up` is running. This will stop the containers but won't remove them or the networks and volumes they use. + +2. **Services Startup**: + + - Several services will automatically launch as defined in your `docker-compose.yml` file: + + - **Monitoring Services**: Prometheus for metrics collection, Cadvisor for container monitoring, and Grafana for data visualization. + - **Flower Federated Learning Environment**: The Flower server and client containers are initialized and start running. + + - After launching the services, verify that all Docker containers are running correctly by executing the `docker ps` command. 
Here's an example output: + + ```bash + ➜ ~ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 9f05820eba45 flower-via-docker-compose-client2 "python client.py --…" 50 seconds ago Up 48 seconds 0.0.0.0:6002->6002/tcp client2 + a0333715d504 flower-via-docker-compose-client1 "python client.py --…" 50 seconds ago Up 48 seconds 0.0.0.0:6001->6001/tcp client1 + 0da2bf735965 flower-via-docker-compose-server "python server.py --…" 50 seconds ago Up 48 seconds 0.0.0.0:6000->6000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8265->8265/tcp server + c57ef50657ae grafana/grafana:latest "/run.sh --config=/e…" 50 seconds ago Up 49 seconds 0.0.0.0:3000->3000/tcp grafana + 4f274c2083dc prom/prometheus:latest "/bin/prometheus --c…" 50 seconds ago Up 49 seconds 0.0.0.0:9090->9090/tcp prometheus + e9f4c9644a1c gcr.io/cadvisor/cadvisor:v0.47.0 "/usr/bin/cadvisor -…" 50 seconds ago Up 49 seconds 0.0.0.0:8080->8080/tcp cadvisor + ``` + + - To monitor the resource utilization of your containers in real-time and see the limits imposed in the Docker Compose file, you can use the `docker stats` command. This command provides a live stream of container CPU, memory, and network usage statistics. + + ```bash + ➜ ~ docker stats + CONTAINER ID NAME CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O PIDS + 9f05820eba45 client2 104.44% 1.968GiB / 6GiB 32.80% 148MB / 3.22MB 0B / 284MB 82 + a0333715d504 client1 184.69% 1.498GiB / 3GiB 49.92% 149MB / 2.81MB 1.37MB / 284MB 82 + 0da2bf735965 server 0.12% 218.5MiB / 15.61GiB 1.37% 1.47MB / 2.89MB 2.56MB / 2.81MB 45 + c57ef50657ae grafana 0.24% 96.19MiB / 400MiB 24.05% 18.9kB / 3.79kB 77.8kB / 152kB 20 + 4f274c2083dc prometheus 1.14% 52.73MiB / 500MiB 10.55% 6.79MB / 211kB 1.02MB / 1.31MB 15 + e9f4c9644a1c cadvisor 7.31% 32.14MiB / 500MiB 6.43% 139kB / 6.66MB 500kB / 0B 18 + ``` + +3. 
**Automated Grafana Configuration**: + + - Grafana is configured to load pre-defined data sources and dashboards for immediate monitoring, facilitated by provisioning files. The provisioning files include `prometheus-datasource.yml` for data sources, located in the `./config/provisioning/datasources` directory, and `dashboard_index.json` for dashboards, in the `./config/provisioning/dashboards` directory. The `grafana.ini` file is also tailored to enhance user experience: + - **Admin Credentials**: We provide default admin credentials in the `grafana.ini` configuration, which simplifies access by eliminating the need for users to go through the initial login process. + - **Default Dashboard Path**: A default dashboard path is set in `grafana.ini` to ensure that the dashboard with all the necessary panels is rendered when Grafana is accessed. + + These files and settings are directly mounted into the Grafana container via Docker Compose volume mappings. This setup guarantees that upon startup, Grafana is pre-configured for monitoring, requiring no additional manual setup. + +4. **Begin Training Process**: + + - The federated learning training automatically begins once all client containers are successfully connected to the Flower server. This synchronizes the learning process across all participating clients. + +By following these steps, you will have a fully functional federated learning environment with device heterogeneity and monitoring capabilities. + +## Model Training and Dataset Integration + +### Data Pipeline with FLWR-Datasets + +We have integrated [`flwr-datasets`](https://flower.ai/docs/datasets/) into our data pipeline, which is managed within the `load_data.py` file in the `helpers/` directory. This script facilitates standardized access to datasets across the federated network and incorporates a `data_sampling_percentage` argument. 
This argument allows users to specify the percentage of the dataset to be used for training and evaluation, accommodating devices with lower memory capabilities to prevent Out-of-Memory (OOM) errors. + +### Model Selection and Dataset + +For the federated learning system, we have selected the MobileNet model due to its efficiency in image classification tasks. The model is trained and evaluated on the CIFAR-10 dataset. The combination of MobileNet and CIFAR-10 is ideal for demonstrating the capabilities of our federated learning solution in a heterogeneous device environment. + +- **MobileNet**: A streamlined architecture for mobile and embedded devices that balances performance and computational cost. +- **CIFAR-10 Dataset**: A standard benchmark dataset for image classification, containing various object classes that pose a comprehensive challenge for the learning model. + +By integrating these components, our framework is well-prepared to handle the intricacies of training over a distributed network with varying device capabilities and data availability. + +## Visualizing with Grafana + +### Access Grafana Dashboard + +Visit `http://localhost:3000` to enter Grafana. The automated setup ensures that you're greeted with a series of pre-configured dashboards, including the default screen with a comprehensive set of graphs. These dashboards are ready for immediate monitoring and can be customized to suit your specific requirements. + +### Dashboard Configuration + +The `dashboard_index.json` file, located in the `./config/provisioning/dashboards` directory, serves as the backbone of our Grafana dashboard's configuration. It defines the structure and settings of the dashboard panels, which are rendered when you access Grafana. This JSON file contains the specifications for various panels such as model accuracy, CPU usage, memory utilization, and network traffic. 
Each panel's configuration includes the data source, queries, visualization type, and other display settings like thresholds and colors. + +For instance, in our project setup, the `dashboard_index.json` configures a panel to display the model's accuracy over time using a time-series graph, and another panel to show the CPU usage across clients using a graph that plots data points as they are received. This file is fundamental for creating a customized and informative dashboard that provides a snapshot of the federated learning system's health and performance metrics. + +By modifying the `dashboard_index.json` file, users can tailor the Grafana dashboard to include additional metrics or change the appearance and behavior of existing panels to better fit their monitoring requirements. + +### Grafana Default Dashboard + +Below is the default Grafana dashboard that users will see upon accessing Grafana: + +grafana_home_screen + +This comprehensive dashboard provides insights into various system metrics across client-server containers. It includes visualizations such as: + +- **Application Metrics**: The "Model Accuracy" graph shows an upward trend as rounds of training progress, which is a positive indicator of the model learning and improving over time. Conversely, the "Model Loss" graph trends downward, suggesting that the model is becoming more precise and making fewer mistakes as it trains. + +- **CPU Usage**: The sharp spikes in the red graph, representing "client1", indicate peak CPU usage, which is considerably higher than that of "client2" (blue graph). This difference is due to "client1" being allocated more computing resources (up to 4 CPU cores) compared to "client2", which is limited to just 1 CPU core, hence the more subdued CPU usage pattern. + +- **Memory Utilization**: Both clients are allocated a similar amount of memory, reflected in the nearly same lines for memory usage. 
This uniform allocation allows for a straightforward comparison of how each client manages memory under similar conditions. + +- **Network Traffic**: Monitor incoming and outgoing network traffic to each client, which is crucial for understanding data exchange volumes during federated learning cycles. + +Together, these metrics paint a detailed picture of the federated learning operation, showcasing resource usage and model performance. Such insights are invaluable for system optimization, ensuring balanced load distribution and efficient model training. + +## Comprehensive Monitoring System Integration + +### Capturing Container Metrics with cAdvisor + +cAdvisor is seamlessly integrated into our monitoring setup to capture a variety of system and container metrics, such as CPU, memory, and network usage. These metrics are vital for analyzing the performance and resource consumption of the containers in the federated learning environment. + +### Custom Metrics: Setup and Monitoring via Prometheus + +In addition to the standard metrics captured by cAdvisor, we have implemented a process to track custom metrics like model's accuracy and loss within Grafana, using Prometheus as the backbone for metric collection. + +1. **Prometheus Client Installation**: + + - We began by installing the `prometheus_client` library in our Python environment, enabling us to define and expose custom metrics that Prometheus can scrape. + +2. **Defining Metrics in Server Script**: + + - Within our `server.py` script, we have established two key Prometheus Gauge metrics, specifically tailored for monitoring our federated learning model: `model_accuracy` and `model_loss`. These custom gauges are instrumental in capturing the most recent values of the model's accuracy and loss, which are essential metrics for evaluating the model's performance. 
The gauges are defined as follows: + + ```python + from prometheus_client import Gauge + + accuracy_gauge = Gauge('model_accuracy', 'Current accuracy of the global model') + loss_gauge = Gauge('model_loss', 'Current loss of the global model') + ``` + +3. **Exposing Metrics via HTTP Endpoint**: + + - We leveraged the `start_http_server` function from the `prometheus_client` library to launch an HTTP server on port 8000. This server provides the `/metrics` endpoint, where the custom metrics are accessible for Prometheus scraping. The function is called at the end of the `main` method in `server.py`: + + ```python + start_http_server(8000) + ``` + +4. **Updating Metrics Recording Strategy**: + + - The core of our metrics tracking lies in the `strategy.py` file, particularly within the `aggregate_evaluate` method. This method is crucial as it's where the federated learning model's accuracy and loss values are computed after each round of training with the aggregated data from all clients. + + ```python + self.accuracy_gauge.set(accuracy_aggregated) + self.loss_gauge.set(loss_aggregated) + ``` + +5. **Configuring Prometheus Scraping**: + + - In the `prometheus.yml` file, under `scrape_configs`, we configured a new job to scrape the custom metrics from the HTTP server. This setup includes the job's name, the scraping interval, and the target server's URL. + +### Visualizing the Monitoring Architecture + +The image below depicts the Prometheus scraping process as it is configured in our monitoring setup. Within this architecture: + +- The "Prometheus server" is the central component that retrieves and stores metrics. +- "cAdvisor" and the "HTTP server" we set up to expose our custom metrics are represented as "Prometheus targets" in the diagram. cAdvisor captures container metrics, while the HTTP server serves our custom `model_accuracy` and `model_loss` metrics at the `/metrics` endpoint. 
+- These targets are periodically scraped by the Prometheus server, aggregating data from both system-level and custom performance metrics. +- The aggregated data is then made available to the "Prometheus web UI" and "Grafana," as shown, enabling detailed visualization and analysis through the Grafana dashboard. + +prometheus-architecture + +By incorporating these steps, we have enriched our monitoring capabilities to not only include system-level metrics but also critical performance indicators of our federated learning model. This approach is pivotal for understanding and improving the learning process. Similarly, you can apply this methodology to track any other metric that you find interesting or relevant to your specific needs. This flexibility allows for a comprehensive and customized monitoring environment, tailored to the unique aspects and requirements of your federated learning system. + +## Additional Resources + +- **Grafana Tutorials**: Explore a variety of tutorials on Grafana at [Grafana Tutorials](https://grafana.com/tutorials/). +- **Prometheus Overview**: Learn more about Prometheus at their [official documentation](https://prometheus.io/docs/introduction/overview/). +- **cAdvisor Guide**: For information on monitoring Docker containers with cAdvisor, see this [Prometheus guide](https://prometheus.io/docs/guides/cadvisor/). + +## Conclusion + +This project serves as a foundational example of managing device heterogeneity within the federated learning context, employing the Flower framework alongside Docker, Prometheus, and Grafana. It's designed to be a starting point for users to explore and further adapt to the complexities of device heterogeneity in federated learning environments. 
"""Flower federated-learning client.

Loads this client's partition of the dataset via ``helpers.load_data``,
trains the shared model locally, and exchanges weights/metrics with the
Flower server over gRPC.
"""

import os

# NOTE: this must be set *before* TensorFlow is imported, otherwise the
# C++-level verbosity setting is silently ignored. (The original module
# set it after `import tensorflow`, which had no effect.)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

import argparse
import logging

import flwr as fl
import tensorflow as tf  # noqa: F401  -- kept for parity with the original module

from helpers.load_data import load_data
from model.model import Model

logging.basicConfig(level=logging.INFO)  # Configure logging
logger = logging.getLogger(__name__)  # Create logger for the module

# Parse command line arguments
parser = argparse.ArgumentParser(description="Flower client")

parser.add_argument(
    "--server_address", type=str, default="server:8080", help="Address of the server"
)
parser.add_argument(
    "--batch_size", type=int, default=32, help="Batch size for training"
)
parser.add_argument(
    "--learning_rate", type=float, default=0.1, help="Learning rate for the optimizer"
)
parser.add_argument("--client_id", type=int, default=1, help="Unique ID for the client")
parser.add_argument(
    "--total_clients", type=int, default=2, help="Total number of clients"
)
parser.add_argument(
    "--data_percentage", type=float, default=0.5, help="Portion of client data to use"
)

args = parser.parse_args()

# Create an instance of the model and pass the learning rate as an argument
model = Model(learning_rate=args.learning_rate)

# Compile the model
model.compile()


class Client(fl.client.NumPyClient):
    """NumPyClient that trains and evaluates the shared global model.

    The model itself is the module-level ``model`` instance; this class
    only holds the client's local data split and command-line arguments.
    """

    def __init__(self, args):
        self.args = args

        logger.info("Preparing data...")
        # `data_sampling_percentage` trims the partition so low-memory
        # devices can participate without OOM-ing.
        (x_train, y_train), (x_test, y_test) = load_data(
            data_sampling_percentage=self.args.data_percentage,
            client_id=self.args.client_id,
            total_clients=self.args.total_clients,
        )

        self.x_train = x_train
        self.y_train = y_train
        self.x_test = x_test
        self.y_test = y_test

    def get_parameters(self, config):
        """Return the current model weights as a list of NumPy arrays."""
        return model.get_model().get_weights()

    def fit(self, parameters, config):
        """Train locally starting from the given global weights.

        Returns the updated weights, the number of training examples, and
        a metrics dict with the final training accuracy of this round.
        """
        # Set the weights of the model
        model.get_model().set_weights(parameters)

        # Train the model
        history = model.get_model().fit(
            self.x_train, self.y_train, batch_size=self.args.batch_size
        )

        # Calculate evaluation metric
        results = {
            "accuracy": float(history.history["accuracy"][-1]),
        }

        # Get the parameters after training
        parameters_prime = model.get_model().get_weights()

        # Directly return the parameters and the number of examples trained on
        return parameters_prime, len(self.x_train), results

    def evaluate(self, parameters, config):
        """Evaluate the given global weights on this client's test split.

        Returns the loss, the number of evaluation examples, and a metrics
        dict containing the accuracy.
        """
        # Set the weights of the model
        model.get_model().set_weights(parameters)

        # Evaluate the model and get the loss and accuracy
        loss, accuracy = model.get_model().evaluate(
            self.x_test, self.y_test, batch_size=self.args.batch_size
        )

        # Return the loss, the number of examples evaluated on and the accuracy
        return float(loss), len(self.x_test), {"accuracy": float(accuracy)}


def start_fl_client():
    """Connect to the Flower server and run the client until training ends.

    On failure, logs the error and returns a small status dict instead of
    raising, so the container exits cleanly.
    """
    try:
        client = Client(args).to_client()
        fl.client.start_client(server_address=args.server_address, client=client)
    except Exception as e:
        logger.error("Error starting FL client: %s", e)
        return {"status": "error", "message": str(e)}


if __name__ == "__main__":
    # Call the function to start the client
    start_fl_client()
+org_role = Admin diff --git a/examples/flower-via-docker-compose/config/prometheus.yml b/examples/flower-via-docker-compose/config/prometheus.yml new file mode 100644 index 000000000000..46cf07b9dcee --- /dev/null +++ b/examples/flower-via-docker-compose/config/prometheus.yml @@ -0,0 +1,19 @@ + +global: + scrape_interval: 1s + evaluation_interval: 1s + +rule_files: +scrape_configs: + - job_name: 'cadvisor' + scrape_interval: 1s + metrics_path: '/metrics' + static_configs: + - targets: ['cadvisor:8080'] + labels: + group: 'cadvisor' + - job_name: 'server_metrics' + scrape_interval: 1s + metrics_path: '/metrics' + static_configs: + - targets: ['server:8000'] \ No newline at end of file diff --git a/examples/flower-via-docker-compose/config/provisioning/dashboards/dashboard_index.json b/examples/flower-via-docker-compose/config/provisioning/dashboards/dashboard_index.json new file mode 100644 index 000000000000..b52f19c57508 --- /dev/null +++ b/examples/flower-via-docker-compose/config/provisioning/dashboards/dashboard_index.json @@ -0,0 +1,1051 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "Simple exporter for cadvisor only", + "editable": true, + "fiscalYearStartMonth": 0, + "gnetId": 14282, + "graphTooltip": 0, + "id": 12, + "links": [], + "liveNow": false, + "panels": [ + { + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 22, + "title": "Application metrics", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "description": "Averaged federated accuracy across clients", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", 
+ "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 23, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "model_accuracy", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Model Accuracy", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "description": "Averaged Federated Loss across clients", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + 
"lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 1 + }, + "id": 21, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "model_loss", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Model Loss", + "type": "timeseries" + }, + { + "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 9 + }, + "id": 8, + "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "refId": "A" + } + ], + "title": "CPU", + "type": "row" + }, + { + "aliasColors": { + "client1": "red", + "client2": "blue", + "server": "yellow" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 10 + }, + "hiddenSeries": false, + "id": 15, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": false, + 
"rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "editorMode": "code", + "expr": "sum(rate(container_cpu_usage_seconds_total{instance=~\"$host\",name=~\"$container\",name=~\".+\", name !~ \"(prometheus|cadvisor|grafana)\"}[10s])) by (name) *100", + "hide": false, + "interval": "", + "legendFormat": "{{name}}", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "CPU Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:606", + "format": "percent", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:607", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 11, + "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "refId": "A" + } + ], + "title": "Memory", + "type": "row" + }, + { + "aliasColors": { + "client1": "red", + "client2": "blue", + "server": "yellow" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 
18 + }, + "hiddenSeries": false, + "id": 9, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "editorMode": "code", + "expr": "sum(container_memory_rss{instance=~\"$host\",name=~\"$container\",name=~\".+\", name !~ \"(prometheus|cadvisor|grafana)\"}) by (name)", + "hide": false, + "interval": "", + "legendFormat": "{{name}}", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Memory Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:606", + "format": "bytes", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:607", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": { + "client1": "red", + "client2": "blue", + "server": "yellow" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 18 + }, + "hiddenSeries": false, + "id": 14, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + 
"nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "editorMode": "code", + "expr": "sum(container_memory_cache{instance=~\"$host\",name=~\"$container\",name=~\".+\", name !~ \"(prometheus|cadvisor|grafana)\"}) by (name)", + "hide": false, + "interval": "", + "legendFormat": "{{name}}", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Memory Cached", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:606", + "format": "bytes", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:607", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 26 + }, + "id": 2, + "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "refId": "A" + } + ], + "title": "Network", + "type": "row" + }, + { + "aliasColors": { + "client1": "red", + "client2": "blue", + "server": "yellow" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 27 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "hideEmpty": false, 
+ "hideZero": false, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "editorMode": "code", + "expr": "sum(rate(container_network_receive_bytes_total{instance=~\"$host\",name=~\"$container\",name=~\".+\", name !~ \"(prometheus|cadvisor|grafana)\"}[10s])) by (name)", + "hide": false, + "interval": "", + "legendFormat": "{{name}}", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Received Network Traffic", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:674", + "format": "Bps", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:675", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": { + "client1": "red", + "client2": "blue", + "server": "yellow" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 27 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + 
"percentage": false, + "pluginVersion": "10.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "editorMode": "code", + "expr": "sum(rate(container_network_transmit_bytes_total{instance=~\"$host\",name=~\"$container\",name=~\".+\", name !~ \"(prometheus|cadvisor|grafana)\"}[10s])) by (name)", + "interval": "", + "legendFormat": "{{name}}", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Sent Network Traffic", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:832", + "format": "Bps", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:833", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 35 + }, + "id": 19, + "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "refId": "A" + } + ], + "title": "Misc", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "fieldConfig": { + "defaults": { + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "filterable": false, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "id" + }, + "properties": [ + { + "id": "custom.width", + 
"value": 260 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Running" + }, + "properties": [ + { + "id": "unit", + "value": "d" + }, + { + "id": "decimals", + "value": 1 + }, + { + "id": "custom.cellOptions", + "value": { + "type": "color-text" + } + }, + { + "id": "color", + "value": { + "fixedColor": "dark-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 36 + }, + "id": 17, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "10.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "expr": "(time() - container_start_time_seconds{instance=~\"$host\",name=~\"$container\",name=~\".+\"})/86400", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "{{name}}", + "refId": "A" + } + ], + "title": "Containers Info", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "container_label_com_docker_compose_project", + "container_label_com_docker_compose_project_working_dir", + "image", + "instance", + "name", + "Value", + "container_label_com_docker_compose_service" + ] + } + } + }, + { + "id": "organize", + "options": { + "excludeByName": {}, + "indexByName": {}, + "renameByName": { + "Value": "Running", + "container_label_com_docker_compose_project": "Label", + "container_label_com_docker_compose_project_working_dir": "Working dir", + "container_label_com_docker_compose_service": "Service", + "image": "Registry Image", + "instance": "Instance", + "name": "Name" + } + } + } + ], + "type": "table" + } + ], + "refresh": "auto", + "schemaVersion": 38, + "tags": [], + "templating": { + "list": [ + { + "allValue": ".*", + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "datasource": { + "type": 
"prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "definition": "label_values({__name__=~\"container.*\"},instance)", + "hide": 0, + "includeAll": true, + "label": "Host", + "multi": false, + "name": "host", + "options": [], + "query": { + "query": "label_values({__name__=~\"container.*\"},instance)", + "refId": "Prometheus-host-Variable-Query" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 5, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "datasource": { + "type": "prometheus", + "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + }, + "definition": "label_values({__name__=~\"container.*\", instance=~\"$host\"},name)", + "hide": 0, + "includeAll": true, + "label": "Container", + "multi": false, + "name": "container", + "options": [], + "query": { + "query": "label_values({__name__=~\"container.*\", instance=~\"$host\"},name)", + "refId": "Prometheus-container-Variable-Query" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Cadvisor exporter Copy", + "uid": "fcf2a8da-792c-4b9f-a22f-876820b53c2f", + "version": 2, + "weekStart": "" +} \ No newline at end of file diff --git a/examples/flower-via-docker-compose/config/provisioning/dashboards/dashboards.yml b/examples/flower-via-docker-compose/config/provisioning/dashboards/dashboards.yml new file mode 100644 index 000000000000..e0d542f58f2b --- /dev/null +++ b/examples/flower-via-docker-compose/config/provisioning/dashboards/dashboards.yml @@ -0,0 +1,12 @@ +apiVersion: 1 + +providers: +- name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: false + editable: true + updateIntervalSeconds: 5 # Optional: How 
often Grafana will scan for changed dashboards + options: + path: /etc/grafana/provisioning/dashboards diff --git a/examples/flower-via-docker-compose/config/provisioning/datasources/prometheus-datasource.yml b/examples/flower-via-docker-compose/config/provisioning/datasources/prometheus-datasource.yml new file mode 100644 index 000000000000..7c8ce00fdcdc --- /dev/null +++ b/examples/flower-via-docker-compose/config/provisioning/datasources/prometheus-datasource.yml @@ -0,0 +1,9 @@ +apiVersion: 1 + +datasources: +- name: Prometheus + type: prometheus + access: proxy + uid: db69454e-e558-479e-b4fc-80db52bf91da + url: http://host.docker.internal:9090 + isDefault: true diff --git a/examples/flower-via-docker-compose/helpers/generate_docker_compose.py b/examples/flower-via-docker-compose/helpers/generate_docker_compose.py new file mode 100644 index 000000000000..cde553a95e68 --- /dev/null +++ b/examples/flower-via-docker-compose/helpers/generate_docker_compose.py @@ -0,0 +1,147 @@ +import random +import argparse + +parser = argparse.ArgumentParser(description="Generated Docker Compose") +parser.add_argument( + "--total_clients", type=int, default=2, help="Total clients to spawn (default: 2)" +) +parser.add_argument( + "--num_rounds", type=int, default=100, help="Number of FL rounds (default: 100)" +) +parser.add_argument( + "--data_percentage", + type=float, + default=0.6, + help="Portion of client data to use (default: 0.6)", +) +parser.add_argument( + "--random", action="store_true", help="Randomize client configurations" +) + + +def create_docker_compose(args): + # cpus is used to set the number of CPUs available to the container as a fraction of the total number of CPUs on the host machine. + # mem_limit is used to set the memory limit for the container. 
+ client_configs = [ + {"mem_limit": "3g", "batch_size": 32, "cpus": 4, "learning_rate": 0.001}, + {"mem_limit": "6g", "batch_size": 256, "cpus": 1, "learning_rate": 0.05}, + {"mem_limit": "4g", "batch_size": 64, "cpus": 3, "learning_rate": 0.02}, + {"mem_limit": "5g", "batch_size": 128, "cpus": 2.5, "learning_rate": 0.09}, + # Add or modify the configurations depending on your host machine + ] + + docker_compose_content = f""" +version: '3' +services: + prometheus: + image: prom/prometheus:latest + container_name: prometheus + ports: + - 9090:9090 + deploy: + restart_policy: + condition: on-failure + command: + - --config.file=/etc/prometheus/prometheus.yml + volumes: + - ./config/prometheus.yml:/etc/prometheus/prometheus.yml:ro + depends_on: + - cadvisor + + cadvisor: + image: gcr.io/cadvisor/cadvisor:v0.47.0 + container_name: cadvisor + privileged: true + deploy: + restart_policy: + condition: on-failure + ports: + - "8080:8080" + volumes: + - /:/rootfs:ro + - /var/run:/var/run:ro + - /sys:/sys:ro + - /var/lib/docker/:/var/lib/docker:ro + - /dev/disk/:/dev/disk:ro + - /var/run/docker.sock:/var/run/docker.sock + + grafana: + image: grafana/grafana:latest + container_name: grafana + ports: + - 3000:3000 + deploy: + restart_policy: + condition: on-failure + volumes: + - grafana-storage:/var/lib/grafana + - ./config/grafana.ini:/etc/grafana/grafana.ini + - ./config/provisioning/datasources:/etc/grafana/provisioning/datasources + - ./config/provisioning/dashboards:/etc/grafana/provisioning/dashboards + depends_on: + - prometheus + - cadvisor + command: + - --config=/etc/grafana/grafana.ini + + + server: + container_name: server + build: + context: . 
+ dockerfile: Dockerfile + command: python server.py --number_of_rounds={args.num_rounds} + environment: + FLASK_RUN_PORT: 6000 + DOCKER_HOST_IP: host.docker.internal + volumes: + - .:/app + - /var/run/docker.sock:/var/run/docker.sock + ports: + - "6000:6000" + - "8265:8265" + - "8000:8000" + depends_on: + - prometheus + - grafana +""" + # Add client services + for i in range(1, args.total_clients + 1): + if args.random: + config = random.choice(client_configs) + else: + config = client_configs[(i - 1) % len(client_configs)] + docker_compose_content += f""" + client{i}: + container_name: client{i} + build: + context: . + dockerfile: Dockerfile + command: python client.py --server_address=server:8080 --data_percentage={args.data_percentage} --client_id={i} --total_clients={args.total_clients} --batch_size={config["batch_size"]} --learning_rate={config["learning_rate"]} + deploy: + resources: + limits: + cpus: "{(config['cpus'])}" + memory: "{config['mem_limit']}" + volumes: + - .:/app + - /var/run/docker.sock:/var/run/docker.sock + ports: + - "{6000 + i}:{6000 + i}" + depends_on: + - server + environment: + FLASK_RUN_PORT: {6000 + i} + container_name: client{i} + DOCKER_HOST_IP: host.docker.internal +""" + + docker_compose_content += "volumes:\n grafana-storage:\n" + + with open("docker-compose.yml", "w") as file: + file.write(docker_compose_content) + + +if __name__ == "__main__": + args = parser.parse_args() + create_docker_compose(args) diff --git a/examples/flower-via-docker-compose/helpers/load_data.py b/examples/flower-via-docker-compose/helpers/load_data.py new file mode 100644 index 000000000000..1f2784946868 --- /dev/null +++ b/examples/flower-via-docker-compose/helpers/load_data.py @@ -0,0 +1,37 @@ +import numpy as np +import tensorflow as tf +from flwr_datasets import FederatedDataset +import logging + +logging.basicConfig(level=logging.INFO) # Configure logging +logger = logging.getLogger(__name__) # Create logger for the module + + +def 
load_data(data_sampling_percentage=0.5, client_id=1, total_clients=2): + """Load federated dataset partition based on client ID. + + Args: + data_sampling_percentage (float): Percentage of the dataset to use for training. + client_id (int): Unique ID for the client. + total_clients (int): Total number of clients. + + Returns: + Tuple of arrays: `(x_train, y_train), (x_test, y_test)`. + """ + + # Download and partition dataset + fds = FederatedDataset(dataset="cifar10", partitioners={"train": total_clients}) + partition = fds.load_partition(client_id - 1, "train") + partition.set_format("numpy") + + # Divide data on each client: 80% train, 20% test + partition = partition.train_test_split(test_size=0.2) + x_train, y_train = partition["train"]["img"] / 255.0, partition["train"]["label"] + x_test, y_test = partition["test"]["img"] / 255.0, partition["test"]["label"] + + # Apply data sampling + num_samples = int(data_sampling_percentage * len(x_train)) + indices = np.random.choice(len(x_train), num_samples, replace=False) + x_train, y_train = x_train[indices], y_train[indices] + + return (x_train, y_train), (x_test, y_test) diff --git a/examples/flower-via-docker-compose/model/model.py b/examples/flower-via-docker-compose/model/model.py new file mode 100644 index 000000000000..ab26d089b858 --- /dev/null +++ b/examples/flower-via-docker-compose/model/model.py @@ -0,0 +1,18 @@ +import tensorflow as tf + + +# Class for the model. 
In this case, we are using the MobileNetV2 model from Keras +class Model: + def __init__(self, learning_rate): + self.learning_rate = learning_rate + self.loss_function = tf.keras.losses.SparseCategoricalCrossentropy() + self.model = tf.keras.applications.MobileNetV2( + (32, 32, 3), alpha=0.1, classes=10, weights=None + ) + self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.learning_rate) + + def compile(self): + self.model.compile(self.optimizer, self.loss_function, metrics=["accuracy"]) + + def get_model(self): + return self.model diff --git a/examples/flower-via-docker-compose/requirements.txt b/examples/flower-via-docker-compose/requirements.txt new file mode 100644 index 000000000000..b93e5b1d9f2b --- /dev/null +++ b/examples/flower-via-docker-compose/requirements.txt @@ -0,0 +1,5 @@ +flwr==1.7.0 +tensorflow==2.13.1 +numpy==1.24.3 +prometheus_client == 0.19.0 +flwr_datasets[vision] == 0.0.2 diff --git a/examples/flower-via-docker-compose/server.py b/examples/flower-via-docker-compose/server.py new file mode 100644 index 000000000000..99d1a7ef7399 --- /dev/null +++ b/examples/flower-via-docker-compose/server.py @@ -0,0 +1,47 @@ +import argparse +import flwr as fl +import logging +from strategy.strategy import FedCustom +from prometheus_client import start_http_server, Gauge + +# Initialize Logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Define a gauge to track the global model accuracy +accuracy_gauge = Gauge("model_accuracy", "Current accuracy of the global model") + +# Define a gauge to track the global model loss +loss_gauge = Gauge("model_loss", "Current loss of the global model") + +# Parse command line arguments +parser = argparse.ArgumentParser(description="Flower Server") +parser.add_argument( + "--number_of_rounds", + type=int, + default=100, + help="Number of FL rounds (default: 100)", +) +args = parser.parse_args() + + +# Function to Start Federated Learning Server +def start_fl_server(strategy, 
rounds): + try: + fl.server.start_server( + server_address="0.0.0.0:8080", + config=fl.server.ServerConfig(num_rounds=rounds), + strategy=strategy, + ) + except Exception as e: + logger.error(f"FL Server error: {e}", exc_info=True) + + +# Main Function +if __name__ == "__main__": + # Start Prometheus Metrics Server + start_http_server(8000) + + # Initialize Strategy Instance and Start FL Server + strategy_instance = FedCustom(accuracy_gauge=accuracy_gauge, loss_gauge=loss_gauge) + start_fl_server(strategy=strategy_instance, rounds=args.number_of_rounds) diff --git a/examples/flower-via-docker-compose/strategy/strategy.py b/examples/flower-via-docker-compose/strategy/strategy.py new file mode 100644 index 000000000000..9471a99f037f --- /dev/null +++ b/examples/flower-via-docker-compose/strategy/strategy.py @@ -0,0 +1,60 @@ +from typing import Dict, List, Optional, Tuple, Union +from flwr.common import Scalar, EvaluateRes +from flwr.server.client_proxy import ClientProxy +from flwr.server.strategy.aggregate import aggregate, weighted_loss_avg +import flwr as fl +import logging +from prometheus_client import Gauge + +logging.basicConfig(level=logging.INFO) # Configure logging +logger = logging.getLogger(__name__) # Create logger for the module + + +class FedCustom(fl.server.strategy.FedAvg): + def __init__( + self, accuracy_gauge: Gauge = None, loss_gauge: Gauge = None, *args, **kwargs + ): + super().__init__(*args, **kwargs) + + self.accuracy_gauge = accuracy_gauge + self.loss_gauge = loss_gauge + + def __repr__(self) -> str: + return "FedCustom" + + def aggregate_evaluate( + self, + server_round: int, + results: List[Tuple[ClientProxy, EvaluateRes]], + failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> Tuple[Optional[float], Dict[str, Scalar]]: + """Aggregate evaluation losses and accuracy using weighted average.""" + + if not results: + return None, {} + + # Calculate weighted average for loss using the provided function + loss_aggregated 
= weighted_loss_avg( + [ + (evaluate_res.num_examples, evaluate_res.loss) + for _, evaluate_res in results + ] + ) + + # Calculate weighted average for accuracy + accuracies = [ + evaluate_res.metrics["accuracy"] * evaluate_res.num_examples + for _, evaluate_res in results + ] + examples = [evaluate_res.num_examples for _, evaluate_res in results] + accuracy_aggregated = ( + sum(accuracies) / sum(examples) if sum(examples) != 0 else 0 + ) + + # Update the Prometheus gauges with the latest aggregated accuracy and loss values + self.accuracy_gauge.set(accuracy_aggregated) + self.loss_gauge.set(loss_aggregated) + + metrics_aggregated = {"loss": loss_aggregated, "accuracy": accuracy_aggregated} + + return loss_aggregated, metrics_aggregated diff --git a/examples/mt-pytorch-callable/README.md b/examples/mt-pytorch-callable/README.md deleted file mode 100644 index 120e28098344..000000000000 --- a/examples/mt-pytorch-callable/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# Deploy 🧪 - -🧪 = this page covers experimental features that might change in future versions of Flower - -This how-to guide describes the deployment of a long-running Flower server. - -## Preconditions - -Let's assume the following project structure: - -```bash -$ tree . -. 
-└── client.py -├── driver.py -├── requirements.txt -``` - -## Install dependencies - -```bash -pip install -r requirements.txt -``` - -## Start the long-running Flower server - -```bash -flower-server --insecure -``` - -## Start the long-running Flower client - -In a new terminal window, start the first long-running Flower client: - -```bash -flower-client --insecure client:flower -``` - -In yet another new terminal window, start the second long-running Flower client: - -```bash -flower-client --insecure client:flower -``` - -## Start the Driver script - -```bash -python driver.py -``` diff --git a/examples/mt-pytorch-callable/client.py b/examples/mt-pytorch-callable/client.py deleted file mode 100644 index 4195a714ca89..000000000000 --- a/examples/mt-pytorch-callable/client.py +++ /dev/null @@ -1,123 +0,0 @@ -import warnings -from collections import OrderedDict - -import flwr as fl -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.utils.data import DataLoader -from torchvision.datasets import CIFAR10 -from torchvision.transforms import Compose, Normalize, ToTensor -from tqdm import tqdm - - -# ############################################################################# -# 1. 
Regular PyTorch pipeline: nn.Module, train, test, and DataLoader -# ############################################################################# - -warnings.filterwarnings("ignore", category=UserWarning) -DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - -class Net(nn.Module): - """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')""" - - def __init__(self) -> None: - super(Net, self).__init__() - self.conv1 = nn.Conv2d(3, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 5 * 5, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, 10) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 5 * 5) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - return self.fc3(x) - - -def train(net, trainloader, epochs): - """Train the model on the training set.""" - criterion = torch.nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9) - for _ in range(epochs): - for images, labels in tqdm(trainloader): - optimizer.zero_grad() - criterion(net(images.to(DEVICE)), labels.to(DEVICE)).backward() - optimizer.step() - - -def test(net, testloader): - """Validate the model on the test set.""" - criterion = torch.nn.CrossEntropyLoss() - correct, loss = 0, 0.0 - with torch.no_grad(): - for images, labels in tqdm(testloader): - outputs = net(images.to(DEVICE)) - labels = labels.to(DEVICE) - loss += criterion(outputs, labels).item() - correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() - accuracy = correct / len(testloader.dataset) - return loss, accuracy - - -def load_data(): - """Load CIFAR-10 (training and test set).""" - trf = Compose([ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) - trainset = CIFAR10("./data", train=True, download=True, transform=trf) - testset = CIFAR10("./data", train=False, download=True, 
transform=trf) - return DataLoader(trainset, batch_size=32, shuffle=True), DataLoader(testset) - - -# ############################################################################# -# 2. Federation of the pipeline with Flower -# ############################################################################# - -# Load model and data (simple CNN, CIFAR-10) -net = Net().to(DEVICE) -trainloader, testloader = load_data() - - -# Define Flower client -class FlowerClient(fl.client.NumPyClient): - def get_parameters(self, config): - return [val.cpu().numpy() for _, val in net.state_dict().items()] - - def set_parameters(self, parameters): - params_dict = zip(net.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - net.load_state_dict(state_dict, strict=True) - - def fit(self, parameters, config): - self.set_parameters(parameters) - train(net, trainloader, epochs=1) - return self.get_parameters(config={}), len(trainloader.dataset), {} - - def evaluate(self, parameters, config): - self.set_parameters(parameters) - loss, accuracy = test(net, testloader) - return loss, len(testloader.dataset), {"accuracy": accuracy} - - -def client_fn(cid: str): - """.""" - return FlowerClient().to_client() - - -# To run this: `flower-client client:flower` -flower = fl.flower.Flower( - client_fn=client_fn, -) - - -if __name__ == "__main__": - # Start Flower client - fl.client.start_client( - server_address="0.0.0.0:9092", - client=FlowerClient().to_client(), - transport="grpc-rere", - ) diff --git a/examples/mt-pytorch-callable/driver.py b/examples/mt-pytorch-callable/driver.py deleted file mode 100644 index 1248672b6813..000000000000 --- a/examples/mt-pytorch-callable/driver.py +++ /dev/null @@ -1,25 +0,0 @@ -from typing import List, Tuple - -import flwr as fl -from flwr.common import Metrics - - -# Define metric aggregation function -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - # Multiply accuracy of each client by 
number of examples used - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - # Aggregate and return custom metric (weighted average) - return {"accuracy": sum(accuracies) / sum(examples)} - - -# Define strategy -strategy = fl.server.strategy.FedAvg(evaluate_metrics_aggregation_fn=weighted_average) - -# Start Flower driver -fl.driver.start_driver( - server_address="0.0.0.0:9091", - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, -) diff --git a/examples/mt-pytorch-callable/pyproject.toml b/examples/mt-pytorch-callable/pyproject.toml deleted file mode 100644 index 0d1a91836006..000000000000 --- a/examples/mt-pytorch-callable/pyproject.toml +++ /dev/null @@ -1,16 +0,0 @@ -[build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" - -[tool.poetry] -name = "quickstart-pytorch" -version = "0.1.0" -description = "PyTorch Federated Learning Quickstart with Flower" -authors = ["The Flower Authors "] - -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = { path = "../../", develop = true, extras = ["simulation", "rest"] } -torch = "1.13.1" -torchvision = "0.14.1" -tqdm = "4.65.0" diff --git a/examples/mt-pytorch-callable/requirements.txt b/examples/mt-pytorch-callable/requirements.txt deleted file mode 100644 index 797ca6db6244..000000000000 --- a/examples/mt-pytorch-callable/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -flwr>=1.0, <2.0 -torch==1.13.1 -torchvision==0.14.1 -tqdm==4.65.0 diff --git a/examples/mt-pytorch-callable/server.py b/examples/mt-pytorch-callable/server.py deleted file mode 100644 index fe691a88aba0..000000000000 --- a/examples/mt-pytorch-callable/server.py +++ /dev/null @@ -1,25 +0,0 @@ -from typing import List, Tuple - -import flwr as fl -from flwr.common import Metrics - - -# Define metric aggregation function -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - # Multiply accuracy of 
each client by number of examples used - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - # Aggregate and return custom metric (weighted average) - return {"accuracy": sum(accuracies) / sum(examples)} - - -# Define strategy -strategy = fl.server.strategy.FedAvg(evaluate_metrics_aggregation_fn=weighted_average) - -# Start Flower server -fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, -) diff --git a/examples/mt-pytorch/README.md b/examples/mt-pytorch/README.md index ef9516314e26..0f676044ee90 100644 --- a/examples/mt-pytorch/README.md +++ b/examples/mt-pytorch/README.md @@ -1,73 +1,52 @@ -# Multi-Tenant Federated Learning with Flower and PyTorch +# Flower App (PyTorch) 🧪 -This example contains experimental code. Please consult the regular PyTorch code examples ([quickstart](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch), [advanced](https://github.com/adap/flower/tree/main/examples/advanced-pytorch)) to learn how to use Flower with PyTorch. +🧪 = This example covers experimental features that might change in future versions of Flower -## Setup +Please consult the regular PyTorch code examples ([quickstart](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch), [advanced](https://github.com/adap/flower/tree/main/examples/advanced-pytorch)) to learn how to use Flower with PyTorch. -```bash -./dev/venv-reset.sh -``` - -## Run with Driver API +This how-to guide describes the deployment of a long-running Flower server. -Terminal 1: start Flower server - -```bash -flower-server -``` +## Preconditions -Terminal 2+3: start two Flower client nodes +Let's assume the following project structure: ```bash -python client.py +$ tree . +. 
+├── client.py +├── server.py +├── task.py +└── requirements.txt ``` -Terminal 4: start Driver script - -Using: +## Install dependencies ```bash -python start_driver.py +pip install -r requirements.txt ``` -Or, alternatively: +## Start the SuperLink ```bash -python driver.py +flower-superlink --insecure ``` -## Run in legacy mode +## Start the long-running Flower client -Terminal 1: start Flower server +In a new terminal window, start the first long-running Flower client: ```bash -python server.py +flower-client-app client:app --insecure ``` -Terminal 2+3: start two clients +In yet another new terminal window, start the second long-running Flower client: ```bash -python client.py +flower-client-app client:app --insecure ``` -## Run with Driver API (REST transport layer) - -Terminal 1: start Flower server and enable REST transport layer +## Start the driver ```bash -flower-server --rest -``` - -Terminal 2: start Driver script - -```bash -python driver.py -``` - -Open file `client.py` adjust `server_address` and `transport`. 
- -Terminal 3+4: start two Flower client nodes - -```bash -python client.py +python start_driver.py ``` diff --git a/examples/mt-pytorch/client.py b/examples/mt-pytorch/client.py index 23cc736fd62b..1f2db323ac34 100644 --- a/examples/mt-pytorch/client.py +++ b/examples/mt-pytorch/client.py @@ -34,9 +34,20 @@ def evaluate(self, parameters, config): return loss, len(testloader.dataset), {"accuracy": accuracy} -# Start Flower client -fl.client.start_numpy_client( - server_address="0.0.0.0:9092", # "0.0.0.0:9093" for REST - client=FlowerClient(), - transport="grpc-rere", # "rest" for REST +def client_fn(cid: str): + return FlowerClient().to_client() + + +# To run this: `flower-client client:app` +app = fl.client.ClientApp( + client_fn=client_fn, ) + + +if __name__ == "__main__": + # Start Flower client + fl.client.start_client( + server_address="0.0.0.0:9092", # "0.0.0.0:9093" for REST + client_fn=client_fn, + transport="grpc-rere", # "rest" for REST + ) diff --git a/examples/mt-pytorch/driver.py b/examples/mt-pytorch/driver.py index ad4d5e1caabe..06091c954cef 100644 --- a/examples/mt-pytorch/driver.py +++ b/examples/mt-pytorch/driver.py @@ -2,7 +2,7 @@ import random import time -from flwr.driver import GrpcDriver +from flwr.server.driver import GrpcDriver from flwr.common import ( ServerMessage, FitIns, @@ -43,7 +43,7 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: # -------------------------------------------------------------------------- Driver SDK -driver = GrpcDriver(driver_service_address="0.0.0.0:9091", certificates=None) +driver = GrpcDriver(driver_service_address="0.0.0.0:9091", root_certificates=None) # -------------------------------------------------------------------------- Driver SDK anonymous_client_nodes = False diff --git a/examples/mt-pytorch/start_driver.py b/examples/mt-pytorch/start_driver.py index 3241a548950a..307f4ebd1a3b 100644 --- a/examples/mt-pytorch/start_driver.py +++ b/examples/mt-pytorch/start_driver.py @@ -33,9 
+33,10 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: fit_metrics_aggregation_fn=weighted_average, ) -# Start Flower server -fl.driver.start_driver( - server_address="0.0.0.0:9091", - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, -) +if __name__ == "__main__": + # Start Flower server + fl.server.driver.start_driver( + server_address="0.0.0.0:9091", + config=fl.server.ServerConfig(num_rounds=3), + strategy=strategy, + ) diff --git a/examples/mt-pytorch/start_server.py b/examples/mt-pytorch/start_server.py deleted file mode 100644 index d96edd7d45ad..000000000000 --- a/examples/mt-pytorch/start_server.py +++ /dev/null @@ -1,41 +0,0 @@ -from typing import List, Tuple - -import flwr as fl -from flwr.common import Metrics - - -# Define metric aggregation function -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - examples = [num_examples for num_examples, _ in metrics] - - # Multiply accuracy of each client by number of examples used - train_losses = [num_examples * m["train_loss"] for num_examples, m in metrics] - train_accuracies = [ - num_examples * m["train_accuracy"] for num_examples, m in metrics - ] - val_losses = [num_examples * m["val_loss"] for num_examples, m in metrics] - val_accuracies = [num_examples * m["val_accuracy"] for num_examples, m in metrics] - - # Aggregate and return custom metric (weighted average) - return { - "train_loss": sum(train_losses) / sum(examples), - "train_accuracy": sum(train_accuracies) / sum(examples), - "val_loss": sum(val_losses) / sum(examples), - "val_accuracy": sum(val_accuracies) / sum(examples), - } - - -# Define strategy -strategy = fl.server.strategy.FedAvg( - fraction_fit=1.0, # Select all available clients - fraction_evaluate=0.0, # Disable evaluation - min_available_clients=2, - fit_metrics_aggregation_fn=weighted_average, -) - -# Start Flower server -fl.server.start_server( - server_address="0.0.0.0:9092", - config=fl.server.ServerConfig(num_rounds=3), 
- strategy=strategy, -) diff --git a/examples/opacus/dp_cifar_client.py b/examples/opacus/dp_cifar_client.py index bab1451ba707..cc30e7728222 100644 --- a/examples/opacus/dp_cifar_client.py +++ b/examples/opacus/dp_cifar_client.py @@ -28,7 +28,7 @@ def load_data(): model = Net() trainloader, testloader, sample_rate = load_data() -fl.client.start_numpy_client( +fl.client.start_client( server_address="127.0.0.1:8080", - client=DPCifarClient(model, trainloader, testloader), + client=DPCifarClient(model, trainloader, testloader).to_client(), ) diff --git a/examples/opacus/dp_cifar_simulation.py b/examples/opacus/dp_cifar_simulation.py index 14a9d037685b..d957caf8785c 100644 --- a/examples/opacus/dp_cifar_simulation.py +++ b/examples/opacus/dp_cifar_simulation.py @@ -1,14 +1,14 @@ import math from collections import OrderedDict -from typing import Callable, Optional, Tuple +from typing import Callable, Dict, Optional, Tuple import flwr as fl import numpy as np import torch import torchvision.transforms as transforms -from opacus.dp_model_inspector import DPModelInspector from torch.utils.data import DataLoader from torchvision.datasets import CIFAR10 +from flwr.common.typing import Scalar from dp_cifar_main import DEVICE, PARAMS, DPCifarClient, Net, test @@ -23,8 +23,6 @@ def client_fn(cid: str) -> fl.client.Client: # Load model. model = Net() # Check model is compatible with Opacus. - # inspector = DPModelInspector() - # print(f"Is the model valid? {inspector.validate(model)}") # Load data partition (divide CIFAR10 into NUM_CLIENTS distinct partitions, using 30% for validation). 
transform = transforms.Compose( @@ -45,12 +43,14 @@ def client_fn(cid: str) -> fl.client.Client: client_trainloader = DataLoader(client_trainset, PARAMS["batch_size"]) client_testloader = DataLoader(client_testset, PARAMS["batch_size"]) - return DPCifarClient(model, client_trainloader, client_testloader) + return DPCifarClient(model, client_trainloader, client_testloader).to_client() # Define an evaluation function for centralized evaluation (using whole CIFAR10 testset). def get_evaluate_fn() -> Callable[[fl.common.NDArrays], Optional[Tuple[float, float]]]: - def evaluate(weights: fl.common.NDArrays) -> Optional[Tuple[float, float]]: + def evaluate( + server_round: int, parameters: fl.common.NDArrays, config: Dict[str, Scalar] + ): transform = transforms.Compose( [ transforms.ToTensor(), @@ -63,7 +63,7 @@ def evaluate(weights: fl.common.NDArrays) -> Optional[Tuple[float, float]]: state_dict = OrderedDict( { k: torch.tensor(np.atleast_1d(v)) - for k, v in zip(model.state_dict().keys(), weights) + for k, v in zip(model.state_dict().keys(), parameters) } ) model.load_state_dict(state_dict, strict=True) @@ -82,7 +82,7 @@ def main() -> None: client_fn=client_fn, num_clients=NUM_CLIENTS, client_resources={"num_cpus": 1}, - num_rounds=3, + config=fl.server.ServerConfig(num_rounds=3), strategy=fl.server.strategy.FedAvg( fraction_fit=0.1, fraction_evaluate=0.1, evaluate_fn=get_evaluate_fn() ), diff --git a/examples/pytorch-federated-variational-autoencoder/client.py b/examples/pytorch-federated-variational-autoencoder/client.py index ceb55c79f564..fc71f7e70c0b 100644 --- a/examples/pytorch-federated-variational-autoencoder/client.py +++ b/examples/pytorch-federated-variational-autoencoder/client.py @@ -93,7 +93,9 @@ def evaluate(self, parameters, config): loss = test(net, testloader) return float(loss), len(testloader), {} - fl.client.start_numpy_client(server_address="127.0.0.1:8080", client=CifarClient()) + fl.client.start_client( + server_address="127.0.0.1:8080", 
client=CifarClient().to_client() + ) if __name__ == "__main__": diff --git a/examples/pytorch-from-centralized-to-federated/README.md b/examples/pytorch-from-centralized-to-federated/README.md index fccb14158ecd..06ee89dddcac 100644 --- a/examples/pytorch-from-centralized-to-federated/README.md +++ b/examples/pytorch-from-centralized-to-federated/README.md @@ -2,7 +2,7 @@ This example demonstrates how an already existing centralized PyTorch-based machine learning project can be federated with Flower. -This introductory example for Flower uses PyTorch, but you're not required to be a PyTorch expert to run the example. The example will help you to understand how Flower can be used to build federated learning use cases based on existing machine learning projects. This example uses [Flower Datasets](https://flower.dev/docs/datasets/) to download, partition and preprocess the CIFAR-10 dataset. +This introductory example for Flower uses PyTorch, but you're not required to be a PyTorch expert to run the example. The example will help you to understand how Flower can be used to build federated learning use cases based on existing machine learning projects. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the CIFAR-10 dataset. 
## Project Setup diff --git a/examples/pytorch-from-centralized-to-federated/client.py b/examples/pytorch-from-centralized-to-federated/client.py index 61c7e7f762b3..f89e03bc2053 100644 --- a/examples/pytorch-from-centralized-to-federated/client.py +++ b/examples/pytorch-from-centralized-to-federated/client.py @@ -93,8 +93,8 @@ def main() -> None: _ = model(next(iter(trainloader))["img"].to(DEVICE)) # Start client - client = CifarClient(model, trainloader, testloader) - fl.client.start_numpy_client(server_address="127.0.0.1:8080", client=client) + client = CifarClient(model, trainloader, testloader).to_client() + fl.client.start_client(server_address="127.0.0.1:8080", client=client) if __name__ == "__main__": diff --git a/examples/quickstart-cpp/driver.py b/examples/quickstart-cpp/driver.py index 037623ee77cf..f19cf0e9bd98 100644 --- a/examples/quickstart-cpp/driver.py +++ b/examples/quickstart-cpp/driver.py @@ -3,7 +3,7 @@ # Start Flower server for three rounds of federated learning if __name__ == "__main__": - fl.driver.start_driver( + fl.server.start_driver( server_address="0.0.0.0:9091", config=fl.server.ServerConfig(num_rounds=3), strategy=FedAvgCpp(), diff --git a/examples/quickstart-fastai/client.py b/examples/quickstart-fastai/client.py index a88abbe525dc..6bb2a751d544 100644 --- a/examples/quickstart-fastai/client.py +++ b/examples/quickstart-fastai/client.py @@ -43,7 +43,7 @@ def evaluate(self, parameters, config): # Start Flower client -fl.client.start_numpy_client( +fl.client.start_client( server_address="127.0.0.1:8080", - client=FlowerClient(), + client=FlowerClient().to_client(), ) diff --git a/examples/quickstart-huggingface/README.md b/examples/quickstart-huggingface/README.md index c1e3cc4edc06..5fdba887f181 100644 --- a/examples/quickstart-huggingface/README.md +++ b/examples/quickstart-huggingface/README.md @@ -1,6 +1,6 @@ # Federated HuggingFace Transformers using Flower and PyTorch -This introductory example to using 
[HuggingFace](https://huggingface.co) Transformers with Flower with PyTorch. This example has been extended from the [quickstart-pytorch](https://flower.dev/docs/examples/quickstart-pytorch.html) example. The training script closely follows the [HuggingFace course](https://huggingface.co/course/chapter3?fw=pt), so you are encouraged to check that out for detailed explaination for the transformer pipeline. +This introductory example to using [HuggingFace](https://huggingface.co) Transformers with Flower with PyTorch. This example has been extended from the [quickstart-pytorch](https://flower.ai/docs/examples/quickstart-pytorch.html) example. The training script closely follows the [HuggingFace course](https://huggingface.co/course/chapter3?fw=pt), so you are encouraged to check that out for a detailed explanation of the transformer pipeline. Like `quickstart-pytorch`, running this example in itself is also meant to be quite easy. @@ -62,13 +62,13 @@ Now you are ready to start the Flower clients which will participate in the lear Start client 1 in the first terminal: ```shell -python3 client.py +python3 client.py --node-id 0 ``` Start client 2 in the second terminal: ```shell -python3 client.py +python3 client.py --node-id 1 ``` You will see that PyTorch is starting a federated training. 
diff --git a/examples/quickstart-huggingface/client.py b/examples/quickstart-huggingface/client.py index 8717d710ad9c..5dc461d30536 100644 --- a/examples/quickstart-huggingface/client.py +++ b/examples/quickstart-huggingface/client.py @@ -1,58 +1,48 @@ -from collections import OrderedDict +import argparse import warnings +from collections import OrderedDict import flwr as fl import torch -import numpy as np - -import random -from torch.utils.data import DataLoader - -from datasets import load_dataset from evaluate import load as load_metric - -from transformers import AutoTokenizer, DataCollatorWithPadding +from torch.optim import AdamW +from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification -from transformers import AdamW +from transformers import AutoTokenizer, DataCollatorWithPadding + +from flwr_datasets import FederatedDataset warnings.filterwarnings("ignore", category=UserWarning) DEVICE = torch.device("cpu") CHECKPOINT = "distilbert-base-uncased" # transformer model checkpoint -def load_data(): +def load_data(node_id): """Load IMDB data (training and eval)""" - raw_datasets = load_dataset("imdb") - raw_datasets = raw_datasets.shuffle(seed=42) - - # remove unnecessary data split - del raw_datasets["unsupervised"] + fds = FederatedDataset(dataset="imdb", partitioners={"train": 1_000}) + partition = fds.load_partition(node_id) + # Divide data: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2) tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT) def tokenize_function(examples): return tokenizer(examples["text"], truncation=True) - # random 100 samples - population = random.sample(range(len(raw_datasets["train"])), 100) - - tokenized_datasets = raw_datasets.map(tokenize_function, batched=True) - tokenized_datasets["train"] = tokenized_datasets["train"].select(population) - tokenized_datasets["test"] = tokenized_datasets["test"].select(population) - - tokenized_datasets = 
tokenized_datasets.remove_columns("text") - tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + partition_train_test = partition_train_test.map(tokenize_function, batched=True) + partition_train_test = partition_train_test.remove_columns("text") + partition_train_test = partition_train_test.rename_column("label", "labels") data_collator = DataCollatorWithPadding(tokenizer=tokenizer) trainloader = DataLoader( - tokenized_datasets["train"], + partition_train_test["train"], shuffle=True, batch_size=32, collate_fn=data_collator, ) testloader = DataLoader( - tokenized_datasets["test"], batch_size=32, collate_fn=data_collator + partition_train_test["test"], batch_size=32, collate_fn=data_collator ) return trainloader, testloader @@ -88,12 +78,12 @@ def test(net, testloader): return loss, accuracy -def main(): +def main(node_id): net = AutoModelForSequenceClassification.from_pretrained( CHECKPOINT, num_labels=2 ).to(DEVICE) - trainloader, testloader = load_data() + trainloader, testloader = load_data(node_id) # Flower client class IMDBClient(fl.client.NumPyClient): @@ -118,8 +108,20 @@ def evaluate(self, parameters, config): return float(loss), len(testloader), {"accuracy": float(accuracy)} # Start client - fl.client.start_numpy_client(server_address="127.0.0.1:8080", client=IMDBClient()) + fl.client.start_client( + server_address="127.0.0.1:8080", client=IMDBClient().to_client() + ) if __name__ == "__main__": - main() + parser = argparse.ArgumentParser(description="Flower") + parser.add_argument( + "--node-id", + choices=list(range(1_000)), + required=True, + type=int, + help="Partition of the dataset divided into 1,000 iid partitions created " + "artificially.", + ) + node_id = parser.parse_args().node_id + main(node_id) diff --git a/examples/quickstart-huggingface/pyproject.toml b/examples/quickstart-huggingface/pyproject.toml index eb9687c5152c..50ba0b37f8d2 100644 --- a/examples/quickstart-huggingface/pyproject.toml +++ 
b/examples/quickstart-huggingface/pyproject.toml @@ -14,6 +14,7 @@ authors = [ [tool.poetry.dependencies] python = ">=3.8,<3.11" flwr = ">=1.0,<2.0" +flwr-datasets = ">=0.0.2,<1.0.0" torch = ">=1.13.1,<2.0" transformers = ">=4.30.0,<5.0" evaluate = ">=0.4.0,<1.0" diff --git a/examples/quickstart-huggingface/requirements.txt b/examples/quickstart-huggingface/requirements.txt index aeb2d13fc4a4..3cd5735625ba 100644 --- a/examples/quickstart-huggingface/requirements.txt +++ b/examples/quickstart-huggingface/requirements.txt @@ -1,4 +1,5 @@ flwr>=1.0, <2.0 +flwr-datasets>=0.0.2, <1.0.0 torch>=1.13.1, <2.0 transformers>=4.30.0, <5.0 evaluate>=0.4.0, <1.0 diff --git a/examples/quickstart-huggingface/run.sh b/examples/quickstart-huggingface/run.sh index c64f362086aa..e722a24a21a9 100755 --- a/examples/quickstart-huggingface/run.sh +++ b/examples/quickstart-huggingface/run.sh @@ -6,7 +6,7 @@ sleep 3 # Sleep for 3s to give the server enough time to start for i in `seq 0 1`; do echo "Starting client $i" - python client.py & + python client.py --node-id ${i}& done # This will allow you to use CTRL+C to stop all background processes diff --git a/examples/quickstart-jax/client.py b/examples/quickstart-jax/client.py index f9b056276deb..afd6f197bcde 100644 --- a/examples/quickstart-jax/client.py +++ b/examples/quickstart-jax/client.py @@ -52,4 +52,6 @@ def evaluate( # Start Flower client -fl.client.start_numpy_client(server_address="127.0.0.1:8080", client=FlowerClient()) +fl.client.start_client( + server_address="127.0.0.1:8080", client=FlowerClient().to_client() +) diff --git a/examples/quickstart-mlcube/client.py b/examples/quickstart-mlcube/client.py index 0a1962d8da8a..46ddd45f52ce 100644 --- a/examples/quickstart-mlcube/client.py +++ b/examples/quickstart-mlcube/client.py @@ -43,8 +43,9 @@ def main(): os.path.dirname(os.path.abspath(__file__)), "workspaces", workspace_name ) - fl.client.start_numpy_client( - server_address="0.0.0.0:8080", 
client=MLCubeClient(workspace=workspace) + fl.client.start_client( + server_address="0.0.0.0:8080", + client=MLCubeClient(workspace=workspace).to_client(), ) diff --git a/examples/quickstart-pandas/README.md b/examples/quickstart-pandas/README.md index 2defc468c2ef..efcda43cf34d 100644 --- a/examples/quickstart-pandas/README.md +++ b/examples/quickstart-pandas/README.md @@ -1,6 +1,7 @@ # Flower Example using Pandas -This introductory example to Flower uses Pandas, but deep knowledge of Pandas is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use case. +This introductory example to Flower uses Pandas, but deep knowledge of Pandas is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use case. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to +download, partition and preprocess the dataset. Running this example in itself is quite easy. ## Project Setup @@ -69,13 +70,13 @@ Now you are ready to start the Flower clients which will participate in the lear Start client 1 in the first terminal: ```shell -$ python3 client.py +$ python3 client.py --node-id 0 ``` Start client 2 in the second terminal: ```shell -$ python3 client.py +$ python3 client.py --node-id 1 ``` -You will see that the server is printing aggregated statistics about the dataset distributed amongst clients. Have a look to the [Flower Quickstarter documentation](https://flower.dev/docs/quickstart-pandas.html) for a detailed explanation. +You will see that the server is printing aggregated statistics about the dataset distributed amongst clients. Have a look to the [Flower Quickstarter documentation](https://flower.ai/docs/quickstart-pandas.html) for a detailed explanation. 
diff --git a/examples/quickstart-pandas/client.py b/examples/quickstart-pandas/client.py index 3feab3f6a0f4..8585922e4572 100644 --- a/examples/quickstart-pandas/client.py +++ b/examples/quickstart-pandas/client.py @@ -1,4 +1,4 @@ -import warnings +import argparse from typing import Dict, List, Tuple import numpy as np @@ -6,10 +6,10 @@ import flwr as fl +from flwr_datasets import FederatedDataset -df = pd.read_csv("./data/client.csv") -column_names = ["sepal length (cm)", "sepal width (cm)"] +column_names = ["sepal_length", "sepal_width"] def compute_hist(df: pd.DataFrame, col_name: str) -> np.ndarray: @@ -19,23 +19,47 @@ def compute_hist(df: pd.DataFrame, col_name: str) -> np.ndarray: # Define Flower client class FlowerClient(fl.client.NumPyClient): + def __init__(self, X: pd.DataFrame): + self.X = X + def fit( self, parameters: List[np.ndarray], config: Dict[str, str] ) -> Tuple[List[np.ndarray], int, Dict]: hist_list = [] # Execute query locally - for c in column_names: - hist = compute_hist(df, c) + for c in self.X.columns: + hist = compute_hist(self.X, c) hist_list.append(hist) return ( hist_list, - len(df), + len(self.X), {}, ) -# Start Flower client -fl.client.start_numpy_client( - server_address="127.0.0.1:8080", - client=FlowerClient(), -) +if __name__ == "__main__": + N_CLIENTS = 2 + + parser = argparse.ArgumentParser(description="Flower") + parser.add_argument( + "--node-id", + type=int, + choices=range(0, N_CLIENTS), + required=True, + help="Specifies the node id of artificially partitioned datasets.", + ) + args = parser.parse_args() + partition_id = args.node_id + + # Load the partition data + fds = FederatedDataset(dataset="hitorilabs/iris", partitioners={"train": N_CLIENTS}) + + dataset = fds.load_partition(partition_id, "train").with_format("pandas")[:] + # Use just the specified columns + X = dataset[column_names] + + # Start Flower client + fl.client.start_client( + server_address="127.0.0.1:8080", + client=FlowerClient(X).to_client(), + ) diff 
--git a/examples/quickstart-pandas/pyproject.toml b/examples/quickstart-pandas/pyproject.toml index de20eaf61d63..6229210d6488 100644 --- a/examples/quickstart-pandas/pyproject.toml +++ b/examples/quickstart-pandas/pyproject.toml @@ -12,6 +12,6 @@ maintainers = ["The Flower Authors "] [tool.poetry.dependencies] python = ">=3.8,<3.11" flwr = ">=1.0,<2.0" +flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } numpy = "1.23.2" pandas = "2.0.0" -scikit-learn = "1.3.1" diff --git a/examples/quickstart-pandas/requirements.txt b/examples/quickstart-pandas/requirements.txt index 14308a55faaf..d44a3c6adab9 100644 --- a/examples/quickstart-pandas/requirements.txt +++ b/examples/quickstart-pandas/requirements.txt @@ -1,4 +1,4 @@ flwr>=1.0, <2.0 +flwr-datasets[vision]>=0.0.2, <1.0.0 numpy==1.23.2 pandas==2.0.0 -scikit-learn==1.3.1 diff --git a/examples/quickstart-pandas/run.sh b/examples/quickstart-pandas/run.sh index 6b85ce30bf45..571fa8bfb3e4 100755 --- a/examples/quickstart-pandas/run.sh +++ b/examples/quickstart-pandas/run.sh @@ -2,13 +2,9 @@ echo "Starting server" python server.py & sleep 3 # Sleep for 3s to give the server enough time to start -# Download data -mkdir -p ./data -python -c "from sklearn.datasets import load_iris; load_iris(as_frame=True)['data'].to_csv('./data/client.csv')" - for i in `seq 0 1`; do echo "Starting client $i" - python client.py & + python client.py --node-id ${i} & done # This will allow you to use CTRL+C to stop all background processes diff --git a/examples/quickstart-pandas/server.py b/examples/quickstart-pandas/server.py index c82304374836..af4c2a796788 100644 --- a/examples/quickstart-pandas/server.py +++ b/examples/quickstart-pandas/server.py @@ -1,5 +1,4 @@ -import pickle -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Dict, List, Optional, Tuple, Union import numpy as np @@ -9,9 +8,6 @@ EvaluateRes, FitIns, FitRes, - Metrics, - MetricsAggregationFn, - NDArrays, Parameters, Scalar, 
ndarrays_to_parameters, @@ -23,11 +19,6 @@ class FedAnalytics(Strategy): - def __init__( - self, compute_fns: List[Callable] = None, col_names: List[str] = None - ) -> None: - super().__init__() - def initialize_parameters( self, client_manager: Optional[ClientManager] = None ) -> Optional[Parameters]: diff --git a/examples/quickstart-pytorch-lightning/README.md b/examples/quickstart-pytorch-lightning/README.md index 1287b50bca65..1d404a5d714f 100644 --- a/examples/quickstart-pytorch-lightning/README.md +++ b/examples/quickstart-pytorch-lightning/README.md @@ -1,6 +1,6 @@ # Flower Example using PyTorch Lightning -This introductory example to Flower uses PyTorch, but deep knowledge of PyTorch Lightning is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use case. Running this example in itself is quite easy. This example uses [Flower Datasets](https://flower.dev/docs/datasets/) to download, partition and preprocess the MNIST dataset. +This introductory example to Flower uses PyTorch, but deep knowledge of PyTorch Lightning is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use case. Running this example in itself is quite easy. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the MNIST dataset. 
## Project Setup diff --git a/examples/quickstart-pytorch-lightning/client.py b/examples/quickstart-pytorch-lightning/client.py index 1dabd5732b9b..fc5f1ee03cfe 100644 --- a/examples/quickstart-pytorch-lightning/client.py +++ b/examples/quickstart-pytorch-lightning/client.py @@ -72,8 +72,8 @@ def main() -> None: train_loader, val_loader, test_loader = mnist.load_data(node_id) # Flower client - client = FlowerClient(model, train_loader, val_loader, test_loader) - fl.client.start_numpy_client(server_address="127.0.0.1:8080", client=client) + client = FlowerClient(model, train_loader, val_loader, test_loader).to_client() + fl.client.start_client(server_address="127.0.0.1:8080", client=client) if __name__ == "__main__": diff --git a/examples/quickstart-pytorch/README.md b/examples/quickstart-pytorch/README.md index 6de0dcf7ab32..3b9b9b310608 100644 --- a/examples/quickstart-pytorch/README.md +++ b/examples/quickstart-pytorch/README.md @@ -1,6 +1,6 @@ # Flower Example using PyTorch -This introductory example to Flower uses PyTorch, but deep knowledge of PyTorch is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use case. Running this example in itself is quite easy. This example uses [Flower Datasets](https://flower.dev/docs/datasets/) to download, partition and preprocess the CIFAR-10 dataset. +This introductory example to Flower uses PyTorch, but deep knowledge of PyTorch is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use case. Running this example in itself is quite easy. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the CIFAR-10 dataset. 
## Project Setup diff --git a/examples/quickstart-pytorch/client.py b/examples/quickstart-pytorch/client.py index 1edb42d1ec81..b5ea4c94dd21 100644 --- a/examples/quickstart-pytorch/client.py +++ b/examples/quickstart-pytorch/client.py @@ -132,7 +132,7 @@ def evaluate(self, parameters, config): # Start Flower client -fl.client.start_numpy_client( +fl.client.start_client( server_address="127.0.0.1:8080", - client=FlowerClient(), + client=FlowerClient().to_client(), ) diff --git a/examples/quickstart-sklearn-tabular/README.md b/examples/quickstart-sklearn-tabular/README.md index d62525c96c18..373aaea5999c 100644 --- a/examples/quickstart-sklearn-tabular/README.md +++ b/examples/quickstart-sklearn-tabular/README.md @@ -3,7 +3,7 @@ This example of Flower uses `scikit-learn`'s `LogisticRegression` model to train a federated learning system on "iris" (tabular) dataset. It will help you understand how to adapt Flower for use with `scikit-learn`. -Running this example in itself is quite easy. This example uses [Flower Datasets](https://flower.dev/docs/datasets/) to +Running this example in itself is quite easy. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the dataset. 
## Project Setup diff --git a/examples/quickstart-tabnet/client.py b/examples/quickstart-tabnet/client.py index da391a95710a..2289b1b55b3d 100644 --- a/examples/quickstart-tabnet/client.py +++ b/examples/quickstart-tabnet/client.py @@ -79,4 +79,6 @@ def evaluate(self, parameters, config): # Start Flower client -fl.client.start_numpy_client(server_address="127.0.0.1:8080", client=TabNetClient()) +fl.client.start_client( + server_address="127.0.0.1:8080", client=TabNetClient().to_client() +) diff --git a/examples/quickstart-tensorflow/README.md b/examples/quickstart-tensorflow/README.md index 92d38c9340d7..8d5e9434b086 100644 --- a/examples/quickstart-tensorflow/README.md +++ b/examples/quickstart-tensorflow/README.md @@ -1,7 +1,7 @@ # Flower Example using TensorFlow/Keras This introductory example to Flower uses Keras but deep knowledge of Keras is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use case. -Running this example in itself is quite easy. This example uses [Flower Datasets](https://flower.dev/docs/datasets/) to download, partition and preprocess the CIFAR-10 dataset. +Running this example in itself is quite easy. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the CIFAR-10 dataset. 
## Project Setup diff --git a/examples/quickstart-tensorflow/client.py b/examples/quickstart-tensorflow/client.py index d998adbdd899..37abbbcc46ec 100644 --- a/examples/quickstart-tensorflow/client.py +++ b/examples/quickstart-tensorflow/client.py @@ -52,4 +52,6 @@ def evaluate(self, parameters, config): # Start Flower client -fl.client.start_numpy_client(server_address="127.0.0.1:8080", client=CifarClient()) +fl.client.start_client( + server_address="127.0.0.1:8080", client=CifarClient().to_client() +) diff --git a/examples/quickstart-xgboost-horizontal/.gitignore b/examples/quickstart-xgboost-horizontal/.gitignore deleted file mode 100644 index 4a6ddf5b9142..000000000000 --- a/examples/quickstart-xgboost-horizontal/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -dataset - diff --git a/examples/quickstart-xgboost-horizontal/README.md b/examples/quickstart-xgboost-horizontal/README.md deleted file mode 100644 index 346a33da7412..000000000000 --- a/examples/quickstart-xgboost-horizontal/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# Federated XGBoost in Horizontal Setting (PyTorch) - -[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/adap/flower/blob/main/examples/quickstart-xgboost-horizontal/code_horizontal.ipynb) (or open the [Jupyter Notebook](https://github.com/adap/flower/blob/main/examples/quickstart-xgboost-horizontal/code_horizontal.ipynb)) - -This example demonstrates a federated XGBoost using Flower with PyTorch. This is a novel method to conduct federated XGBoost in the horizontal setting. It differs from the previous methods in the following ways: - -- We aggregate and conduct federated learning on client tree’s prediction outcomes by sending clients' built XGBoost trees to the server and then sharing to the clients. -- The exchange of privacy-sensitive information (gradients) is not needed. 
-- The model is a CNN with 1D convolution kernel size = the number of XGBoost trees in the client tree ensembles. -- Using 1D convolution, we make the tree learning rate (a hyperparameter of XGBoost) learnable. - -## Project Setup - -This implementation can be easily run in Google Colab with the button at the top of the README or as a standalone Jupyter notebook, -it will automatically download and extract the example data inside a `dataset` folder and `binary_classification` and `regression` sub-folders. - -## Datasets - -This implementation supports both binary classification and regression datasets in SVM light format, loaded from ([LIBSVM Data](https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/)). Simply download the dataset files from the website and put them in the folder location indicated above. diff --git a/examples/quickstart-xgboost-horizontal/code_horizontal.ipynb b/examples/quickstart-xgboost-horizontal/code_horizontal.ipynb deleted file mode 100644 index 4d76e0c26023..000000000000 --- a/examples/quickstart-xgboost-horizontal/code_horizontal.ipynb +++ /dev/null @@ -1,1560 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Initialization" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "executionInfo": { - "elapsed": 15871, - "status": "ok", - "timestamp": 1670356049976, - "user": { - "displayName": "Chenyang Ma", - "userId": "17975430055716133031" - }, - "user_tz": 0 - }, - "outputId": "2c588ea0-a383-4461-e633-794e73d0f57a" - }, - "outputs": [], - "source": [ - "import os\n", - "import urllib.request\n", - "import bz2\n", - "import shutil\n", - "\n", - "CLASSIFICATION_PATH = os.path.join(\"dataset\", \"binary_classification\")\n", - "REGRESSION_PATH = os.path.join(\"dataset\", \"regression\")\n", - "\n", - "if not os.path.exists(CLASSIFICATION_PATH):\n", - " os.makedirs(CLASSIFICATION_PATH)\n", 
- " urllib.request.urlretrieve(\n", - " \"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/cod-rna\",\n", - " f\"{os.path.join(CLASSIFICATION_PATH, 'cod-rna')}\",\n", - " )\n", - " urllib.request.urlretrieve(\n", - " \"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/cod-rna.t\",\n", - " f\"{os.path.join(CLASSIFICATION_PATH, 'cod-rna.t')}\",\n", - " )\n", - " urllib.request.urlretrieve(\n", - " \"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/cod-rna.r\",\n", - " f\"{os.path.join(CLASSIFICATION_PATH, 'cod-rna.r')}\",\n", - " )\n", - " urllib.request.urlretrieve(\n", - " \"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/ijcnn1.t.bz2\",\n", - " f\"{os.path.join(CLASSIFICATION_PATH, 'ijcnn1.t.bz2')}\",\n", - " )\n", - " urllib.request.urlretrieve(\n", - " \"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/ijcnn1.tr.bz2\",\n", - " f\"{os.path.join(CLASSIFICATION_PATH, 'ijcnn1.tr.bz2')}\",\n", - " )\n", - " for filepath in os.listdir(CLASSIFICATION_PATH):\n", - " if filepath[-3:] == \"bz2\":\n", - " abs_filepath = os.path.join(CLASSIFICATION_PATH, filepath)\n", - " with bz2.BZ2File(abs_filepath) as fr, open(abs_filepath[:-4], \"wb\") as fw:\n", - " shutil.copyfileobj(fr, fw)\n", - "\n", - "if not os.path.exists(REGRESSION_PATH):\n", - " os.makedirs(REGRESSION_PATH)\n", - " urllib.request.urlretrieve(\n", - " \"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression/eunite2001\",\n", - " f\"{os.path.join(REGRESSION_PATH, 'eunite2001')}\",\n", - " )\n", - " urllib.request.urlretrieve(\n", - " \"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression/eunite2001.t\",\n", - " f\"{os.path.join(REGRESSION_PATH, 'eunite2001.t')}\",\n", - " )\n", - " urllib.request.urlretrieve(\n", - " \"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression/YearPredictionMSD.bz2\",\n", - " f\"{os.path.join(REGRESSION_PATH, 'YearPredictionMSD.bz2')}\",\n", - " )\n", - " 
urllib.request.urlretrieve(\n", - " \"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression/YearPredictionMSD.t.bz2\",\n", - " f\"{os.path.join(REGRESSION_PATH, 'YearPredictionMSD.t.bz2')}\",\n", - " )\n", - " for filepath in os.listdir(REGRESSION_PATH):\n", - " if filepath[-3:] == \"bz2\":\n", - " abs_filepath = os.path.join(REGRESSION_PATH, filepath)\n", - " with bz2.BZ2File(abs_filepath) as fr, open(abs_filepath[:-4], \"wb\") as fw:\n", - " shutil.copyfileobj(fr, fw)\n", - "\n", - "\n", - "!nvidia-smi\n", - "!pip install matplotlib scikit-learn tqdm torch torchmetrics torchsummary xgboost\n", - "!pip install -U \"flwr-nightly[simulation]\"" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Import relevant modules" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "executionInfo": { - "elapsed": 7, - "status": "ok", - "timestamp": 1670356049977, - "user": { - "displayName": "Chenyang Ma", - "userId": "17975430055716133031" - }, - "user_tz": 0 - }, - "outputId": "5289e33e-e18e-491b-d536-6b1052598994" - }, - "outputs": [], - "source": [ - "import xgboost as xgb\n", - "from xgboost import XGBClassifier, XGBRegressor\n", - "from sklearn.metrics import mean_squared_error, accuracy_score\n", - "from sklearn.datasets import load_svmlight_file\n", - "\n", - "import numpy as np\n", - "import torch, torch.nn as nn\n", - "import torch.nn.functional as F\n", - "import torchvision\n", - "from torchmetrics import Accuracy, MeanSquaredError\n", - "from tqdm import trange, tqdm\n", - "from torchsummary import summary\n", - "from torch.utils.data import DataLoader, Dataset, random_split\n", - "\n", - "print(\"Imported modules.\")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Import Flower relevant modules for Federated XGBoost" - ] - }, - { - "cell_type": "code", - "execution_count": null, - 
"metadata": {}, - "outputs": [], - "source": [ - "import flwr as fl\n", - "from flwr.common.typing import Parameters\n", - "from collections import OrderedDict\n", - "from typing import Any, Dict, List, Optional, Tuple, Union\n", - "from flwr.common import NDArray, NDArrays\n", - "\n", - "print(\"Imported modules.\")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Define utility function for xgboost trees" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from matplotlib import pyplot as plt # pylint: disable=E0401\n", - "\n", - "\n", - "def plot_xgbtree(tree: Union[XGBClassifier, XGBRegressor], n_tree: int) -> None:\n", - " \"\"\"Visualize the built xgboost tree.\"\"\"\n", - " xgb.plot_tree(tree, num_trees=n_tree)\n", - " plt.rcParams[\"figure.figsize\"] = [50, 10]\n", - " plt.show()\n", - "\n", - "\n", - "def construct_tree(\n", - " dataset: Dataset, label: NDArray, n_estimators: int, tree_type: str\n", - ") -> Union[XGBClassifier, XGBRegressor]:\n", - " \"\"\"Construct a xgboost tree form tabular dataset.\"\"\"\n", - " if tree_type == \"BINARY\":\n", - " tree = xgb.XGBClassifier(\n", - " objective=\"binary:logistic\",\n", - " learning_rate=0.1,\n", - " max_depth=8,\n", - " n_estimators=n_estimators,\n", - " subsample=0.8,\n", - " colsample_bylevel=1,\n", - " colsample_bynode=1,\n", - " colsample_bytree=1,\n", - " alpha=5,\n", - " gamma=5,\n", - " num_parallel_tree=1,\n", - " min_child_weight=1,\n", - " )\n", - "\n", - " elif tree_type == \"REG\":\n", - " tree = xgb.XGBRegressor(\n", - " objective=\"reg:squarederror\",\n", - " learning_rate=0.1,\n", - " max_depth=8,\n", - " n_estimators=n_estimators,\n", - " subsample=0.8,\n", - " colsample_bylevel=1,\n", - " colsample_bynode=1,\n", - " colsample_bytree=1,\n", - " alpha=5,\n", - " gamma=5,\n", - " num_parallel_tree=1,\n", - " min_child_weight=1,\n", - " )\n", - "\n", - " tree.fit(dataset, 
label)\n", - " return tree\n", - "\n", - "\n", - "def construct_tree_from_loader(\n", - " dataset_loader: DataLoader, n_estimators: int, tree_type: str\n", - ") -> Union[XGBClassifier, XGBRegressor]:\n", - " \"\"\"Construct a xgboost tree form tabular dataset loader.\"\"\"\n", - " for dataset in dataset_loader:\n", - " data, label = dataset[0], dataset[1]\n", - " return construct_tree(data, label, n_estimators, tree_type)\n", - "\n", - "\n", - "def single_tree_prediction(\n", - " tree: Union[XGBClassifier, XGBRegressor], n_tree: int, dataset: NDArray\n", - ") -> Optional[NDArray]:\n", - " \"\"\"Extract the prediction result of a single tree in the xgboost tree\n", - " ensemble.\"\"\"\n", - " # How to access a single tree\n", - " # https://github.com/bmreiniger/datascience.stackexchange/blob/master/57905.ipynb\n", - " num_t = len(tree.get_booster().get_dump())\n", - " if n_tree > num_t:\n", - " print(\n", - " \"The tree index to be extracted is larger than the total number of trees.\"\n", - " )\n", - " return None\n", - "\n", - " return tree.predict( # type: ignore\n", - " dataset, iteration_range=(n_tree, n_tree + 1), output_margin=True\n", - " )\n", - "\n", - "\n", - "def tree_encoding( # pylint: disable=R0914\n", - " trainloader: DataLoader,\n", - " client_trees: Union[\n", - " Tuple[XGBClassifier, int],\n", - " Tuple[XGBRegressor, int],\n", - " List[Union[Tuple[XGBClassifier, int], Tuple[XGBRegressor, int]]],\n", - " ],\n", - " client_tree_num: int,\n", - " client_num: int,\n", - ") -> Optional[Tuple[NDArray, NDArray]]:\n", - " \"\"\"Transform the tabular dataset into prediction results using the\n", - " aggregated xgboost tree ensembles from all clients.\"\"\"\n", - " if trainloader is None:\n", - " return None\n", - "\n", - " for local_dataset in trainloader:\n", - " x_train, y_train = local_dataset[0], local_dataset[1]\n", - "\n", - " x_train_enc = np.zeros((x_train.shape[0], client_num * client_tree_num))\n", - " x_train_enc = np.array(x_train_enc, 
copy=True)\n", - "\n", - " temp_trees: Any = None\n", - " if isinstance(client_trees, list) is False:\n", - " temp_trees = [client_trees[0]] * client_num\n", - " elif isinstance(client_trees, list) and len(client_trees) != client_num:\n", - " temp_trees = [client_trees[0][0]] * client_num\n", - " else:\n", - " cids = []\n", - " temp_trees = []\n", - " for i, _ in enumerate(client_trees):\n", - " temp_trees.append(client_trees[i][0]) # type: ignore\n", - " cids.append(client_trees[i][1]) # type: ignore\n", - " sorted_index = np.argsort(np.asarray(cids))\n", - " temp_trees = np.asarray(temp_trees)[sorted_index]\n", - "\n", - " for i, _ in enumerate(temp_trees):\n", - " for j in range(client_tree_num):\n", - " x_train_enc[:, i * client_tree_num + j] = single_tree_prediction(\n", - " temp_trees[i], j, x_train\n", - " )\n", - "\n", - " x_train_enc32: Any = np.float32(x_train_enc)\n", - " y_train32: Any = np.float32(y_train)\n", - "\n", - " x_train_enc32, y_train32 = torch.from_numpy(\n", - " np.expand_dims(x_train_enc32, axis=1) # type: ignore\n", - " ), torch.from_numpy(\n", - " np.expand_dims(y_train32, axis=-1) # type: ignore\n", - " )\n", - " return x_train_enc32, y_train32" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Manually download and load the tabular dataset from LIBSVM data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "executionInfo": { - "elapsed": 26613, - "status": "ok", - "timestamp": 1670356076585, - "user": { - "displayName": "Chenyang Ma", - "userId": "17975430055716133031" - }, - "user_tz": 0 - }, - "outputId": "22843504-faf0-44cf-aedd-1df8d0ec87a6" - }, - "outputs": [], - "source": [ - "# Datasets can be downloaded from LIBSVM Data: https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/\n", - "binary_train = [\"cod-rna.t\", \"cod-rna\", \"ijcnn1.t\"]\n", - "binary_test = [\"cod-rna.r\", \"cod-rna.t\", 
\"ijcnn1.tr\"]\n", - "reg_train = [\"eunite2001\", \"YearPredictionMSD\"]\n", - "reg_test = [\"eunite2001.t\", \"YearPredictionMSD.t\"]\n", - "\n", - "# Define the type of training task. Binary classification: BINARY; Regression: REG\n", - "task_types = [\"BINARY\", \"REG\"]\n", - "task_type = task_types[0]\n", - "\n", - "# Select the downloaded training and test dataset\n", - "if task_type == \"BINARY\":\n", - " dataset_path = \"dataset/binary_classification/\"\n", - " train = binary_train[0]\n", - " test = binary_test[0]\n", - "elif task_type == \"REG\":\n", - " dataset_path = \"dataset/regression/\"\n", - " train = reg_train[0]\n", - " test = reg_test[0]\n", - "\n", - "data_train = load_svmlight_file(dataset_path + train, zero_based=False)\n", - "data_test = load_svmlight_file(dataset_path + test, zero_based=False)\n", - "\n", - "print(\"Task type selected is: \" + task_type)\n", - "print(\"Training dataset is: \" + train)\n", - "print(\"Test dataset is: \" + test)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Preprocess the tabular dataset" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "class TreeDataset(Dataset):\n", - " def __init__(self, data: NDArray, labels: NDArray) -> None:\n", - " self.labels = labels\n", - " self.data = data\n", - "\n", - " def __len__(self) -> int:\n", - " return len(self.labels)\n", - "\n", - " def __getitem__(self, idx: int) -> Dict[int, NDArray]:\n", - " label = self.labels[idx]\n", - " data = self.data[idx, :]\n", - " sample = {0: data, 1: label}\n", - " return sample\n", - "\n", - "\n", - "X_train = data_train[0].toarray()\n", - "y_train = data_train[1]\n", - "X_test = data_test[0].toarray()\n", - "y_test = data_test[1]\n", - "X_train.flags.writeable = True\n", - "y_train.flags.writeable = True\n", - "X_test.flags.writeable = True\n", - "y_test.flags.writeable = True\n", - "\n", - "# If the feature dimensions 
of the trainset and testset do not agree,\n", - "# specify n_features in the load_svmlight_file function in the above cell.\n", - "# https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_svmlight_file.html\n", - "print(\"Feature dimension of the dataset:\", X_train.shape[1])\n", - "print(\"Size of the trainset:\", X_train.shape[0])\n", - "print(\"Size of the testset:\", X_test.shape[0])\n", - "assert X_train.shape[1] == X_test.shape[1]\n", - "\n", - "if task_type == \"BINARY\":\n", - " y_train[y_train == -1] = 0\n", - " y_test[y_test == -1] = 0\n", - "\n", - "trainset = TreeDataset(np.array(X_train, copy=True), np.array(y_train, copy=True))\n", - "testset = TreeDataset(np.array(X_test, copy=True), np.array(y_test, copy=True))" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conduct tabular dataset partition for Federated Learning" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def get_dataloader(\n", - " dataset: Dataset, partition: str, batch_size: Union[int, str]\n", - ") -> DataLoader:\n", - " if batch_size == \"whole\":\n", - " batch_size = len(dataset)\n", - " return DataLoader(\n", - " dataset, batch_size=batch_size, pin_memory=True, shuffle=(partition == \"train\")\n", - " )\n", - "\n", - "\n", - "# https://github.com/adap/flower\n", - "def do_fl_partitioning(\n", - " trainset: Dataset,\n", - " testset: Dataset,\n", - " pool_size: int,\n", - " batch_size: Union[int, str],\n", - " val_ratio: float = 0.0,\n", - ") -> Tuple[DataLoader, DataLoader, DataLoader]:\n", - " # Split training set into `num_clients` partitions to simulate different local datasets\n", - " partition_size = len(trainset) // pool_size\n", - " lengths = [partition_size] * pool_size\n", - " if sum(lengths) != len(trainset):\n", - " lengths[-1] = len(trainset) - sum(lengths[0:-1])\n", - " datasets = random_split(trainset, lengths, 
torch.Generator().manual_seed(0))\n", - "\n", - " # Split each partition into train/val and create DataLoader\n", - " trainloaders = []\n", - " valloaders = []\n", - " for ds in datasets:\n", - " len_val = int(len(ds) * val_ratio)\n", - " len_train = len(ds) - len_val\n", - " lengths = [len_train, len_val]\n", - " ds_train, ds_val = random_split(ds, lengths, torch.Generator().manual_seed(0))\n", - " trainloaders.append(get_dataloader(ds_train, \"train\", batch_size))\n", - " if len_val != 0:\n", - " valloaders.append(get_dataloader(ds_val, \"val\", batch_size))\n", - " else:\n", - " valloaders = None\n", - " testloader = get_dataloader(testset, \"test\", batch_size)\n", - " return trainloaders, valloaders, testloader" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Define global variables for Federated XGBoost Learning" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# The number of clients participated in the federated learning\n", - "client_num = 5\n", - "\n", - "# The number of XGBoost trees in the tree ensemble that will be built for each client\n", - "client_tree_num = 500 // client_num" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Build global XGBoost tree for comparison" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "executionInfo": { - "elapsed": 1080216, - "status": "ok", - "timestamp": 1670357156788, - "user": { - "displayName": "Chenyang Ma", - "userId": "17975430055716133031" - }, - "user_tz": 0 - }, - "outputId": "d56f2821-5cd5-49ff-c5dc-f8d088eed799" - }, - "outputs": [], - "source": [ - "global_tree = construct_tree(X_train, y_train, client_tree_num, task_type)\n", - "preds_train = global_tree.predict(X_train)\n", - "preds_test = global_tree.predict(X_test)\n", - "\n", - "if task_type == \"BINARY\":\n", - " 
result_train = accuracy_score(y_train, preds_train)\n", - " result_test = accuracy_score(y_test, preds_test)\n", - " print(\"Global XGBoost Training Accuracy: %f\" % (result_train))\n", - " print(\"Global XGBoost Testing Accuracy: %f\" % (result_test))\n", - "elif task_type == \"REG\":\n", - " result_train = mean_squared_error(y_train, preds_train)\n", - " result_test = mean_squared_error(y_test, preds_test)\n", - " print(\"Global XGBoost Training MSE: %f\" % (result_train))\n", - " print(\"Global XGBoost Testing MSE: %f\" % (result_test))\n", - "\n", - "print(global_tree)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Simulate local XGBoost trees on clients for comparison" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "executionInfo": { - "elapsed": 242310, - "status": "ok", - "timestamp": 1670357399084, - "user": { - "displayName": "Chenyang Ma", - "userId": "17975430055716133031" - }, - "user_tz": 0 - }, - "outputId": "0739df9f-84de-4749-8de1-7bd7c6a32ccc" - }, - "outputs": [], - "source": [ - "client_trees_comparison = []\n", - "trainloaders, _, testloader = do_fl_partitioning(\n", - " trainset, testset, pool_size=client_num, batch_size=\"whole\", val_ratio=0.0\n", - ")\n", - "\n", - "for i, trainloader in enumerate(trainloaders):\n", - " for local_dataset in trainloader:\n", - " local_X_train, local_y_train = local_dataset[0], local_dataset[1]\n", - " tree = construct_tree(local_X_train, local_y_train, client_tree_num, task_type)\n", - " client_trees_comparison.append(tree)\n", - "\n", - " preds_train = client_trees_comparison[-1].predict(local_X_train)\n", - " preds_test = client_trees_comparison[-1].predict(X_test)\n", - "\n", - " if task_type == \"BINARY\":\n", - " result_train = accuracy_score(local_y_train, preds_train)\n", - " result_test = accuracy_score(y_test, preds_test)\n", - " print(\"Local Client %d 
XGBoost Training Accuracy: %f\" % (i, result_train))\n", - " print(\"Local Client %d XGBoost Testing Accuracy: %f\" % (i, result_test))\n", - " elif task_type == \"REG\":\n", - " result_train = mean_squared_error(local_y_train, preds_train)\n", - " result_test = mean_squared_error(y_test, preds_test)\n", - " print(\"Local Client %d XGBoost Training MSE: %f\" % (i, result_train))\n", - " print(\"Local Client %d XGBoost Testing MSE: %f\" % (i, result_test))" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Centralized Federated XGBoost\n", - "#### Create 1D convolutional neural network on trees prediction results. \n", - "#### 1D kernel size == client_tree_num\n", - "#### Make the learning rate of the tree ensembles learnable." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "executionInfo": { - "elapsed": 38, - "status": "ok", - "timestamp": 1670363021675, - "user": { - "displayName": "Chenyang Ma", - "userId": "17975430055716133031" - }, - "user_tz": 0 - } - }, - "outputs": [], - "source": [ - "class CNN(nn.Module):\n", - " def __init__(self, n_channel: int = 64) -> None:\n", - " super(CNN, self).__init__()\n", - " n_out = 1\n", - " self.task_type = task_type\n", - " self.conv1d = nn.Conv1d(\n", - " 1, n_channel, kernel_size=client_tree_num, stride=client_tree_num, padding=0\n", - " )\n", - " self.layer_direct = nn.Linear(n_channel * client_num, n_out)\n", - " self.ReLU = nn.ReLU()\n", - " self.Sigmoid = nn.Sigmoid()\n", - " self.Identity = nn.Identity()\n", - "\n", - " # Add weight initialization\n", - " for layer in self.modules():\n", - " if isinstance(layer, nn.Linear):\n", - " nn.init.kaiming_uniform_(\n", - " layer.weight, mode=\"fan_in\", nonlinearity=\"relu\"\n", - " )\n", - "\n", - " def forward(self, x: torch.Tensor) -> torch.Tensor:\n", - " x = self.ReLU(self.conv1d(x))\n", - " x = x.flatten(start_dim=1)\n", - " x = self.ReLU(x)\n", - " if self.task_type == \"BINARY\":\n", 
- " x = self.Sigmoid(self.layer_direct(x))\n", - " elif self.task_type == \"REG\":\n", - " x = self.Identity(self.layer_direct(x))\n", - " return x\n", - "\n", - " def get_weights(self) -> fl.common.NDArrays:\n", - " \"\"\"Get model weights as a list of NumPy ndarrays.\"\"\"\n", - " return [\n", - " np.array(val.cpu().numpy(), copy=True)\n", - " for _, val in self.state_dict().items()\n", - " ]\n", - "\n", - " def set_weights(self, weights: fl.common.NDArrays) -> None:\n", - " \"\"\"Set model weights from a list of NumPy ndarrays.\"\"\"\n", - " layer_dict = {}\n", - " for k, v in zip(self.state_dict().keys(), weights):\n", - " if v.ndim != 0:\n", - " layer_dict[k] = torch.Tensor(np.array(v, copy=True))\n", - " state_dict = OrderedDict(layer_dict)\n", - " self.load_state_dict(state_dict, strict=True)\n", - "\n", - "\n", - "def train(\n", - " task_type: str,\n", - " net: CNN,\n", - " trainloader: DataLoader,\n", - " device: torch.device,\n", - " num_iterations: int,\n", - " log_progress: bool = True,\n", - ") -> Tuple[float, float, int]:\n", - " # Define loss and optimizer\n", - " if task_type == \"BINARY\":\n", - " criterion = nn.BCELoss()\n", - " elif task_type == \"REG\":\n", - " criterion = nn.MSELoss()\n", - " # optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9, weight_decay=1e-6)\n", - " optimizer = torch.optim.Adam(net.parameters(), lr=0.0001, betas=(0.9, 0.999))\n", - "\n", - " def cycle(iterable):\n", - " \"\"\"Repeats the contents of the train loader, in case it gets exhausted in 'num_iterations'.\"\"\"\n", - " while True:\n", - " for x in iterable:\n", - " yield x\n", - "\n", - " # Train the network\n", - " net.train()\n", - " total_loss, total_result, n_samples = 0.0, 0.0, 0\n", - " pbar = (\n", - " tqdm(iter(cycle(trainloader)), total=num_iterations, desc=f\"TRAIN\")\n", - " if log_progress\n", - " else iter(cycle(trainloader))\n", - " )\n", - "\n", - " # Unusually, this training is formulated in terms of number of 
updates/iterations/batches processed\n", - " # by the network. This will be helpful later on, when partitioning the data across clients: resulting\n", - " # in differences between dataset sizes and hence inconsistent numbers of updates per 'epoch'.\n", - " for i, data in zip(range(num_iterations), pbar):\n", - " tree_outputs, labels = data[0].to(device), data[1].to(device)\n", - " optimizer.zero_grad()\n", - "\n", - " outputs = net(tree_outputs)\n", - " loss = criterion(outputs, labels)\n", - " loss.backward()\n", - " optimizer.step()\n", - "\n", - " # Collected training loss and accuracy statistics\n", - " total_loss += loss.item()\n", - " n_samples += labels.size(0)\n", - "\n", - " if task_type == \"BINARY\":\n", - " acc = Accuracy(task=\"binary\")(outputs, labels.type(torch.int))\n", - " total_result += acc * labels.size(0)\n", - " elif task_type == \"REG\":\n", - " mse = MeanSquaredError()(outputs, labels.type(torch.int))\n", - " total_result += mse * labels.size(0)\n", - "\n", - " if log_progress:\n", - " if task_type == \"BINARY\":\n", - " pbar.set_postfix(\n", - " {\n", - " \"train_loss\": total_loss / n_samples,\n", - " \"train_acc\": total_result / n_samples,\n", - " }\n", - " )\n", - " elif task_type == \"REG\":\n", - " pbar.set_postfix(\n", - " {\n", - " \"train_loss\": total_loss / n_samples,\n", - " \"train_mse\": total_result / n_samples,\n", - " }\n", - " )\n", - " if log_progress:\n", - " print(\"\\n\")\n", - "\n", - " return total_loss / n_samples, total_result / n_samples, n_samples\n", - "\n", - "\n", - "def test(\n", - " task_type: str,\n", - " net: CNN,\n", - " testloader: DataLoader,\n", - " device: torch.device,\n", - " log_progress: bool = True,\n", - ") -> Tuple[float, float, int]:\n", - " \"\"\"Evaluates the network on test data.\"\"\"\n", - " if task_type == \"BINARY\":\n", - " criterion = nn.BCELoss()\n", - " elif task_type == \"REG\":\n", - " criterion = nn.MSELoss()\n", - "\n", - " total_loss, total_result, n_samples = 0.0, 0.0, 0\n", 
- " net.eval()\n", - " with torch.no_grad():\n", - " pbar = tqdm(testloader, desc=\"TEST\") if log_progress else testloader\n", - " for data in pbar:\n", - " tree_outputs, labels = data[0].to(device), data[1].to(device)\n", - " outputs = net(tree_outputs)\n", - "\n", - " # Collected testing loss and accuracy statistics\n", - " total_loss += criterion(outputs, labels).item()\n", - " n_samples += labels.size(0)\n", - "\n", - " if task_type == \"BINARY\":\n", - " acc = Accuracy(task=\"binary\")(\n", - " outputs.cpu(), labels.type(torch.int).cpu()\n", - " )\n", - " total_result += acc * labels.size(0)\n", - " elif task_type == \"REG\":\n", - " mse = MeanSquaredError()(outputs.cpu(), labels.type(torch.int).cpu())\n", - " total_result += mse * labels.size(0)\n", - "\n", - " if log_progress:\n", - " print(\"\\n\")\n", - "\n", - " return total_loss / n_samples, total_result / n_samples, n_samples" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Create Flower custom client\n", - "## Import Flower custom client relevant modules" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Flower client\n", - "from flwr.common import (\n", - " EvaluateIns,\n", - " EvaluateRes,\n", - " FitIns,\n", - " FitRes,\n", - " GetPropertiesIns,\n", - " GetPropertiesRes,\n", - " GetParametersIns,\n", - " GetParametersRes,\n", - " Status,\n", - " Code,\n", - " parameters_to_ndarrays,\n", - " ndarrays_to_parameters,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "executionInfo": { - "elapsed": 36, - "status": "ok", - "timestamp": 1670363021676, - "user": { - "displayName": "Chenyang Ma", - "userId": "17975430055716133031" - }, - "user_tz": 0 - } - }, - "outputs": [], - "source": [ - "def tree_encoding_loader(\n", - " dataloader: DataLoader,\n", - " batch_size: int,\n", - " client_trees: Union[\n", - " Tuple[XGBClassifier, int],\n", - " 
Tuple[XGBRegressor, int],\n", - " List[Union[Tuple[XGBClassifier, int], Tuple[XGBRegressor, int]]],\n", - " ],\n", - " client_tree_num: int,\n", - " client_num: int,\n", - ") -> DataLoader:\n", - " encoding = tree_encoding(dataloader, client_trees, client_tree_num, client_num)\n", - " if encoding is None:\n", - " return None\n", - " data, labels = encoding\n", - " tree_dataset = TreeDataset(data, labels)\n", - " return get_dataloader(tree_dataset, \"tree\", batch_size)\n", - "\n", - "\n", - "class FL_Client(fl.client.Client):\n", - " def __init__(\n", - " self,\n", - " task_type: str,\n", - " trainloader: DataLoader,\n", - " valloader: DataLoader,\n", - " client_tree_num: int,\n", - " client_num: int,\n", - " cid: str,\n", - " log_progress: bool = False,\n", - " ):\n", - " \"\"\"\n", - " Creates a client for training `network.Net` on tabular dataset.\n", - " \"\"\"\n", - " self.task_type = task_type\n", - " self.cid = cid\n", - " self.tree = construct_tree_from_loader(trainloader, client_tree_num, task_type)\n", - " self.trainloader_original = trainloader\n", - " self.valloader_original = valloader\n", - " self.trainloader = None\n", - " self.valloader = None\n", - " self.client_tree_num = client_tree_num\n", - " self.client_num = client_num\n", - " self.properties = {\"tensor_type\": \"numpy.ndarray\"}\n", - " self.log_progress = log_progress\n", - "\n", - " # instantiate model\n", - " self.net = CNN()\n", - "\n", - " # determine device\n", - " self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", - "\n", - " def get_properties(self, ins: GetPropertiesIns) -> GetPropertiesRes:\n", - " return GetPropertiesRes(properties=self.properties)\n", - "\n", - " def get_parameters(\n", - " self, ins: GetParametersIns\n", - " ) -> Tuple[\n", - " GetParametersRes, Union[Tuple[XGBClassifier, int], Tuple[XGBRegressor, int]]\n", - " ]:\n", - " return [\n", - " GetParametersRes(\n", - " status=Status(Code.OK, \"\"),\n", - " 
parameters=ndarrays_to_parameters(self.net.get_weights()),\n", - " ),\n", - " (self.tree, int(self.cid)),\n", - " ]\n", - "\n", - " def set_parameters(\n", - " self,\n", - " parameters: Tuple[\n", - " Parameters,\n", - " Union[\n", - " Tuple[XGBClassifier, int],\n", - " Tuple[XGBRegressor, int],\n", - " List[Union[Tuple[XGBClassifier, int], Tuple[XGBRegressor, int]]],\n", - " ],\n", - " ],\n", - " ) -> Union[\n", - " Tuple[XGBClassifier, int],\n", - " Tuple[XGBRegressor, int],\n", - " List[Union[Tuple[XGBClassifier, int], Tuple[XGBRegressor, int]]],\n", - " ]:\n", - " self.net.set_weights(parameters_to_ndarrays(parameters[0]))\n", - " return parameters[1]\n", - "\n", - " def fit(self, fit_params: FitIns) -> FitRes:\n", - " # Process incoming request to train\n", - " num_iterations = fit_params.config[\"num_iterations\"]\n", - " batch_size = fit_params.config[\"batch_size\"]\n", - " aggregated_trees = self.set_parameters(fit_params.parameters)\n", - "\n", - " if type(aggregated_trees) is list:\n", - " print(\"Client \" + self.cid + \": recieved\", len(aggregated_trees), \"trees\")\n", - " else:\n", - " print(\"Client \" + self.cid + \": only had its own tree\")\n", - " self.trainloader = tree_encoding_loader(\n", - " self.trainloader_original,\n", - " batch_size,\n", - " aggregated_trees,\n", - " self.client_tree_num,\n", - " self.client_num,\n", - " )\n", - " self.valloader = tree_encoding_loader(\n", - " self.valloader_original,\n", - " batch_size,\n", - " aggregated_trees,\n", - " self.client_tree_num,\n", - " self.client_num,\n", - " )\n", - "\n", - " # num_iterations = None special behaviour: train(...) 
runs for a single epoch, however many updates it may be\n", - " num_iterations = num_iterations or len(self.trainloader)\n", - "\n", - " # Train the model\n", - " print(f\"Client {self.cid}: training for {num_iterations} iterations/updates\")\n", - " self.net.to(self.device)\n", - " train_loss, train_result, num_examples = train(\n", - " self.task_type,\n", - " self.net,\n", - " self.trainloader,\n", - " device=self.device,\n", - " num_iterations=num_iterations,\n", - " log_progress=self.log_progress,\n", - " )\n", - " print(\n", - " f\"Client {self.cid}: training round complete, {num_examples} examples processed\"\n", - " )\n", - "\n", - " # Return training information: model, number of examples processed and metrics\n", - " if self.task_type == \"BINARY\":\n", - " return FitRes(\n", - " status=Status(Code.OK, \"\"),\n", - " parameters=self.get_parameters(fit_params.config),\n", - " num_examples=num_examples,\n", - " metrics={\"loss\": train_loss, \"accuracy\": train_result},\n", - " )\n", - " elif self.task_type == \"REG\":\n", - " return FitRes(\n", - " status=Status(Code.OK, \"\"),\n", - " parameters=self.get_parameters(fit_params.config),\n", - " num_examples=num_examples,\n", - " metrics={\"loss\": train_loss, \"mse\": train_result},\n", - " )\n", - "\n", - " def evaluate(self, eval_params: EvaluateIns) -> EvaluateRes:\n", - " # Process incoming request to evaluate\n", - " self.set_parameters(eval_params.parameters)\n", - "\n", - " # Evaluate the model\n", - " self.net.to(self.device)\n", - " loss, result, num_examples = test(\n", - " self.task_type,\n", - " self.net,\n", - " self.valloader,\n", - " device=self.device,\n", - " log_progress=self.log_progress,\n", - " )\n", - "\n", - " # Return evaluation information\n", - " if self.task_type == \"BINARY\":\n", - " print(\n", - " f\"Client {self.cid}: evaluation on {num_examples} examples: loss={loss:.4f}, accuracy={result:.4f}\"\n", - " )\n", - " return EvaluateRes(\n", - " status=Status(Code.OK, \"\"),\n", - 
" loss=loss,\n", - " num_examples=num_examples,\n", - " metrics={\"accuracy\": result},\n", - " )\n", - " elif self.task_type == \"REG\":\n", - " print(\n", - " f\"Client {self.cid}: evaluation on {num_examples} examples: loss={loss:.4f}, mse={result:.4f}\"\n", - " )\n", - " return EvaluateRes(\n", - " status=Status(Code.OK, \"\"),\n", - " loss=loss,\n", - " num_examples=num_examples,\n", - " metrics={\"mse\": result},\n", - " )" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Create Flower custom server\n", - "## Import Flower custom server relevant modules" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Flower server\n", - "import functools\n", - "from flwr.server.strategy import FedXgbNnAvg\n", - "from flwr.server.app import ServerConfig\n", - "\n", - "import timeit\n", - "from logging import DEBUG, INFO\n", - "from typing import Dict, List, Optional, Tuple, Union\n", - "\n", - "from flwr.common import DisconnectRes, Parameters, ReconnectIns, Scalar\n", - "from flwr.common.logger import log\n", - "from flwr.common.typing import GetParametersIns\n", - "from flwr.server.client_manager import ClientManager, SimpleClientManager\n", - "from flwr.server.client_proxy import ClientProxy\n", - "from flwr.server.history import History\n", - "from flwr.server.strategy import Strategy\n", - "from flwr.server.server import (\n", - " reconnect_clients,\n", - " reconnect_client,\n", - " fit_clients,\n", - " fit_client,\n", - " _handle_finished_future_after_fit,\n", - " evaluate_clients,\n", - " evaluate_client,\n", - " _handle_finished_future_after_evaluate,\n", - ")\n", - "\n", - "FitResultsAndFailures = Tuple[\n", - " List[Tuple[ClientProxy, FitRes]],\n", - " List[Union[Tuple[ClientProxy, FitRes], BaseException]],\n", - "]\n", - "EvaluateResultsAndFailures = Tuple[\n", - " List[Tuple[ClientProxy, EvaluateRes]],\n", - " List[Union[Tuple[ClientProxy, 
EvaluateRes], BaseException]],\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "class FL_Server(fl.server.Server):\n", - " \"\"\"Flower server.\"\"\"\n", - "\n", - " def __init__(\n", - " self, *, client_manager: ClientManager, strategy: Optional[Strategy] = None\n", - " ) -> None:\n", - " self._client_manager: ClientManager = client_manager\n", - " self.parameters: Parameters = Parameters(\n", - " tensors=[], tensor_type=\"numpy.ndarray\"\n", - " )\n", - " self.strategy: Strategy = strategy\n", - " self.max_workers: Optional[int] = None\n", - "\n", - " # pylint: disable=too-many-locals\n", - " def fit(self, num_rounds: int, timeout: Optional[float]) -> History:\n", - " \"\"\"Run federated averaging for a number of rounds.\"\"\"\n", - " history = History()\n", - "\n", - " # Initialize parameters\n", - " log(INFO, \"Initializing global parameters\")\n", - " self.parameters = self._get_initial_parameters(timeout=timeout)\n", - "\n", - " log(INFO, \"Evaluating initial parameters\")\n", - " res = self.strategy.evaluate(0, parameters=self.parameters)\n", - " if res is not None:\n", - " log(\n", - " INFO,\n", - " \"initial parameters (loss, other metrics): %s, %s\",\n", - " res[0],\n", - " res[1],\n", - " )\n", - " history.add_loss_centralized(server_round=0, loss=res[0])\n", - " history.add_metrics_centralized(server_round=0, metrics=res[1])\n", - "\n", - " # Run federated learning for num_rounds\n", - " log(INFO, \"FL starting\")\n", - " start_time = timeit.default_timer()\n", - "\n", - " for current_round in range(1, num_rounds + 1):\n", - " # Train model and replace previous global model\n", - " res_fit = self.fit_round(server_round=current_round, timeout=timeout)\n", - " if res_fit:\n", - " parameters_prime, _, _ = res_fit # fit_metrics_aggregated\n", - " if parameters_prime:\n", - " self.parameters = parameters_prime\n", - "\n", - " # Evaluate model using strategy implementation\n", - " 
res_cen = self.strategy.evaluate(current_round, parameters=self.parameters)\n", - " if res_cen is not None:\n", - " loss_cen, metrics_cen = res_cen\n", - " log(\n", - " INFO,\n", - " \"fit progress: (%s, %s, %s, %s)\",\n", - " current_round,\n", - " loss_cen,\n", - " metrics_cen,\n", - " timeit.default_timer() - start_time,\n", - " )\n", - " history.add_loss_centralized(server_round=current_round, loss=loss_cen)\n", - " history.add_metrics_centralized(\n", - " server_round=current_round, metrics=metrics_cen\n", - " )\n", - "\n", - " # Evaluate model on a sample of available clients\n", - " res_fed = self.evaluate_round(server_round=current_round, timeout=timeout)\n", - " if res_fed:\n", - " loss_fed, evaluate_metrics_fed, _ = res_fed\n", - " if loss_fed:\n", - " history.add_loss_distributed(\n", - " server_round=current_round, loss=loss_fed\n", - " )\n", - " history.add_metrics_distributed(\n", - " server_round=current_round, metrics=evaluate_metrics_fed\n", - " )\n", - "\n", - " # Bookkeeping\n", - " end_time = timeit.default_timer()\n", - " elapsed = end_time - start_time\n", - " log(INFO, \"FL finished in %s\", elapsed)\n", - " return history\n", - "\n", - " def evaluate_round(\n", - " self,\n", - " server_round: int,\n", - " timeout: Optional[float],\n", - " ) -> Optional[\n", - " Tuple[Optional[float], Dict[str, Scalar], EvaluateResultsAndFailures]\n", - " ]:\n", - " \"\"\"Validate current global model on a number of clients.\"\"\"\n", - "\n", - " # Get clients and their respective instructions from strategy\n", - " client_instructions = self.strategy.configure_evaluate(\n", - " server_round=server_round,\n", - " parameters=self.parameters,\n", - " client_manager=self._client_manager,\n", - " )\n", - " if not client_instructions:\n", - " log(INFO, \"evaluate_round %s: no clients selected, cancel\", server_round)\n", - " return None\n", - " log(\n", - " DEBUG,\n", - " \"evaluate_round %s: strategy sampled %s clients (out of %s)\",\n", - " server_round,\n", - " 
len(client_instructions),\n", - " self._client_manager.num_available(),\n", - " )\n", - "\n", - " # Collect `evaluate` results from all clients participating in this round\n", - " results, failures = evaluate_clients(\n", - " client_instructions,\n", - " max_workers=self.max_workers,\n", - " timeout=timeout,\n", - " )\n", - " log(\n", - " DEBUG,\n", - " \"evaluate_round %s received %s results and %s failures\",\n", - " server_round,\n", - " len(results),\n", - " len(failures),\n", - " )\n", - "\n", - " # Aggregate the evaluation results\n", - " aggregated_result: Tuple[\n", - " Optional[float],\n", - " Dict[str, Scalar],\n", - " ] = self.strategy.aggregate_evaluate(server_round, results, failures)\n", - "\n", - " loss_aggregated, metrics_aggregated = aggregated_result\n", - " return loss_aggregated, metrics_aggregated, (results, failures)\n", - "\n", - " def fit_round(\n", - " self,\n", - " server_round: int,\n", - " timeout: Optional[float],\n", - " ) -> Optional[\n", - " Tuple[\n", - " Optional[\n", - " Tuple[\n", - " Parameters,\n", - " Union[\n", - " Tuple[XGBClassifier, int],\n", - " Tuple[XGBRegressor, int],\n", - " List[\n", - " Union[Tuple[XGBClassifier, int], Tuple[XGBRegressor, int]]\n", - " ],\n", - " ],\n", - " ]\n", - " ],\n", - " Dict[str, Scalar],\n", - " FitResultsAndFailures,\n", - " ]\n", - " ]:\n", - " \"\"\"Perform a single round of federated averaging.\"\"\"\n", - "\n", - " # Get clients and their respective instructions from strategy\n", - " client_instructions = self.strategy.configure_fit(\n", - " server_round=server_round,\n", - " parameters=self.parameters,\n", - " client_manager=self._client_manager,\n", - " )\n", - "\n", - " if not client_instructions:\n", - " log(INFO, \"fit_round %s: no clients selected, cancel\", server_round)\n", - " return None\n", - " log(\n", - " DEBUG,\n", - " \"fit_round %s: strategy sampled %s clients (out of %s)\",\n", - " server_round,\n", - " len(client_instructions),\n", - " 
self._client_manager.num_available(),\n", - " )\n", - "\n", - " # Collect `fit` results from all clients participating in this round\n", - " results, failures = fit_clients(\n", - " client_instructions=client_instructions,\n", - " max_workers=self.max_workers,\n", - " timeout=timeout,\n", - " )\n", - "\n", - " log(\n", - " DEBUG,\n", - " \"fit_round %s received %s results and %s failures\",\n", - " server_round,\n", - " len(results),\n", - " len(failures),\n", - " )\n", - "\n", - " # Aggregate training results\n", - " NN_aggregated: Parameters\n", - " trees_aggregated: Union[\n", - " Tuple[XGBClassifier, int],\n", - " Tuple[XGBRegressor, int],\n", - " List[Union[Tuple[XGBClassifier, int], Tuple[XGBRegressor, int]]],\n", - " ]\n", - " metrics_aggregated: Dict[str, Scalar]\n", - " aggregated, metrics_aggregated = self.strategy.aggregate_fit(\n", - " server_round, results, failures\n", - " )\n", - " NN_aggregated, trees_aggregated = aggregated[0], aggregated[1]\n", - "\n", - " if type(trees_aggregated) is list:\n", - " print(\"Server side aggregated\", len(trees_aggregated), \"trees.\")\n", - " else:\n", - " print(\"Server side did not aggregate trees.\")\n", - "\n", - " return (\n", - " [NN_aggregated, trees_aggregated],\n", - " metrics_aggregated,\n", - " (results, failures),\n", - " )\n", - "\n", - " def _get_initial_parameters(\n", - " self, timeout: Optional[float]\n", - " ) -> Tuple[Parameters, Union[Tuple[XGBClassifier, int], Tuple[XGBRegressor, int]]]:\n", - " \"\"\"Get initial parameters from one of the available clients.\"\"\"\n", - "\n", - " # Server-side parameter initialization\n", - " parameters: Optional[Parameters] = self.strategy.initialize_parameters(\n", - " client_manager=self._client_manager\n", - " )\n", - " if parameters is not None:\n", - " log(INFO, \"Using initial parameters provided by strategy\")\n", - " return parameters\n", - "\n", - " # Get initial parameters from one of the clients\n", - " log(INFO, \"Requesting initial parameters from 
one random client\")\n", - " random_client = self._client_manager.sample(1)[0]\n", - " ins = GetParametersIns(config={})\n", - " get_parameters_res_tree = random_client.get_parameters(ins=ins, timeout=timeout)\n", - " parameters = [get_parameters_res_tree[0].parameters, get_parameters_res_tree[1]]\n", - " log(INFO, \"Received initial parameters from one random client\")\n", - "\n", - " return parameters" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Create server-side evaluation and experiment" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "executionInfo": { - "elapsed": 35, - "status": "ok", - "timestamp": 1670363021676, - "user": { - "displayName": "Chenyang Ma", - "userId": "17975430055716133031" - }, - "user_tz": 0 - } - }, - "outputs": [], - "source": [ - "def print_model_layers(model: nn.Module) -> None:\n", - " print(model)\n", - " for param_tensor in model.state_dict():\n", - " print(param_tensor, \"\\t\", model.state_dict()[param_tensor].size())\n", - "\n", - "\n", - "def serverside_eval(\n", - " server_round: int,\n", - " parameters: Tuple[\n", - " Parameters,\n", - " Union[\n", - " Tuple[XGBClassifier, int],\n", - " Tuple[XGBRegressor, int],\n", - " List[Union[Tuple[XGBClassifier, int], Tuple[XGBRegressor, int]]],\n", - " ],\n", - " ],\n", - " config: Dict[str, Scalar],\n", - " task_type: str,\n", - " testloader: DataLoader,\n", - " batch_size: int,\n", - " client_tree_num: int,\n", - " client_num: int,\n", - ") -> Tuple[float, Dict[str, float]]:\n", - " \"\"\"An evaluation function for centralized/serverside evaluation over the entire test set.\"\"\"\n", - " # device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", - " device = \"cpu\"\n", - " model = CNN()\n", - " # print_model_layers(model)\n", - "\n", - " model.set_weights(parameters_to_ndarrays(parameters[0]))\n", - " model.to(device)\n", - "\n", - " trees_aggregated = parameters[1]\n", - 
" testloader = tree_encoding_loader(\n", - " testloader, batch_size, trees_aggregated, client_tree_num, client_num\n", - " )\n", - " loss, result, _ = test(\n", - " task_type, model, testloader, device=device, log_progress=False\n", - " )\n", - "\n", - " if task_type == \"BINARY\":\n", - " print(\n", - " f\"Evaluation on the server: test_loss={loss:.4f}, test_accuracy={result:.4f}\"\n", - " )\n", - " return loss, {\"accuracy\": result}\n", - " elif task_type == \"REG\":\n", - " print(f\"Evaluation on the server: test_loss={loss:.4f}, test_mse={result:.4f}\")\n", - " return loss, {\"mse\": result}\n", - "\n", - "\n", - "def start_experiment(\n", - " task_type: str,\n", - " trainset: Dataset,\n", - " testset: Dataset,\n", - " num_rounds: int = 5,\n", - " client_tree_num: int = 50,\n", - " client_pool_size: int = 5,\n", - " num_iterations: int = 100,\n", - " fraction_fit: float = 1.0,\n", - " min_fit_clients: int = 2,\n", - " batch_size: int = 32,\n", - " val_ratio: float = 0.1,\n", - ") -> History:\n", - " client_resources = {\"num_cpus\": 0.5} # 2 clients per CPU\n", - "\n", - " # Partition the dataset into subsets reserved for each client.\n", - " # - 'val_ratio' controls the proportion of the (local) client reserved as a local test set\n", - " # (good for testing how the final model performs on the client's local unseen data)\n", - " trainloaders, valloaders, testloader = do_fl_partitioning(\n", - " trainset,\n", - " testset,\n", - " batch_size=\"whole\",\n", - " pool_size=client_pool_size,\n", - " val_ratio=val_ratio,\n", - " )\n", - " print(\n", - " f\"Data partitioned across {client_pool_size} clients\"\n", - " f\" and {val_ratio} of local dataset reserved for validation.\"\n", - " )\n", - "\n", - " # Configure the strategy\n", - " def fit_config(server_round: int) -> Dict[str, Scalar]:\n", - " print(f\"Configuring round {server_round}\")\n", - " return {\n", - " \"num_iterations\": num_iterations,\n", - " \"batch_size\": batch_size,\n", - " }\n", - "\n", - " # 
FedXgbNnAvg\n", - " strategy = FedXgbNnAvg(\n", - " fraction_fit=fraction_fit,\n", - " fraction_evaluate=fraction_fit if val_ratio > 0.0 else 0.0,\n", - " min_fit_clients=min_fit_clients,\n", - " min_evaluate_clients=min_fit_clients,\n", - " min_available_clients=client_pool_size, # all clients should be available\n", - " on_fit_config_fn=fit_config,\n", - " on_evaluate_config_fn=(lambda r: {\"batch_size\": batch_size}),\n", - " evaluate_fn=functools.partial(\n", - " serverside_eval,\n", - " task_type=task_type,\n", - " testloader=testloader,\n", - " batch_size=batch_size,\n", - " client_tree_num=client_tree_num,\n", - " client_num=client_num,\n", - " ),\n", - " accept_failures=False,\n", - " )\n", - "\n", - " print(\n", - " f\"FL experiment configured for {num_rounds} rounds with {client_pool_size} client in the pool.\"\n", - " )\n", - " print(\n", - " f\"FL round will proceed with {fraction_fit * 100}% of clients sampled, at least {min_fit_clients}.\"\n", - " )\n", - "\n", - " def client_fn(cid: str) -> fl.client.Client:\n", - " \"\"\"Creates a federated learning client\"\"\"\n", - " if val_ratio > 0.0 and val_ratio <= 1.0:\n", - " return FL_Client(\n", - " task_type,\n", - " trainloaders[int(cid)],\n", - " valloaders[int(cid)],\n", - " client_tree_num,\n", - " client_pool_size,\n", - " cid,\n", - " log_progress=False,\n", - " )\n", - " else:\n", - " return FL_Client(\n", - " task_type,\n", - " trainloaders[int(cid)],\n", - " None,\n", - " client_tree_num,\n", - " client_pool_size,\n", - " cid,\n", - " log_progress=False,\n", - " )\n", - "\n", - " # Start the simulation\n", - " history = fl.simulation.start_simulation(\n", - " client_fn=client_fn,\n", - " server=FL_Server(client_manager=SimpleClientManager(), strategy=strategy),\n", - " num_clients=client_pool_size,\n", - " client_resources=client_resources,\n", - " config=ServerConfig(num_rounds=num_rounds),\n", - " strategy=strategy,\n", - " )\n", - "\n", - " print(history)\n", - "\n", - " return history" - ] - 
}, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Start federated training and inference\n", - "#### High-level workflow: \n", - "#### At round 1, each client first builds their own local XGBoost tree, and sends to the server. The server aggregates all trees and sends to all clients. \n", - "#### After round 1, each client calculates every other client tree’s prediction results, and trains a convolutional neural network with 1D convolution kernel size == the number of XGBoost trees in the tree ensemble. \n", - "#### The sharing of privacy-sensitive information is not needed, and the learning rate (a hyperparameter for XGBoost) is learnable using 1D convolution." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 624 - }, - "executionInfo": { - "elapsed": 7610, - "status": "error", - "timestamp": 1670363029252, - "user": { - "displayName": "Chenyang Ma", - "userId": "17975430055716133031" - }, - "user_tz": 0 - }, - "outputId": "ee2b7146-07ec-4f97-ba44-5b12b35bbeaf" - }, - "outputs": [], - "source": [ - "start_experiment(\n", - " task_type=task_type,\n", - " trainset=trainset,\n", - " testset=testset,\n", - " num_rounds=20,\n", - " client_tree_num=client_tree_num,\n", - " client_pool_size=client_num,\n", - " num_iterations=100,\n", - " batch_size=64,\n", - " fraction_fit=1.0,\n", - " min_fit_clients=1,\n", - " val_ratio=0.0,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "colab": { - "provenance": [] - }, - "gpuClass": "premium", - "kernelspec": { - "display_name": "FedXGBoost", - "language": "python", - "name": "python3" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/examples/secaggplus-mt/client.py b/examples/secaggplus-mt/client.py index f0f1348ee378..164a261213be 100644 --- a/examples/secaggplus-mt/client.py 
+++ b/examples/secaggplus-mt/client.py @@ -5,11 +5,11 @@ import flwr as fl from flwr.common import Status, FitIns, FitRes, Code from flwr.common.parameter import ndarrays_to_parameters -from flwr.client.secure_aggregation import SecAggPlusHandler +from flwr.client.mod import secaggplus_mod # Define Flower client with the SecAgg+ protocol -class FlowerClient(fl.client.Client, SecAggPlusHandler): +class FlowerClient(fl.client.Client): def fit(self, fit_ins: FitIns) -> FitRes: ret_vec = [np.ones(3)] ret = FitRes( @@ -19,17 +19,30 @@ def fit(self, fit_ins: FitIns) -> FitRes: metrics={}, ) # Force a significant delay for testing purposes - if self._shared_state.sid == 0: - print(f"Client {self._shared_state.sid} dropped for testing purposes.") + if fit_ins.config["drop"]: + print(f"Client dropped for testing purposes.") time.sleep(4) return ret - print(f"Client {self._shared_state.sid} uploading {ret_vec[0]}...") + print(f"Client uploading {ret_vec[0]}...") return ret -# Start Flower client -fl.client.start_client( - server_address="0.0.0.0:9092", - client=FlowerClient(), - transport="grpc-rere", +def client_fn(cid: str): + """.""" + return FlowerClient().to_client() + + +# To run this: `flower-client-app client:app` +app = fl.client.ClientApp( + client_fn=client_fn, + mods=[secaggplus_mod], ) + + +if __name__ == "__main__": + # Start Flower client + fl.client.start_client( + server_address="0.0.0.0:9092", + client=FlowerClient(), + transport="grpc-rere", + ) diff --git a/examples/secaggplus-mt/driver.py b/examples/secaggplus-mt/driver.py index f5871f1b44e4..42559c2f4a21 100644 --- a/examples/secaggplus-mt/driver.py +++ b/examples/secaggplus-mt/driver.py @@ -6,7 +6,7 @@ from workflows import get_workflow_factory from flwr.common import Metrics, ndarrays_to_parameters -from flwr.driver import GrpcDriver +from flwr.server.driver import GrpcDriver from flwr.proto import driver_pb2, node_pb2, task_pb2 from flwr.server import History @@ -24,7 +24,6 @@ def merge(_task: 
task_pb2.Task, _merge_task: task_pb2.Task) -> task_pb2.Task: task_id="", # Do not set, will be created and set by the DriverAPI group_id="", run_id=run_id, - run_id=run_id, task=merge( task, task_pb2.Task( @@ -72,7 +71,7 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: # -------------------------------------------------------------------------- Driver SDK -driver = GrpcDriver(driver_service_address="0.0.0.0:9091", certificates=None) +driver = GrpcDriver(driver_service_address="0.0.0.0:9091", root_certificates=None) # -------------------------------------------------------------------------- Driver SDK anonymous_client_nodes = False @@ -193,9 +192,7 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: break # Collect correct results - node_messages = task_res_list_to_task_dict( - [res for res in all_task_res if res.task.HasField("sa")] - ) + node_messages = task_res_list_to_task_dict(all_task_res) workflow.close() # Slow down the start of the next round diff --git a/examples/secaggplus-mt/run.sh b/examples/secaggplus-mt/run.sh index 5cc769f6cbd8..659c1aaee8ce 100755 --- a/examples/secaggplus-mt/run.sh +++ b/examples/secaggplus-mt/run.sh @@ -1,13 +1,13 @@ #!/bin/bash # Kill any currently running client.py processes -pkill -f 'python client.py' +pkill -f 'flower-client-app' -# Kill any currently running flower-server processes with --grpc-rere option -pkill -f 'flower-server --grpc-rere' +# Kill any currently running flower-superlink processes +pkill -f 'flower-superlink' # Start the flower server echo "Starting flower server in background..." -flower-server --grpc-rere > /dev/null 2>&1 & +flower-superlink --insecure > /dev/null 2>&1 & sleep 2 # Number of client processes to start @@ -18,8 +18,7 @@ echo "Starting $N clients in background..." 
# Start N client processes for i in $(seq 1 $N) do - python client.py > /dev/null 2>&1 & - # python client.py & + flower-client-app --insecure client:app > /dev/null 2>&1 & sleep 0.1 done @@ -29,7 +28,7 @@ python driver.py echo "Clearing background processes..." # Kill any currently running client.py processes -pkill -f 'python client.py' +pkill -f 'flower-client-app' -# Kill any currently running flower-server processes with --grpc-rere option -pkill -f 'flower-server --grpc-rere' +# Kill any currently running flower-superlink processes +pkill -f 'flower-superlink' diff --git a/examples/secaggplus-mt/workflows.py b/examples/secaggplus-mt/workflows.py index 3117e308a498..b98de883b8f7 100644 --- a/examples/secaggplus-mt/workflows.py +++ b/examples/secaggplus-mt/workflows.py @@ -1,6 +1,6 @@ import random from logging import WARNING -from typing import Callable, Dict, Generator, List +from typing import Callable, Dict, Generator, List, Optional import numpy as np @@ -36,7 +36,6 @@ KEY_DESTINATION_LIST, KEY_MASKED_PARAMETERS, KEY_MOD_RANGE, - KEY_PARAMETERS, KEY_PUBLIC_KEY_1, KEY_PUBLIC_KEY_2, KEY_SAMPLE_NUMBER, @@ -52,11 +51,16 @@ STAGE_SETUP, STAGE_SHARE_KEYS, STAGE_UNMASK, + RECORD_KEY_CONFIGS, ) from flwr.common.secure_aggregation.secaggplus_utils import pseudo_rand_gen -from flwr.common.serde import named_values_from_proto, named_values_to_proto -from flwr.common.typing import Value -from flwr.proto.task_pb2 import SecureAggregation, Task +from flwr.common.typing import ConfigsRecordValues, FitIns, ServerMessage +from flwr.proto.task_pb2 import Task +from flwr.common import serde +from flwr.common.constant import TASK_TYPE_FIT +from flwr.common.recordset import RecordSet +from flwr.common import recordset_compat as compat +from flwr.common.configsrecord import ConfigsRecord LOG_EXPLAIN = True @@ -68,12 +72,23 @@ def get_workflow_factory() -> ( return _wrap_workflow_with_sec_agg -def _wrap_in_task(named_values: Dict[str, Value]) -> Task: - return 
Task(sa=SecureAggregation(named_values=named_values_to_proto(named_values))) +def _wrap_in_task( + named_values: Dict[str, ConfigsRecordValues], fit_ins: Optional[FitIns] = None +) -> Task: + if fit_ins is not None: + recordset = compat.fitins_to_recordset(fit_ins, keep_input=True) + else: + recordset = RecordSet() + recordset.set_configs(RECORD_KEY_CONFIGS, ConfigsRecord(named_values)) + return Task( + task_type=TASK_TYPE_FIT, + recordset=serde.recordset_to_proto(recordset), + ) -def _get_from_task(task: Task) -> Dict[str, Value]: - return named_values_from_proto(task.sa.named_values) +def _get_from_task(task: Task) -> Dict[str, ConfigsRecordValues]: + recordset = serde.recordset_from_proto(task.recordset) + return recordset.get_configs(RECORD_KEY_CONFIGS).data _secure_aggregation_configuration = { @@ -233,15 +248,16 @@ def workflow_with_sec_agg( if LOG_EXPLAIN: print(f"\nForwarding encrypted key shares and requesting masked input...") # Send encrypted secret key shares to clients (plus model parameters) - weights = parameters_to_ndarrays(parameters) yield { node_id: _wrap_in_task( named_values={ KEY_STAGE: STAGE_COLLECT_MASKED_INPUT, KEY_CIPHERTEXT_LIST: fwd_ciphertexts[nid2sid[node_id]], KEY_SOURCE_LIST: fwd_srcs[nid2sid[node_id]], - KEY_PARAMETERS: [ndarray_to_bytes(arr) for arr in weights], - } + }, + fit_ins=FitIns( + parameters=parameters, config={"drop": nid2sid[node_id] == 0} + ), ) for node_id in surviving_node_ids } @@ -249,6 +265,7 @@ def workflow_with_sec_agg( node_messages = yield surviving_node_ids = [node_id for node_id in node_messages] # Get shape of vector sent by first client + weights = parameters_to_ndarrays(parameters) masked_vector = [np.array([0], dtype=int)] + get_zero_parameters( [w.shape for w in weights] ) diff --git a/examples/simulation-pytorch/README.md b/examples/simulation-pytorch/README.md index 11b7a3364376..5ba5ec70dc3e 100644 --- a/examples/simulation-pytorch/README.md +++ b/examples/simulation-pytorch/README.md @@ -1,6 +1,6 @@ 
# Flower Simulation example using PyTorch -This introductory example uses the simulation capabilities of Flower to simulate a large number of clients on a single machine. Take a look at the [Documentation](https://flower.dev/docs/framework/how-to-run-simulations.html) for a deep dive into how Flower simulation works. This example uses [Flower Datasets](https://flower.dev/docs/datasets/) to download, partition and preprocess the MNIST dataset. This examples uses 100 clients by default. +This introductory example uses the simulation capabilities of Flower to simulate a large number of clients on a single machine. Take a look at the [Documentation](https://flower.ai/docs/framework/how-to-run-simulations.html) for a deep dive into how Flower simulation works. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the MNIST dataset. This example uses 100 clients by default. ## Running the example (via Jupyter Notebook) @@ -79,4 +79,4 @@ python sim.py --num_cpus=2 python sim.py --num_cpus=2 --num_gpus=0.2 ``` -Take a look at the [Documentation](https://flower.dev/docs/framework/how-to-run-simulations.html) for more details on how you can customise your simulation. +Take a look at the [Documentation](https://flower.ai/docs/framework/how-to-run-simulations.html) for more details on how you can customise your simulation.
diff --git a/examples/simulation-pytorch/sim.ipynb b/examples/simulation-pytorch/sim.ipynb index 508630cf9422..d1e7358566cc 100644 --- a/examples/simulation-pytorch/sim.ipynb +++ b/examples/simulation-pytorch/sim.ipynb @@ -509,7 +509,7 @@ " valloader = DataLoader(valset.with_transform(apply_transforms), batch_size=32)\n", "\n", " # Create and return client\n", - " return FlowerClient(trainloader, valloader)\n", + " return FlowerClient(trainloader, valloader).to_client()\n", "\n", " return client_fn\n", "\n", diff --git a/examples/simulation-pytorch/sim.py b/examples/simulation-pytorch/sim.py index 68d9426e83ab..0a6ed8ebb9b8 100644 --- a/examples/simulation-pytorch/sim.py +++ b/examples/simulation-pytorch/sim.py @@ -104,7 +104,7 @@ def client_fn(cid: str) -> fl.client.Client: valset = valset.with_transform(apply_transforms) # Create and return client - return FlowerClient(trainset, valset) + return FlowerClient(trainset, valset).to_client() return client_fn diff --git a/examples/simulation-tensorflow/README.md b/examples/simulation-tensorflow/README.md index f0d94f343d37..75be823db2eb 100644 --- a/examples/simulation-tensorflow/README.md +++ b/examples/simulation-tensorflow/README.md @@ -1,6 +1,6 @@ # Flower Simulation example using TensorFlow/Keras -This introductory example uses the simulation capabilities of Flower to simulate a large number of clients on a single machine. Take a look at the [Documentation](https://flower.dev/docs/framework/how-to-run-simulations.html) for a deep dive into how Flower simulation works. This example uses [Flower Datasets](https://flower.dev/docs/datasets/) to download, partition and preprocess the MNIST dataset. This examples uses 100 clients by default. +This introductory example uses the simulation capabilities of Flower to simulate a large number of clients on a single machine. Take a look at the [Documentation](https://flower.ai/docs/framework/how-to-run-simulations.html) for a deep dive into how Flower simulation works. 
This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the MNIST dataset. This examples uses 100 clients by default. ## Running the example (via Jupyter Notebook) @@ -78,4 +78,4 @@ python sim.py --num_cpus=2 python sim.py --num_cpus=2 --num_gpus=0.2 ``` -Take a look at the [Documentation](https://flower.dev/docs/framework/how-to-run-simulations.html) for more details on how you can customise your simulation. +Take a look at the [Documentation](https://flower.ai/docs/framework/how-to-run-simulations.html) for more details on how you can customise your simulation. diff --git a/examples/simulation-tensorflow/sim.ipynb b/examples/simulation-tensorflow/sim.ipynb index 575b437018f3..5ef1992bcc7e 100644 --- a/examples/simulation-tensorflow/sim.ipynb +++ b/examples/simulation-tensorflow/sim.ipynb @@ -189,7 +189,7 @@ " )\n", "\n", " # Create and return client\n", - " return FlowerClient(trainset, valset)\n", + " return FlowerClient(trainset, valset).to_client()\n", "\n", " return client_fn\n", "\n", diff --git a/examples/simulation-tensorflow/sim.py b/examples/simulation-tensorflow/sim.py index 490e25fe8c8d..043c624a40a9 100644 --- a/examples/simulation-tensorflow/sim.py +++ b/examples/simulation-tensorflow/sim.py @@ -94,7 +94,7 @@ def client_fn(cid: str) -> fl.client.Client: ) # Create and return client - return FlowerClient(trainset, valset) + return FlowerClient(trainset, valset).to_client() return client_fn diff --git a/examples/sklearn-logreg-mnist/README.md b/examples/sklearn-logreg-mnist/README.md index ee3cdfc9768e..50576d98ba3d 100644 --- a/examples/sklearn-logreg-mnist/README.md +++ b/examples/sklearn-logreg-mnist/README.md @@ -1,7 +1,7 @@ # Flower Example using scikit-learn This example of Flower uses `scikit-learn`'s `LogisticRegression` model to train a federated learning system. It will help you understand how to adapt Flower for use with `scikit-learn`. -Running this example in itself is quite easy. 
This example uses [Flower Datasets](https://flower.dev/docs/datasets/) to download, partition and preprocess the MNIST dataset. +Running this example in itself is quite easy. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the MNIST dataset. ## Project Setup diff --git a/examples/sklearn-logreg-mnist/client.py b/examples/sklearn-logreg-mnist/client.py index a5fcaba87409..3d41cb6fbb21 100644 --- a/examples/sklearn-logreg-mnist/client.py +++ b/examples/sklearn-logreg-mnist/client.py @@ -62,4 +62,6 @@ def evaluate(self, parameters, config): # type: ignore return loss, len(X_test), {"accuracy": accuracy} # Start Flower client - fl.client.start_numpy_client(server_address="0.0.0.0:8080", client=MnistClient()) + fl.client.start_client( + server_address="0.0.0.0:8080", client=MnistClient().to_client() + ) diff --git a/examples/vertical-fl/README.md b/examples/vertical-fl/README.md index d5ab0ab9c30d..78588180d3d6 100644 --- a/examples/vertical-fl/README.md +++ b/examples/vertical-fl/README.md @@ -295,7 +295,7 @@ class ServerModel(nn.Module): It comprises a single linear layer that accepts the concatenated outputs from all client models as its input. The number of inputs to this layer equals the -total number of outputs from the client models ( $3 \times 4 = 12$ ). After processing +total number of outputs from the client models (3 x 4 = 12). After processing these inputs, the linear layer's output is passed through a sigmoid activation function (`nn.Sigmoid()`), which maps the result to a `(0, 1)` range, providing a probability score indicative of the likelihood of survival. 
diff --git a/examples/whisper-federated-finetuning/README.md b/examples/whisper-federated-finetuning/README.md index e89a09519fed..ddebe51247b2 100644 --- a/examples/whisper-federated-finetuning/README.md +++ b/examples/whisper-federated-finetuning/README.md @@ -110,7 +110,7 @@ An overview of the FL pipeline built with Flower for this example is illustrated 3. Once on-site training is completed, each client sends back the (now updated) classification head to the Flower server. 4. The Flower server aggregates (via FedAvg) the classification heads in order to obtain a new _global_ classification head. This head will be shared with clients in the next round. -Flower supports two ways of doing Federated Learning: simulated and non-simulated FL. The former, managed by the [`VirtualClientEngine`](https://flower.dev/docs/framework/how-to-run-simulations.html), allows you to run large-scale workloads in a system-aware manner, that scales with the resources available on your system (whether it is a laptop, a desktop with a single GPU, or a cluster of GPU servers). The latter is better suited for settings where clients are unique devices (e.g. a server, a smart device, etc). This example shows you how to use both. +Flower supports two ways of doing Federated Learning: simulated and non-simulated FL. The former, managed by the [`VirtualClientEngine`](https://flower.ai/docs/framework/how-to-run-simulations.html), allows you to run large-scale workloads in a system-aware manner, that scales with the resources available on your system (whether it is a laptop, a desktop with a single GPU, or a cluster of GPU servers). The latter is better suited for settings where clients are unique devices (e.g. a server, a smart device, etc). This example shows you how to use both. ### Preparing the dataset @@ -147,7 +147,7 @@ INFO flwr 2023-11-08 14:03:57,557 | app.py:229 | app_fit: metrics_centralized {' With just 5 FL rounds, the global model should be reaching ~95% validation accuracy. 
A test accuracy of 97% can be reached with 10 rounds of FL training using the default hyperparameters. On an RTX 3090Ti, each round takes ~20-30s depending on the amount of data the clients selected in a round have. -Take a look at the [Documentation](https://flower.dev/docs/framework/how-to-run-simulations.html) for more details on how you can customize your simulation. +Take a look at the [Documentation](https://flower.ai/docs/framework/how-to-run-simulations.html) for more details on how you can customize your simulation. ### Federated Finetuning (non-simulated) diff --git a/examples/whisper-federated-finetuning/client.py b/examples/whisper-federated-finetuning/client.py index 2bfeadfbdae6..d3bb217933f8 100644 --- a/examples/whisper-federated-finetuning/client.py +++ b/examples/whisper-federated-finetuning/client.py @@ -146,13 +146,13 @@ def client_fn(cid: str): return WhisperFlowerClient( full_train_dataset, num_classes, disable_tqdm, compile - ) + ).to_client() return client_fn -def run_client(): - """Run clinet.""" +def main(): + """Run client.""" # Parse input arguments args = parser.parse_args() @@ -174,10 +174,11 @@ def run_client(): client_data_path=CLIENT_DATA, ) - fl.client.start_numpy_client( - server_address=f"{args.server_address}:8080", client=client_fn(args.cid) + fl.client.start_client( + server_address=f"{args.server_address}:8080", + client=client_fn(args.cid), ) if __name__ == "__main__": - run_client() + main() diff --git a/examples/xgboost-comprehensive/README.md b/examples/xgboost-comprehensive/README.md index 11c4c3f9a08b..01fed646d056 100644 --- a/examples/xgboost-comprehensive/README.md +++ b/examples/xgboost-comprehensive/README.md @@ -1,7 +1,7 @@ # Flower Example using XGBoost (Comprehensive) This example demonstrates a comprehensive federated learning setup using Flower with XGBoost. -We use [HIGGS](https://archive.ics.uci.edu/dataset/280/higgs) dataset to perform a binary classification task. 
+We use [HIGGS](https://archive.ics.uci.edu/dataset/280/higgs) dataset to perform a binary classification task. This examples uses [Flower Datasets](https://flower.ai/docs/datasets/) to retrieve, partition and preprocess the data for each Flower client. It differs from the [xgboost-quickstart](https://github.com/adap/flower/tree/main/examples/xgboost-quickstart) example in the following ways: - Arguments parsers of server and clients for hyperparameters selection. @@ -10,6 +10,32 @@ It differs from the [xgboost-quickstart](https://github.com/adap/flower/tree/mai - Customised partitioner type (uniform, linear, square, exponential). - Centralised/distributed evaluation. - Bagging/cyclic training methods. +- You can run it with Flower Simulation + +## Training Strategies + +This example provides two training strategies, **bagging aggregation** and **cyclic training**. + +### Bagging Aggregation + +Bagging (bootstrap) aggregation is an ensemble meta-algorithm in machine learning, +used for enhancing the stability and accuracy of machine learning algorithms. +Here, we leverage this algorithm for XGBoost trees. + +Specifically, each client is treated as a bootstrap by random subsampling (data partitioning in FL). +At each FL round, all clients boost a number of trees (in this example, 1 tree) based on the local bootstrap samples. +Then, the clients' trees are aggregated on the server, and concatenates them to the global model from previous round. +The aggregated tree ensemble is regarded as a new global model. + +This way, let's consider a scenario with M clients. +Given FL round R, the bagging models consist of (M * R) trees. + +### Cyclic Training + +Cyclic XGBoost training performs FL in a client-by-client fashion. +Instead of aggregating multiple clients, +there is only one single client participating in the training per round in the cyclic training scenario. 
+The trained local XGBoost trees will be passed to the next client as an initialised model for next round's boosting. ## Project Setup @@ -26,7 +52,10 @@ This will create a new directory called `xgboost-comprehensive` containing the f -- server.py <- Defines the server-side logic -- client.py <- Defines the client-side logic -- dataset.py <- Defines the functions of data loading and partitioning --- utils.py <- Defines the arguments parser for clients and server +-- utils.py <- Defines the arguments parser and hyper-parameters +-- client_utils.py <- Defines the client utility functions +-- server_utils.py <- Defines the server utility functions +-- sim.py <- Example of using Flower simulation -- run_bagging.sh <- Commands to run bagging experiments -- run_cyclic.sh <- Commands to run cyclic experiments -- pyproject.toml <- Example dependencies (if you use Poetry) @@ -47,7 +76,7 @@ poetry shell Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: ```shell -poetry run python3 -c "import flwr" +poetry run python -c "import flwr" ``` If you don't see any errors you're good to go! @@ -62,44 +91,76 @@ pip install -r requirements.txt ## Run Federated Learning with XGBoost and Flower +You can run this example in two ways: either by manually launching the server, and then several clients that connect to it; or by launching a Flower simulation. Both run the same workload, yielding identical results. The former is ideal for deployments on different machines, while the latter makes it easy to simulate large client cohorts in a resource-aware manner. You can read more about how Flower Simulation works in the [Documentation](https://flower.ai/docs/framework/how-to-run-simulations.html). The commands shown below assume you have activated your environment (if you decide to use Poetry, you can activate it via `poetry shell`). 
+ +### Independent Client/Server Setup + We have two scripts to run bagging and cyclic (client-by-client) experiments. The included `run_bagging.sh` or `run_cyclic.sh` will start the Flower server (using `server.py`), sleep for 15 seconds to ensure that the server is up, and then start 5 Flower clients (using `client.py`) with a small subset of the data from exponential partition distribution. + You can simply start everything in a terminal as follows: ```shell -poetry run ./run_bagging.sh +./run_bagging.sh ``` Or ```shell -poetry run ./run_cyclic.sh +./run_cyclic.sh +``` + +The script starts processes in the background so that you don't have to open six terminal windows. + +You can also run the example without the scripts. First, launch the server: + +```bash +python server.py --train-method=bagging/cyclic --pool-size=N --num-clients-per-round=N +``` + +Then run at least two clients (each on a new terminal or computer in your network) passing different `NODE_ID` and all using the same `N` (denoting the total number of clients or data partitions): + +```bash +python client.py --train-method=bagging/cyclic --node-id=NODE_ID --num-partitions=N +``` + +### Flower Simulation Setup + +We also provide an example code (`sim.py`) to use the simulation capabilities of Flower to simulate federated XGBoost training on either a single machine or a cluster of machines. With default arguments, each client will use 2 CPUs. + +To run bagging aggregation with 5 clients for 30 rounds evaluated on centralised test set: + +```shell +python sim.py --train-method=bagging --pool-size=5 --num-clients-per-round=5 --num-rounds=30 --centralised-eval ``` -The script starts processes in the background so that you don't have to open eleven terminal windows. 
-If you experiment with the code example and something goes wrong, simply using `CTRL + C` on Linux (or `CMD + C` on macOS) wouldn't normally kill all these processes, -which is why the script ends with `trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT` and `wait`. -This simply allows you to stop the experiment using `CTRL + C` (or `CMD + C`). -If you change the script and anything goes wrong you can still use `killall python` (or `killall python3`) -to kill all background processes (or a more specific command if you have other Python processes running that you don't want to kill). +To run cyclic training with 5 clients for 30 rounds evaluated on centralised test set: -You can also manually run `poetry run python3 server.py --train-method=bagging/cyclic --pool-size=N --num-clients-per-round=N` -and `poetry run python3 client.py --train-method=bagging/cyclic --node-id=NODE_ID --num-partitions=N` for as many clients as you want, -but you have to make sure that each command is run in a different terminal window (or a different computer on the network). +```shell +python sim.py --train-method=cyclic --pool-size=5 --num-rounds=30 --centralised-eval-client +``` In addition, we provide more options to customise the experimental settings, including data partitioning and centralised/distributed evaluation (see `utils.py`). -Look at the [code](https://github.com/adap/flower/tree/main/examples/xgboost-comprehensive) -and [tutorial](https://flower.dev/docs/framework/tutorial-quickstart-xgboost.html) for a detailed explanation. +Check the [tutorial](https://flower.ai/docs/framework/tutorial-quickstart-xgboost.html) for a detailed explanation. ### Expected Experimental Results #### Bagging aggregation experiment -![](_static/xgboost_flower_auc.png) +![](_static/xgboost_flower_auc_bagging.png) -The figure above shows the centralised tested AUC performance over FL rounds on 4 experimental settings. 
+The figure above shows the centralised tested AUC performance over FL rounds with bagging aggregation strategy on 4 experimental settings. One can see that all settings obtain stable performance boost over FL rounds (especially noticeable at the start of training). -As expected, uniform client distribution shows higher AUC values (beyond 83% at the end) than square/exponential setup. -Feel free to explore more interesting experiments by yourself! +As expected, uniform client distribution shows higher AUC values than square/exponential setup. + +#### Cyclic training experiment + +![](_static/xgboost_flower_auc_cyclic.png) + +This figure shows the cyclic training results on centralised test set. +The models with cyclic training require more rounds to converge +because only a single client participates in the training per round. + +Feel free to explore more interesting experiments by yourself! diff --git a/examples/xgboost-comprehensive/_static/xgboost_flower_auc.png b/examples/xgboost-comprehensive/_static/xgboost_flower_auc.png deleted file mode 100644 index e6a4bfb83250..000000000000 Binary files a/examples/xgboost-comprehensive/_static/xgboost_flower_auc.png and /dev/null differ diff --git a/examples/xgboost-comprehensive/_static/xgboost_flower_auc_bagging.png b/examples/xgboost-comprehensive/_static/xgboost_flower_auc_bagging.png new file mode 100644 index 000000000000..e192df214471 Binary files /dev/null and b/examples/xgboost-comprehensive/_static/xgboost_flower_auc_bagging.png differ diff --git a/examples/xgboost-comprehensive/_static/xgboost_flower_auc_cyclic.png b/examples/xgboost-comprehensive/_static/xgboost_flower_auc_cyclic.png new file mode 100644 index 000000000000..731d0fc3fbbc Binary files /dev/null and b/examples/xgboost-comprehensive/_static/xgboost_flower_auc_cyclic.png differ diff --git a/examples/xgboost-comprehensive/client.py b/examples/xgboost-comprehensive/client.py index ff7a4adf7977..74fbc4f5366a 100644 --- 
a/examples/xgboost-comprehensive/client.py +++ b/examples/xgboost-comprehensive/client.py @@ -1,21 +1,9 @@ import warnings from logging import INFO -import xgboost as xgb import flwr as fl from flwr_datasets import FederatedDataset from flwr.common.logger import log -from flwr.common import ( - Code, - EvaluateIns, - EvaluateRes, - FitIns, - FitRes, - GetParametersIns, - GetParametersRes, - Parameters, - Status, -) from dataset import ( instantiate_partitioner, @@ -23,7 +11,8 @@ transform_dataset_to_dmatrix, resplit, ) -from utils import client_args_parser, BST_PARAMS +from utils import client_args_parser, BST_PARAMS, NUM_LOCAL_ROUND +from client_utils import XgbClient warnings.filterwarnings("ignore", category=UserWarning) @@ -32,15 +21,13 @@ # Parse arguments for experimental settings args = client_args_parser() -# Load (HIGGS) dataset and conduct partitioning -num_partitions = args.num_partitions - -# Partitioner type is chosen from ["uniform", "linear", "square", "exponential"] -partitioner_type = args.partitioner_type +# Train method (bagging or cyclic) +train_method = args.train_method -# Instantiate partitioner +# Load (HIGGS) dataset and conduct partitioning +# Instantiate partitioner from ["uniform", "linear", "square", "exponential"] partitioner = instantiate_partitioner( - partitioner_type=partitioner_type, num_partitions=num_partitions + partitioner_type=args.partitioner_type, num_partitions=args.num_partitions ) fds = FederatedDataset( dataset="jxie/higgs", @@ -50,8 +37,7 @@ # Load the partition for this `node_id` log(INFO, "Loading partition...") -node_id = args.node_id -partition = fds.load_partition(node_id=node_id, split="train") +partition = fds.load_partition(node_id=args.node_id, split="train") partition.set_format("numpy") if args.centralised_eval: @@ -63,10 +49,8 @@ num_val = valid_data.shape[0] else: # Train/test splitting - SEED = args.seed - test_fraction = args.test_fraction train_data, valid_data, num_train, num_val = train_test_split( - 
partition, test_fraction=test_fraction, seed=SEED + partition, test_fraction=args.test_fraction, seed=args.seed ) # Reformat data to DMatrix for xgboost @@ -74,101 +58,25 @@ train_dmatrix = transform_dataset_to_dmatrix(train_data) valid_dmatrix = transform_dataset_to_dmatrix(valid_data) - # Hyper-parameters for xgboost training -num_local_round = 1 +num_local_round = NUM_LOCAL_ROUND params = BST_PARAMS - -# Define Flower client -class XgbClient(fl.client.Client): - def __init__(self): - self.bst = None - self.config = None - - def get_parameters(self, ins: GetParametersIns) -> GetParametersRes: - _ = (self, ins) - return GetParametersRes( - status=Status( - code=Code.OK, - message="OK", - ), - parameters=Parameters(tensor_type="", tensors=[]), - ) - - def _local_boost(self): - # Update trees based on local training data. - for i in range(num_local_round): - self.bst.update(train_dmatrix, self.bst.num_boosted_rounds()) - - # Bagging: extract the last N=num_local_round trees for sever aggregation - # Cyclic: return the entire model - bst = ( - self.bst[ - self.bst.num_boosted_rounds() - - num_local_round : self.bst.num_boosted_rounds() - ] - if args.train_method == "bagging" - else self.bst - ) - - return bst - - def fit(self, ins: FitIns) -> FitRes: - if not self.bst: - # First round local training - log(INFO, "Start training at round 1") - bst = xgb.train( - params, - train_dmatrix, - num_boost_round=num_local_round, - evals=[(valid_dmatrix, "validate"), (train_dmatrix, "train")], - ) - self.config = bst.save_config() - self.bst = bst - else: - for item in ins.parameters.tensors: - global_model = bytearray(item) - - # Load global model into booster - self.bst.load_model(global_model) - self.bst.load_config(self.config) - - bst = self._local_boost() - - local_model = bst.save_raw("json") - local_model_bytes = bytes(local_model) - - return FitRes( - status=Status( - code=Code.OK, - message="OK", - ), - parameters=Parameters(tensor_type="", 
tensors=[local_model_bytes]), - num_examples=num_train, - metrics={}, - ) - - def evaluate(self, ins: EvaluateIns) -> EvaluateRes: - eval_results = self.bst.eval_set( - evals=[(valid_dmatrix, "valid")], - iteration=self.bst.num_boosted_rounds() - 1, - ) - auc = round(float(eval_results.split("\t")[1].split(":")[1]), 4) - - global_round = ins.config["global_round"] - log(INFO, f"AUC = {auc} at round {global_round}") - - return EvaluateRes( - status=Status( - code=Code.OK, - message="OK", - ), - loss=0.0, - num_examples=num_val, - metrics={"AUC": auc}, - ) - +# Setup learning rate +if args.train_method == "bagging" and args.scaled_lr: + new_lr = params["eta"] / args.num_partitions + params.update({"eta": new_lr}) # Start Flower client -fl.client.start_client(server_address="127.0.0.1:8080", client=XgbClient()) +fl.client.start_client( + server_address="127.0.0.1:8080", + client=XgbClient( + train_dmatrix, + valid_dmatrix, + num_train, + num_val, + num_local_round, + params, + train_method, + ), +) diff --git a/examples/xgboost-comprehensive/client_utils.py b/examples/xgboost-comprehensive/client_utils.py new file mode 100644 index 000000000000..d2e07677ef97 --- /dev/null +++ b/examples/xgboost-comprehensive/client_utils.py @@ -0,0 +1,126 @@ +from logging import INFO +import xgboost as xgb + +import flwr as fl +from flwr.common.logger import log +from flwr.common import ( + Code, + EvaluateIns, + EvaluateRes, + FitIns, + FitRes, + GetParametersIns, + GetParametersRes, + Parameters, + Status, +) + + +class XgbClient(fl.client.Client): + def __init__( + self, + train_dmatrix, + valid_dmatrix, + num_train, + num_val, + num_local_round, + params, + train_method, + ): + self.train_dmatrix = train_dmatrix + self.valid_dmatrix = valid_dmatrix + self.num_train = num_train + self.num_val = num_val + self.num_local_round = num_local_round + self.params = params + self.train_method = train_method + + def get_parameters(self, ins: GetParametersIns) -> GetParametersRes: + _ = 
(self, ins) + return GetParametersRes( + status=Status( + code=Code.OK, + message="OK", + ), + parameters=Parameters(tensor_type="", tensors=[]), + ) + + def _local_boost(self, bst_input): + # Update trees based on local training data. + for i in range(self.num_local_round): + bst_input.update(self.train_dmatrix, bst_input.num_boosted_rounds()) + + # Bagging: extract the last N=num_local_round trees for server aggregation + # Cyclic: return the entire model + bst = ( + bst_input[ + bst_input.num_boosted_rounds() + - self.num_local_round : bst_input.num_boosted_rounds() + ] + if self.train_method == "bagging" + else bst_input + ) + + return bst + + def fit(self, ins: FitIns) -> FitRes: + global_round = int(ins.config["global_round"]) + if global_round == 1: + # First round local training + bst = xgb.train( + self.params, + self.train_dmatrix, + num_boost_round=self.num_local_round, + evals=[(self.valid_dmatrix, "validate"), (self.train_dmatrix, "train")], + ) + else: + bst = xgb.Booster(params=self.params) + for item in ins.parameters.tensors: + global_model = bytearray(item) + + # Load global model into booster + bst.load_model(global_model) + + # Local training + bst = self._local_boost(bst) + + # Save model + local_model = bst.save_raw("json") + local_model_bytes = bytes(local_model) + + return FitRes( + status=Status( + code=Code.OK, + message="OK", + ), + parameters=Parameters(tensor_type="", tensors=[local_model_bytes]), + num_examples=self.num_train, + metrics={}, + ) + + def evaluate(self, ins: EvaluateIns) -> EvaluateRes: + # Load global model + bst = xgb.Booster(params=self.params) + for para in ins.parameters.tensors: + para_b = bytearray(para) + bst.load_model(para_b) + + # Run evaluation + eval_results = bst.eval_set( + evals=[(self.valid_dmatrix, "valid")], + iteration=bst.num_boosted_rounds() - 1, + ) + auc = round(float(eval_results.split("\t")[1].split(":")[1]), 4) + + global_round = ins.config["global_round"] + log(INFO, f"AUC = {auc} at round 
{global_round}") + + return EvaluateRes( + status=Status( + code=Code.OK, + message="OK", + ), + loss=0.0, + num_examples=self.num_val, + metrics={"AUC": auc}, + ) diff --git a/examples/xgboost-comprehensive/dataset.py b/examples/xgboost-comprehensive/dataset.py index bcf2e00b30af..94959925f833 100644 --- a/examples/xgboost-comprehensive/dataset.py +++ b/examples/xgboost-comprehensive/dataset.py @@ -39,12 +39,18 @@ def train_test_split(partition: Dataset, test_fraction: float, seed: int): def transform_dataset_to_dmatrix(data: Union[Dataset, DatasetDict]) -> xgb.core.DMatrix: """Transform dataset to DMatrix format for xgboost.""" - x = data["inputs"] - y = data["label"] + x, y = separate_xy(data) new_data = xgb.DMatrix(x, label=y) return new_data +def separate_xy(data: Union[Dataset, DatasetDict]): + """Return outputs of x (data) and y (labels).""" + x = data["inputs"] + y = data["label"] + return x, y + + def resplit(dataset: DatasetDict) -> DatasetDict: """Increase the quantity of centralised test samples from 500K to 1M.""" return DatasetDict( diff --git a/examples/xgboost-comprehensive/pyproject.toml b/examples/xgboost-comprehensive/pyproject.toml index bbfbb4134b8d..e6495a98c969 100644 --- a/examples/xgboost-comprehensive/pyproject.toml +++ b/examples/xgboost-comprehensive/pyproject.toml @@ -10,6 +10,6 @@ authors = ["The Flower Authors "] [tool.poetry.dependencies] python = ">=3.8,<3.11" -flwr-nightly = ">=1.0,<2.0" +flwr-nightly = { extras = ["simulation"], version = ">=1.7.0,<2.0" } flwr-datasets = ">=0.0.2,<1.0.0" xgboost = ">=2.0.0,<3.0.0" diff --git a/examples/xgboost-comprehensive/requirements.txt b/examples/xgboost-comprehensive/requirements.txt index c37ac2b6ad6d..b5b1d83bcdd1 100644 --- a/examples/xgboost-comprehensive/requirements.txt +++ b/examples/xgboost-comprehensive/requirements.txt @@ -1,3 +1,3 @@ -flwr-nightly>=1.0, <2.0 +flwr[simulation]>=1.7.0, <2.0 flwr-datasets>=0.0.2, <1.0.0 xgboost>=2.0.0, <3.0.0 diff --git 
a/examples/xgboost-comprehensive/run_bagging.sh b/examples/xgboost-comprehensive/run_bagging.sh index 7920f6bf5e55..e853a4ef19cb 100755 --- a/examples/xgboost-comprehensive/run_bagging.sh +++ b/examples/xgboost-comprehensive/run_bagging.sh @@ -3,8 +3,8 @@ set -e cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ echo "Starting server" -python3 server.py --pool-size=5 --num-rounds=50 --num-clients-per-round=5 --centralised-eval & -sleep 15 # Sleep for 15s to give the server enough time to start +python3 server.py --pool-size=5 --num-rounds=30 --num-clients-per-round=5 --centralised-eval & +sleep 30 # Sleep for 30s to give the server enough time to start for i in `seq 0 4`; do echo "Starting client $i" diff --git a/examples/xgboost-comprehensive/server.py b/examples/xgboost-comprehensive/server.py index 1cf4ba79fa50..c6986dc63ce4 100644 --- a/examples/xgboost-comprehensive/server.py +++ b/examples/xgboost-comprehensive/server.py @@ -1,19 +1,19 @@ import warnings -from typing import Dict, List, Optional from logging import INFO -import xgboost as xgb import flwr as fl from flwr.common.logger import log -from flwr.common import Parameters, Scalar from flwr_datasets import FederatedDataset -from flwr.server.strategy import FedXgbBagging -from flwr.server.strategy import FedXgbCyclic -from flwr.server.client_proxy import ClientProxy -from flwr.server.criterion import Criterion -from flwr.server.client_manager import SimpleClientManager - -from utils import server_args_parser, BST_PARAMS +from flwr.server.strategy import FedXgbBagging, FedXgbCyclic + +from utils import server_args_parser +from server_utils import ( + eval_config, + fit_config, + evaluate_metrics_aggregation, + get_evaluate_fn, + CyclicClientManager, +) from dataset import resplit, transform_dataset_to_dmatrix @@ -34,97 +34,11 @@ fds = FederatedDataset( dataset="jxie/higgs", partitioners={"train": 20}, resplitter=resplit ) + log(INFO, "Loading centralised test set...") test_set = 
fds.load_full("test") test_set.set_format("numpy") test_dmatrix = transform_dataset_to_dmatrix(test_set) -# Hyper-parameters used for initialisation -params = BST_PARAMS - - -def eval_config(rnd: int) -> Dict[str, str]: - """Return a configuration with global epochs.""" - config = { - "global_round": str(rnd), - } - return config - - -def evaluate_metrics_aggregation(eval_metrics): - """Return an aggregated metric (AUC) for evaluation.""" - total_num = sum([num for num, _ in eval_metrics]) - auc_aggregated = ( - sum([metrics["AUC"] * num for num, metrics in eval_metrics]) / total_num - ) - metrics_aggregated = {"AUC": auc_aggregated} - return metrics_aggregated - - -def get_evaluate_fn(test_data): - """Return a function for centralised evaluation.""" - - def evaluate_fn( - server_round: int, parameters: Parameters, config: Dict[str, Scalar] - ): - # If at the first round, skip the evaluation - if server_round == 0: - return 0, {} - else: - bst = xgb.Booster(params=params) - for para in parameters.tensors: - para_b = bytearray(para) - - # Load global model - bst.load_model(para_b) - # Run evaluation - eval_results = bst.eval_set( - evals=[(test_data, "valid")], - iteration=bst.num_boosted_rounds() - 1, - ) - auc = round(float(eval_results.split("\t")[1].split(":")[1]), 4) - log(INFO, f"AUC = {auc} at round {server_round}") - - return 0, {"AUC": auc} - - return evaluate_fn - - -class CyclicClientManager(SimpleClientManager): - """Provides a cyclic client selection rule.""" - - def sample( - self, - num_clients: int, - min_num_clients: Optional[int] = None, - criterion: Optional[Criterion] = None, - ) -> List[ClientProxy]: - """Sample a number of Flower ClientProxy instances.""" - - # Block until at least num_clients are connected. 
- if min_num_clients is None: - min_num_clients = num_clients - self.wait_for(min_num_clients) - - # Sample clients which meet the criterion - available_cids = list(self.clients) - if criterion is not None: - available_cids = [ - cid for cid in available_cids if criterion.select(self.clients[cid]) - ] - - if num_clients > len(available_cids): - log( - INFO, - "Sampling failed: number of available clients" - " (%s) is less than number of requested clients (%s).", - len(available_cids), - num_clients, - ) - return [] - - # Return all available clients - return [self.clients[cid] for cid in available_cids] - # Define strategy if train_method == "bagging": @@ -137,6 +51,7 @@ def sample( min_evaluate_clients=num_evaluate_clients if not centralised_eval else 0, fraction_evaluate=1.0 if not centralised_eval else 0.0, on_evaluate_config_fn=eval_config, + on_fit_config_fn=fit_config, evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation if not centralised_eval else None, @@ -149,6 +64,7 @@ def sample( fraction_evaluate=1.0, evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation, on_evaluate_config_fn=eval_config, + on_fit_config_fn=fit_config, ) # Start Flower server diff --git a/examples/xgboost-comprehensive/server_utils.py b/examples/xgboost-comprehensive/server_utils.py new file mode 100644 index 000000000000..35a31bd9adac --- /dev/null +++ b/examples/xgboost-comprehensive/server_utils.py @@ -0,0 +1,101 @@ +from typing import Dict, List, Optional +from logging import INFO +import xgboost as xgb +from flwr.common.logger import log +from flwr.common import Parameters, Scalar +from flwr.server.client_manager import SimpleClientManager +from flwr.server.client_proxy import ClientProxy +from flwr.server.criterion import Criterion +from utils import BST_PARAMS + + +def eval_config(rnd: int) -> Dict[str, str]: + """Return a configuration with global epochs.""" + config = { + "global_round": str(rnd), + } + return config + + +def fit_config(rnd: int) -> Dict[str, 
str]: + """Return a configuration with global epochs.""" + config = { + "global_round": str(rnd), + } + return config + + +def evaluate_metrics_aggregation(eval_metrics): + """Return an aggregated metric (AUC) for evaluation.""" + total_num = sum([num for num, _ in eval_metrics]) + auc_aggregated = ( + sum([metrics["AUC"] * num for num, metrics in eval_metrics]) / total_num + ) + metrics_aggregated = {"AUC": auc_aggregated} + return metrics_aggregated + + +def get_evaluate_fn(test_data): + """Return a function for centralised evaluation.""" + + def evaluate_fn( + server_round: int, parameters: Parameters, config: Dict[str, Scalar] + ): + # If at the first round, skip the evaluation + if server_round == 0: + return 0, {} + else: + bst = xgb.Booster(params=BST_PARAMS) + for para in parameters.tensors: + para_b = bytearray(para) + + # Load global model + bst.load_model(para_b) + # Run evaluation + eval_results = bst.eval_set( + evals=[(test_data, "valid")], + iteration=bst.num_boosted_rounds() - 1, + ) + auc = round(float(eval_results.split("\t")[1].split(":")[1]), 4) + log(INFO, f"AUC = {auc} at round {server_round}") + + return 0, {"AUC": auc} + + return evaluate_fn + + +class CyclicClientManager(SimpleClientManager): + """Provides a cyclic client selection rule.""" + + def sample( + self, + num_clients: int, + min_num_clients: Optional[int] = None, + criterion: Optional[Criterion] = None, + ) -> List[ClientProxy]: + """Sample a number of Flower ClientProxy instances.""" + + # Block until at least num_clients are connected. 
+ if min_num_clients is None: + min_num_clients = num_clients + self.wait_for(min_num_clients) + + # Sample clients which meet the criterion + available_cids = list(self.clients) + if criterion is not None: + available_cids = [ + cid for cid in available_cids if criterion.select(self.clients[cid]) + ] + + if num_clients > len(available_cids): + log( + INFO, + "Sampling failed: number of available clients" + " (%s) is less than number of requested clients (%s).", + len(available_cids), + num_clients, + ) + return [] + + # Return all available clients + return [self.clients[cid] for cid in available_cids] diff --git a/examples/xgboost-comprehensive/sim.py b/examples/xgboost-comprehensive/sim.py new file mode 100644 index 000000000000..ec05b566dd95 --- /dev/null +++ b/examples/xgboost-comprehensive/sim.py @@ -0,0 +1,188 @@ +import warnings +from logging import INFO +import xgboost as xgb +from tqdm import tqdm + +import flwr as fl +from flwr_datasets import FederatedDataset +from flwr.common.logger import log +from flwr.server.strategy import FedXgbBagging, FedXgbCyclic + +from dataset import ( + instantiate_partitioner, + train_test_split, + transform_dataset_to_dmatrix, + separate_xy, + resplit, +) +from utils import ( + sim_args_parser, + NUM_LOCAL_ROUND, + BST_PARAMS, +) +from server_utils import ( + eval_config, + fit_config, + evaluate_metrics_aggregation, + get_evaluate_fn, + CyclicClientManager, +) +from client_utils import XgbClient + + +warnings.filterwarnings("ignore", category=UserWarning) + + +def get_client_fn( + train_data_list, valid_data_list, train_method, params, num_local_round +): + """Return a function to construct a client. + + The VirtualClientEngine will execute this function whenever a client is sampled by + the strategy to participate. 
+ """ + + def client_fn(cid: str) -> fl.client.Client: + """Construct a FlowerClient with its own dataset partition.""" + x_train, y_train = train_data_list[int(cid)][0] + x_valid, y_valid = valid_data_list[int(cid)][0] + + # Reformat data to DMatrix + train_dmatrix = xgb.DMatrix(x_train, label=y_train) + valid_dmatrix = xgb.DMatrix(x_valid, label=y_valid) + + # Fetch the number of examples + num_train = train_data_list[int(cid)][1] + num_val = valid_data_list[int(cid)][1] + + # Create and return client + return XgbClient( + train_dmatrix, + valid_dmatrix, + num_train, + num_val, + num_local_round, + params, + train_method, + ) + + return client_fn + + +def main(): + # Parse arguments for experimental settings + args = sim_args_parser() + + # Load (HIGGS) dataset and conduct partitioning + partitioner = instantiate_partitioner( + partitioner_type=args.partitioner_type, num_partitions=args.pool_size + ) + fds = FederatedDataset( + dataset="jxie/higgs", + partitioners={"train": partitioner}, + resplitter=resplit, + ) + + # Load centralised test set + if args.centralised_eval or args.centralised_eval_client: + log(INFO, "Loading centralised test set...") + test_data = fds.load_full("test") + test_data.set_format("numpy") + num_test = test_data.shape[0] + test_dmatrix = transform_dataset_to_dmatrix(test_data) + + # Load partitions and reformat data to DMatrix for xgboost + log(INFO, "Loading client local partitions...") + train_data_list = [] + valid_data_list = [] + + # Load and process all client partitions. This upfront cost is amortized soon + # after the simulation begins since clients wont need to preprocess their partition. 
+ for node_id in tqdm(range(args.pool_size), desc="Extracting client partition"): + # Extract partition for client with node_id + partition = fds.load_partition(node_id=node_id, split="train") + partition.set_format("numpy") + + if args.centralised_eval_client: + # Use centralised test set for evaluation + train_data = partition + num_train = train_data.shape[0] + x_test, y_test = separate_xy(test_data) + valid_data_list.append(((x_test, y_test), num_test)) + else: + # Train/test splitting + train_data, valid_data, num_train, num_val = train_test_split( + partition, test_fraction=args.test_fraction, seed=args.seed + ) + x_valid, y_valid = separate_xy(valid_data) + valid_data_list.append(((x_valid, y_valid), num_val)) + + x_train, y_train = separate_xy(train_data) + train_data_list.append(((x_train, y_train), num_train)) + + # Define strategy + if args.train_method == "bagging": + # Bagging training + strategy = FedXgbBagging( + evaluate_function=get_evaluate_fn(test_dmatrix) + if args.centralised_eval + else None, + fraction_fit=(float(args.num_clients_per_round) / args.pool_size), + min_fit_clients=args.num_clients_per_round, + min_available_clients=args.pool_size, + min_evaluate_clients=args.num_evaluate_clients + if not args.centralised_eval + else 0, + fraction_evaluate=1.0 if not args.centralised_eval else 0.0, + on_evaluate_config_fn=eval_config, + on_fit_config_fn=fit_config, + evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation + if not args.centralised_eval + else None, + ) + else: + # Cyclic training + strategy = FedXgbCyclic( + fraction_fit=1.0, + min_available_clients=args.pool_size, + fraction_evaluate=1.0, + evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation, + on_evaluate_config_fn=eval_config, + on_fit_config_fn=fit_config, + ) + + # Resources to be assigned to each virtual client + # In this example we use CPU by default + client_resources = { + "num_cpus": args.num_cpus_per_client, + "num_gpus": 0.0, + } + + # Hyper-parameters 
for xgboost training + num_local_round = NUM_LOCAL_ROUND + params = BST_PARAMS + + # Setup learning rate + if args.train_method == "bagging" and args.scaled_lr: + new_lr = params["eta"] / args.pool_size + params.update({"eta": new_lr}) + + # Start simulation + fl.simulation.start_simulation( + client_fn=get_client_fn( + train_data_list, + valid_data_list, + args.train_method, + params, + num_local_round, + ), + num_clients=args.pool_size, + client_resources=client_resources, + config=fl.server.ServerConfig(num_rounds=args.num_rounds), + strategy=strategy, + client_manager=CyclicClientManager() if args.train_method == "cyclic" else None, + ) + + +if __name__ == "__main__": + main() diff --git a/examples/xgboost-comprehensive/utils.py b/examples/xgboost-comprehensive/utils.py index 8acdbbb88a7e..102587f4266d 100644 --- a/examples/xgboost-comprehensive/utils.py +++ b/examples/xgboost-comprehensive/utils.py @@ -1,6 +1,8 @@ import argparse +# Hyper-parameters for xgboost training +NUM_LOCAL_ROUND = 1 BST_PARAMS = { "objective": "binary:logistic", "eta": 0.1, # Learning rate @@ -52,7 +54,12 @@ def client_args_parser(): parser.add_argument( "--centralised-eval", action="store_true", - help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", + help="Conduct evaluation on centralised test set (True), or on hold-out data (False).", + ) + parser.add_argument( + "--scaled-lr", + action="store_true", + help="Perform scaled learning rate based on the number of clients (True).", ) args = parser.parse_args() @@ -96,3 +103,78 @@ def server_args_parser(): args = parser.parse_args() return args + + +def sim_args_parser(): + """Parse arguments to define experimental settings on server side.""" + parser = argparse.ArgumentParser() + + parser.add_argument( + "--train-method", + default="bagging", + type=str, + choices=["bagging", "cyclic"], + help="Training methods selected from bagging aggregation or cyclic training.", + ) + + # Server side + 
parser.add_argument( + "--pool-size", default=5, type=int, help="Number of total clients." + ) + parser.add_argument( + "--num-rounds", default=30, type=int, help="Number of FL rounds." + ) + parser.add_argument( + "--num-clients-per-round", + default=5, + type=int, + help="Number of clients participating in training each round.", + ) + parser.add_argument( + "--num-evaluate-clients", + default=5, + type=int, + help="Number of clients selected for evaluation.", + ) + parser.add_argument( + "--centralised-eval", + action="store_true", + help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", + ) + parser.add_argument( + "--num-cpus-per-client", + default=2, + type=int, + help="Number of CPUs used per client.", + ) + + # Client side + parser.add_argument( + "--partitioner-type", + default="uniform", + type=str, + choices=["uniform", "linear", "square", "exponential"], + help="Partitioner types.", + ) + parser.add_argument( + "--seed", default=42, type=int, help="Seed used for train/test splitting." 
+ ) + parser.add_argument( + "--test-fraction", + default=0.2, + type=float, + help="Test fraction for train/test splitting.", + ) + parser.add_argument( + "--centralised-eval-client", + action="store_true", + help="Conduct evaluation on centralised test set (True), or on hold-out data (False).", + ) + parser.add_argument( + "--scaled-lr", + action="store_true", + help="Perform scaled learning rate based on the number of clients (True).", + ) + + args = parser.parse_args() + return args diff --git a/examples/xgboost-quickstart/README.md b/examples/xgboost-quickstart/README.md index 5174c236c668..cd99cd4c2895 100644 --- a/examples/xgboost-quickstart/README.md +++ b/examples/xgboost-quickstart/README.md @@ -85,4 +85,4 @@ poetry run ./run.sh ``` Look at the [code](https://github.com/adap/flower/tree/main/examples/xgboost-quickstart) -and [tutorial](https://flower.dev/docs/framework/tutorial-quickstart-xgboost.html) for a detailed explanation. +and [tutorial](https://flower.ai/docs/framework/tutorial-quickstart-xgboost.html) for a detailed explanation. 
diff --git a/examples/xgboost-quickstart/client.py b/examples/xgboost-quickstart/client.py index b5eab59ba14d..62e8a441bae1 100644 --- a/examples/xgboost-quickstart/client.py +++ b/examples/xgboost-quickstart/client.py @@ -173,4 +173,4 @@ def evaluate(self, ins: EvaluateIns) -> EvaluateRes: # Start Flower client -fl.client.start_client(server_address="127.0.0.1:8080", client=XgbClient()) +fl.client.start_client(server_address="127.0.0.1:8080", client=XgbClient().to_client()) diff --git a/examples/xgboost-quickstart/pyproject.toml b/examples/xgboost-quickstart/pyproject.toml index 7b3cbd9659a2..af0164514cf1 100644 --- a/examples/xgboost-quickstart/pyproject.toml +++ b/examples/xgboost-quickstart/pyproject.toml @@ -10,6 +10,6 @@ authors = ["The Flower Authors "] [tool.poetry.dependencies] python = ">=3.8,<3.11" -flwr = ">=1.6.0,<2.0" +flwr = ">=1.7.0,<2.0" flwr-datasets = ">=0.0.1,<1.0.0" xgboost = ">=2.0.0,<3.0.0" diff --git a/examples/xgboost-quickstart/requirements.txt b/examples/xgboost-quickstart/requirements.txt index 4ccd5587bfc3..c6949e0651c5 100644 --- a/examples/xgboost-quickstart/requirements.txt +++ b/examples/xgboost-quickstart/requirements.txt @@ -1,3 +1,3 @@ -flwr>=1.6.0, <2.0 +flwr>=1.7.0, <2.0 flwr-datasets>=0.0.1, <1.0.0 xgboost>=2.0.0, <3.0.0 diff --git a/pyproject.toml b/pyproject.toml index cab083b32325..297574ef67ed 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "flwr" -version = "1.7.0" +version = "1.8.0" description = "Flower: A Friendly Federated Learning Framework" license = "Apache-2.0" authors = ["The Flower Authors "] @@ -54,15 +54,16 @@ exclude = [ [tool.poetry.scripts] flower-driver-api = "flwr.server:run_driver_api" flower-fleet-api = "flwr.server:run_fleet_api" -flower-server = "flwr.server:run_server" -flower-client = "flwr.client:run_client" +flower-superlink = "flwr.server:run_superlink" +flower-client-app = "flwr.client:run_client_app" 
+flower-server-app = "flwr.server:run_server_app" [tool.poetry.dependencies] python = "^3.8" # Mandatory dependencies numpy = "^1.21.0" -grpcio = "^1.48.2,!=1.52.0" -protobuf = "^3.19.0" +grpcio = "^1.60.0" +protobuf = "^4.25.2" cryptography = "^41.0.2" pycryptodome = "^3.18.0" iterators = "^0.0.2" @@ -81,21 +82,21 @@ rest = ["requests", "starlette", "uvicorn"] [tool.poetry.group.dev.dependencies] types-dataclasses = "==0.6.6" types-protobuf = "==3.19.18" -types-requests = "==2.31.0.10" -types-setuptools = "==69.0.0.20240115" -clang-format = "==17.0.4" +types-requests = "==2.31.0.20240125" +types-setuptools = "==69.0.0.20240125" +clang-format = "==17.0.6" isort = "==5.13.2" black = { version = "==23.10.1", extras = ["jupyter"] } docformatter = "==1.7.5" -mypy = "==1.6.1" +mypy = "==1.8.0" pylint = "==3.0.3" flake8 = "==5.0.4" -pytest = "==7.4.3" +pytest = "==7.4.4" pytest-cov = "==4.1.0" -pytest-watch = "==4.2.0" -grpcio-tools = "==1.48.2" +pytest-watcher = "==0.4.1" +grpcio-tools = "==1.60.0" mypy-protobuf = "==3.2.0" -jupyterlab = "==4.0.9" +jupyterlab = "==4.0.12" rope = "==1.11.0" semver = "==3.0.2" sphinx = "==6.2.1" @@ -121,7 +122,7 @@ pyroma = "==4.2" check-wheel-contents = "==0.4.0" GitPython = "==3.1.32" PyGithub = "==2.1.1" -licensecheck = "==2023.5.1" +licensecheck = "==2024" [tool.isort] line_length = 88 @@ -146,6 +147,16 @@ testpaths = [ "src/py/flwr", "src/py/flwr_tool", ] +filterwarnings = "ignore::DeprecationWarning" + +[tool.pytest-watcher] +now = false +clear = true +delay = 0.2 +runner = "pytest" +runner_args = ["-s", "-vvvvv"] +patterns = ["*.py"] +ignore_patterns = [] [tool.mypy] plugins = [ diff --git a/src/docker/server/Dockerfile b/src/docker/server/Dockerfile index c42246b16104..faa9cf2e56fe 100644 --- a/src/docker/server/Dockerfile +++ b/src/docker/server/Dockerfile @@ -7,8 +7,8 @@ FROM $BASE_REPOSITORY:$BASE_IMAGE_TAG as server WORKDIR /app ARG FLWR_VERSION RUN python -m pip install -U --no-cache-dir flwr[rest]==${FLWR_VERSION} 
-ENTRYPOINT ["python", "-c", "from flwr.server import run_server; run_server()"] +ENTRYPOINT ["python", "-c", "from flwr.server import run_superlink; run_superlink()"] # Test if Flower can be successfully installed and imported FROM server as test -RUN python -c "from flwr.server import run_server" +RUN python -c "from flwr.server import run_superlink" diff --git a/src/proto/flwr/proto/recordset.proto b/src/proto/flwr/proto/recordset.proto new file mode 100644 index 000000000000..d51d0f9ce416 --- /dev/null +++ b/src/proto/flwr/proto/recordset.proto @@ -0,0 +1,76 @@ +// Copyright 2024 Flower Labs GmbH. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// ============================================================================== + +syntax = "proto3"; + +package flwr.proto; + +message DoubleList { repeated double vals = 1; } +message Sint64List { repeated sint64 vals = 1; } +message BoolList { repeated bool vals = 1; } +message StringList { repeated string vals = 1; } +message BytesList { repeated bytes vals = 1; } + +message Array { + string dtype = 1; + repeated int32 shape = 2; + string stype = 3; + bytes data = 4; +} + +message MetricsRecordValue { + oneof value { + // Single element + double double = 1; + sint64 sint64 = 2; + + // List types + DoubleList double_list = 21; + Sint64List sint64_list = 22; + } +} + +message ConfigsRecordValue { + oneof value { + // Single element + double double = 1; + sint64 sint64 = 2; + bool bool = 3; + string string = 4; + bytes bytes = 5; + + // List types + DoubleList double_list = 21; + Sint64List sint64_list = 22; + BoolList bool_list = 23; + StringList string_list = 24; + BytesList bytes_list = 25; + } +} + +message ParametersRecord { + repeated string data_keys = 1; + repeated Array data_values = 2; +} + +message MetricsRecord { map data = 1; } + +message ConfigsRecord { map data = 1; } + +message RecordSet { + map parameters = 1; + map metrics = 2; + map configs = 3; +} diff --git a/src/proto/flwr/proto/task.proto b/src/proto/flwr/proto/task.proto index ad71d7ea3811..7ae0c31fded5 100644 --- a/src/proto/flwr/proto/task.proto +++ b/src/proto/flwr/proto/task.proto @@ -18,6 +18,7 @@ syntax = "proto3"; package flwr.proto; import "flwr/proto/node.proto"; +import "flwr/proto/recordset.proto"; import "flwr/proto/transport.proto"; message Task { @@ -27,10 +28,8 @@ message Task { string delivered_at = 4; string ttl = 5; repeated string ancestry = 6; - SecureAggregation sa = 7; - - ServerMessage legacy_server_message = 101 [ deprecated = true ]; - ClientMessage legacy_client_message = 102 [ deprecated = true ]; + string task_type = 7; + RecordSet recordset = 8; } message 
TaskIns { @@ -46,29 +45,3 @@ message TaskRes { sint64 run_id = 3; Task task = 4; } - -message Value { - message DoubleList { repeated double vals = 1; } - message Sint64List { repeated sint64 vals = 1; } - message BoolList { repeated bool vals = 1; } - message StringList { repeated string vals = 1; } - message BytesList { repeated bytes vals = 1; } - - oneof value { - // Single element - double double = 1; - sint64 sint64 = 2; - bool bool = 3; - string string = 4; - bytes bytes = 5; - - // List types - DoubleList double_list = 21; - Sint64List sint64_list = 22; - BoolList bool_list = 23; - StringList string_list = 24; - BytesList bytes_list = 25; - } -} - -message SecureAggregation { map named_values = 1; } diff --git a/src/py/flwr/__init__.py b/src/py/flwr/__init__.py index e05799280339..ccaf07c6012f 100644 --- a/src/py/flwr/__init__.py +++ b/src/py/flwr/__init__.py @@ -17,13 +17,11 @@ from flwr.common.version import package_version as _package_version -from . import client, common, driver, flower, server, simulation +from . 
import client, common, server, simulation __all__ = [ "client", "common", - "driver", - "flower", "server", "simulation", ] diff --git a/src/py/flwr/client/__init__.py b/src/py/flwr/client/__init__.py index 13540a76cc25..f359fb472cbe 100644 --- a/src/py/flwr/client/__init__.py +++ b/src/py/flwr/client/__init__.py @@ -15,18 +15,20 @@ """Flower client.""" -from .app import run_client as run_client +from .app import run_client_app as run_client_app from .app import start_client as start_client from .app import start_numpy_client as start_numpy_client from .client import Client as Client +from .clientapp import ClientApp as ClientApp from .numpy_client import NumPyClient as NumPyClient from .typing import ClientFn as ClientFn __all__ = [ "Client", + "ClientApp", "ClientFn", "NumPyClient", - "run_client", + "run_client_app", "start_client", "start_numpy_client", ] diff --git a/src/py/flwr/client/app.py b/src/py/flwr/client/app.py index 91fa5468ae75..15f7c5057a20 100644 --- a/src/py/flwr/client/app.py +++ b/src/py/flwr/client/app.py @@ -18,13 +18,13 @@ import argparse import sys import time -from logging import INFO, WARN +from logging import DEBUG, INFO, WARN from pathlib import Path from typing import Callable, ContextManager, Optional, Tuple, Union from flwr.client.client import Client -from flwr.client.flower import Flower -from flwr.client.typing import Bwd, ClientFn, Fwd +from flwr.client.clientapp import ClientApp +from flwr.client.typing import ClientFn from flwr.common import GRPC_MAX_MESSAGE_LENGTH, EventType, event from flwr.common.address import parse_address from flwr.common.constant import ( @@ -34,10 +34,10 @@ TRANSPORT_TYPE_REST, TRANSPORT_TYPES, ) -from flwr.common.logger import log, warn_experimental_feature -from flwr.proto.task_pb2 import TaskIns, TaskRes +from flwr.common.logger import log, warn_deprecated_feature, warn_experimental_feature +from flwr.common.message import Message -from .flower import load_flower_callable +from .clientapp import 
load_client_app from .grpc_client.connection import grpc_connection from .grpc_rere_client.connection import grpc_request_response from .message_handler.message_handler import handle_control_message @@ -45,13 +45,13 @@ from .numpy_client import NumPyClient -def run_client() -> None: - """Run Flower client.""" - event(EventType.RUN_CLIENT_ENTER) +def run_client_app() -> None: + """Run Flower client app.""" + event(EventType.RUN_CLIENT_APP_ENTER) log(INFO, "Long-running Flower client starting") - args = _parse_args_client().parse_args() + args = _parse_args_run_client_app().parse_args() # Obtain certificates if args.insecure: @@ -62,7 +62,12 @@ def run_client() -> None: "the '--root-certificates' option when running in insecure mode, " "or omit '--insecure' to use HTTPS." ) - log(WARN, "Option `--insecure` was set. Starting insecure HTTP client.") + log( + WARN, + "Option `--insecure` was set. " + "Starting insecure HTTP client connected to %s.", + args.server, + ) root_certificates = None else: # Load the certificates if provided, or load the system certificates @@ -71,39 +76,47 @@ def run_client() -> None: root_certificates = None else: root_certificates = Path(cert_path).read_bytes() + log( + DEBUG, + "Starting secure HTTPS client connected to %s " + "with the following certificates: %s.", + args.server, + cert_path, + ) - print(args.root_certificates) - print(args.server) - print(args.dir) - print(args.callable) + log( + DEBUG, + "Flower will load ClientApp `%s`", + getattr(args, "client-app"), + ) - callable_dir = args.dir - if callable_dir is not None: - sys.path.insert(0, callable_dir) + client_app_dir = args.dir + if client_app_dir is not None: + sys.path.insert(0, client_app_dir) - def _load() -> Flower: - flower: Flower = load_flower_callable(args.callable) - return flower + def _load() -> ClientApp: + client_app: ClientApp = load_client_app(getattr(args, "client-app")) + return client_app _start_client_internal( server_address=args.server, - 
load_flower_callable_fn=_load, - transport="grpc-rere", # Only + load_client_app_fn=_load, + transport="rest" if args.rest else "grpc-rere", root_certificates=root_certificates, insecure=args.insecure, ) - event(EventType.RUN_CLIENT_LEAVE) + event(EventType.RUN_CLIENT_APP_LEAVE) -def _parse_args_client() -> argparse.ArgumentParser: - """Parse command line arguments.""" +def _parse_args_run_client_app() -> argparse.ArgumentParser: + """Parse flower-client-app command line arguments.""" parser = argparse.ArgumentParser( - description="Start a long-running Flower client", + description="Start a Flower client app", ) parser.add_argument( - "callable", - help="For example: `client:flower` or `project.package.module:wrapper.flower`", + "client-app", + help="For example: `client:app` or `project.package.module:wrapper.app`", ) parser.add_argument( "--insecure", @@ -111,6 +124,11 @@ def _parse_args_client() -> argparse.ArgumentParser: help="Run the client without HTTPS. By default, the client runs with " "HTTPS enabled. Use this flag only if you understand the risks.", ) + parser.add_argument( + "--rest", + action="store_true", + help="Use REST as a transport layer for the client.", + ) parser.add_argument( "--root-certificates", metavar="ROOT_CERT", @@ -127,7 +145,7 @@ def _parse_args_client() -> argparse.ArgumentParser: "--dir", default="", help="Add specified directory to the PYTHONPATH and load Flower " - "callable from there." + "app from there." 
" Default: current working directory.", ) @@ -229,7 +247,7 @@ class `flwr.client.Client` (default: None) event(EventType.START_CLIENT_ENTER) _start_client_internal( server_address=server_address, - load_flower_callable_fn=None, + load_client_app_fn=None, client_fn=client_fn, client=client, grpc_max_message_length=grpc_max_message_length, @@ -247,7 +265,7 @@ class `flwr.client.Client` (default: None) def _start_client_internal( *, server_address: str, - load_flower_callable_fn: Optional[Callable[[], Flower]] = None, + load_client_app_fn: Optional[Callable[[], ClientApp]] = None, client_fn: Optional[ClientFn] = None, client: Optional[Client] = None, grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, @@ -263,8 +281,8 @@ def _start_client_internal( The IPv4 or IPv6 address of the server. If the Flower server runs on the same machine on port 8080, then `server_address` would be `"[::]:8080"`. - load_flower_callable_fn : Optional[Callable[[], Flower]] (default: None) - A function that can be used to load a `Flower` callable instance. + load_client_app_fn : Optional[Callable[[], ClientApp]] (default: None) + A function that can be used to load a `ClientApp` instance. client_fn : Optional[ClientFn] A callable that instantiates a Client. 
(default: None) client : Optional[flwr.client.Client] @@ -293,7 +311,7 @@ class `flwr.client.Client` (default: None) if insecure is None: insecure = root_certificates is None - if load_flower_callable_fn is None: + if load_client_app_fn is None: _check_actionable_client(client, client_fn) if client_fn is None: @@ -309,14 +327,14 @@ def single_client_factory( client_fn = single_client_factory - def _load_app() -> Flower: - return Flower(client_fn=client_fn) + def _load_client_app() -> ClientApp: + return ClientApp(client_fn=client_fn) - load_flower_callable_fn = _load_app + load_client_app_fn = _load_client_app else: - warn_experimental_feature("`load_flower_callable_fn`") + warn_experimental_feature("`load_client_app_fn`") - # At this point, only `load_flower_callable_fn` should be used + # At this point, only `load_client_app_fn` should be used # Both `client` and `client_fn` must not be used directly # Initialize connection context manager @@ -340,38 +358,37 @@ def _load_app() -> Flower: while True: # Receive - task_ins = receive() - if task_ins is None: + message = receive() + if message is None: time.sleep(3) # Wait for 3s before asking again continue # Handle control message - task_res, sleep_duration = handle_control_message(task_ins=task_ins) - if task_res: - send(task_res) + out_message, sleep_duration = handle_control_message(message) + if out_message: + send(out_message) break - # Register state - node_state.register_runstate(run_id=task_ins.run_id) + # Register context for this run + node_state.register_context(run_id=message.metadata.run_id) + + # Retrieve context for this run + context = node_state.retrieve_context(run_id=message.metadata.run_id) - # Load app - app: Flower = load_flower_callable_fn() + # Load ClientApp instance + client_app: ClientApp = load_client_app_fn() # Handle task message - fwd_msg: Fwd = Fwd( - task_ins=task_ins, - state=node_state.retrieve_runstate(run_id=task_ins.run_id), - ) - bwd_msg: Bwd = app(fwd=fwd_msg) + out_message = 
client_app(message=message, context=context) # Update node state - node_state.update_runstate( - run_id=bwd_msg.task_res.run_id, - run_state=bwd_msg.state, + node_state.update_context( + run_id=message.metadata.run_id, + context=context, ) # Send - send(bwd_msg.task_res) + send(out_message) # Unregister node if delete_node is not None: @@ -400,6 +417,12 @@ def start_numpy_client( ) -> None: """Start a Flower NumPyClient which connects to a gRPC server. + Warning + ------- + This function is deprecated since 1.7.0. Use :code:`flwr.client.start_client` + instead and first convert your :code:`NumPyClient` to type + :code:`flwr.client.Client` by executing its :code:`to_client()` method. + Parameters ---------- server_address : str @@ -455,21 +478,22 @@ def start_numpy_client( >>> root_certificates=Path("/crts/root.pem").read_bytes(), >>> ) """ - # warnings.warn( - # "flwr.client.start_numpy_client() is deprecated and will " - # "be removed in a future version of Flower. Instead, pass " - # "your client to `flwr.client.start_client()` by calling " - # "first the `.to_client()` method as shown below: \n" - # "\tflwr.client.start_client(\n" - # "\t\tserver_address=':',\n" - # "\t\tclient=FlowerClient().to_client()\n" - # "\t)", - # DeprecationWarning, - # stacklevel=2, - # ) + mssg = ( + "flwr.client.start_numpy_client() is deprecated. \n\tInstead, use " + "`flwr.client.start_client()` by ensuring you first call " + "the `.to_client()` method as shown below: \n" + "\tflwr.client.start_client(\n" + "\t\tserver_address=':',\n" + "\t\tclient=FlowerClient().to_client()," + " # <-- where FlowerClient is of type flwr.client.NumPyClient object\n" + "\t)\n" + "\tUsing `start_numpy_client()` is deprecated." + ) + + warn_deprecated_feature(name=mssg) # Calling this function is deprecated. A warning is thrown. 
- # We first need to convert either the supplied client to `Client.` + # We first need to convert the supplied client to `Client.` wrp_client = client.to_client() @@ -490,8 +514,8 @@ def _init_connection( [str, bool, int, Union[bytes, str, None]], ContextManager[ Tuple[ - Callable[[], Optional[TaskIns]], - Callable[[TaskRes], None], + Callable[[], Optional[Message]], + Callable[[Message], None], Optional[Callable[[], None]], Optional[Callable[[], None]], ] diff --git a/src/py/flwr/client/client.py b/src/py/flwr/client/client.py index 54b53296fd2f..6d982ecc9a9e 100644 --- a/src/py/flwr/client/client.py +++ b/src/py/flwr/client/client.py @@ -19,7 +19,6 @@ from abc import ABC -from flwr.client.run_state import RunState from flwr.common import ( Code, EvaluateIns, @@ -33,12 +32,13 @@ Parameters, Status, ) +from flwr.common.context import Context class Client(ABC): """Abstract base class for Flower clients.""" - state: RunState + context: Context def get_properties(self, ins: GetPropertiesIns) -> GetPropertiesRes: """Return set of client's properties. 
@@ -141,13 +141,13 @@ def evaluate(self, ins: EvaluateIns) -> EvaluateRes: metrics={}, ) - def get_state(self) -> RunState: - """Get the run state from this client.""" - return self.state + def get_context(self) -> Context: + """Get the run context from this client.""" + return self.context - def set_state(self, state: RunState) -> None: - """Apply a run state to this client.""" - self.state = state + def set_context(self, context: Context) -> None: + """Apply a run context to this client.""" + self.context = context def to_client(self) -> Client: """Return client (itself).""" diff --git a/src/py/flwr/client/flower.py b/src/py/flwr/client/clientapp.py similarity index 51% rename from src/py/flwr/client/flower.py rename to src/py/flwr/client/clientapp.py index 535f096e5866..cfc59c9298ed 100644 --- a/src/py/flwr/client/flower.py +++ b/src/py/flwr/client/clientapp.py @@ -12,24 +12,28 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Flower callable.""" +"""Flower ClientApp.""" import importlib from typing import List, Optional, cast -from flwr.client.message_handler.message_handler import handle -from flwr.client.middleware.utils import make_ffn -from flwr.client.typing import Bwd, ClientFn, Fwd, Layer +from flwr.client.message_handler.message_handler import ( + handle_legacy_message_from_msgtype, +) +from flwr.client.mod.utils import make_ffn +from flwr.client.typing import ClientFn, Mod +from flwr.common.context import Context +from flwr.common.message import Message -class Flower: - """Flower callable. +class ClientApp: + """Flower ClientApp. 
Examples -------- - Assuming a typical client implementation in `FlowerClient`, you can wrap it in a - Flower callable as follows: + Assuming a typical `Client` implementation named `FlowerClient`, you can wrap it in + a `ClientApp` as follows: >>> class FlowerClient(NumPyClient): >>> # ... @@ -37,59 +41,60 @@ class Flower: >>> def client_fn(cid): >>> return FlowerClient().to_client() >>> - >>> flower = Flower(client_fn) + >>> app = ClientApp(client_fn) If the above code is in a Python module called `client`, it can be started as follows: - >>> flower-client --callable client:flower + >>> flower-client-app client:app --insecure - In this `client:flower` example, `client` refers to the Python module in which the - previous code lives in. `flower` refers to the global attribute `flower` that points - to an object of type `Flower` (a Flower callable). + In this `client:app` example, `client` refers to the Python module `client.py` in + which the previous code lives in and `app` refers to the global attribute `app` that + points to an object of type `ClientApp`. 
""" def __init__( self, client_fn: ClientFn, # Only for backward compatibility - layers: Optional[List[Layer]] = None, + mods: Optional[List[Mod]] = None, ) -> None: # Create wrapper function for `handle` - def ffn(fwd: Fwd) -> Bwd: # pylint: disable=invalid-name - task_res, state_updated = handle( - client_fn=client_fn, - state=fwd.state, - task_ins=fwd.task_ins, + def ffn( + message: Message, + context: Context, + ) -> Message: # pylint: disable=invalid-name + out_message = handle_legacy_message_from_msgtype( + client_fn=client_fn, message=message, context=context ) - return Bwd(task_res=task_res, state=state_updated) + return out_message - # Wrap middleware layers around the wrapped handle function - self._call = make_ffn(ffn, layers if layers is not None else []) + # Wrap mods around the wrapped handle function + self._call = make_ffn(ffn, mods if mods is not None else []) - def __call__(self, fwd: Fwd) -> Bwd: - """.""" - return self._call(fwd) + def __call__(self, message: Message, context: Context) -> Message: + """Execute `ClientApp`.""" + return self._call(message, context) -class LoadCallableError(Exception): - """.""" +class LoadClientAppError(Exception): + """Error when trying to load `ClientApp`.""" -def load_flower_callable(module_attribute_str: str) -> Flower: - """Load the `Flower` object specified in a module attribute string. +def load_client_app(module_attribute_str: str) -> ClientApp: + """Load the `ClientApp` object specified in a module attribute string. The module/attribute string should have the form :. Valid - examples include `client:flower` and `project.package.module:wrapper.flower`. It + examples include `client:app` and `project.package.module:wrapper.app`. It must refer to a module on the PYTHONPATH, the module needs to have the specified - attribute, and the attribute must be of type `Flower`. + attribute, and the attribute must be of type `ClientApp`. 
""" module_str, _, attributes_str = module_attribute_str.partition(":") if not module_str: - raise LoadCallableError( + raise LoadClientAppError( f"Missing module in {module_attribute_str}", ) from None if not attributes_str: - raise LoadCallableError( + raise LoadClientAppError( f"Missing attribute in {module_attribute_str}", ) from None @@ -97,7 +102,7 @@ def load_flower_callable(module_attribute_str: str) -> Flower: try: module = importlib.import_module(module_str) except ModuleNotFoundError: - raise LoadCallableError( + raise LoadClientAppError( f"Unable to load module {module_str}", ) from None @@ -107,14 +112,14 @@ def load_flower_callable(module_attribute_str: str) -> Flower: for attribute_str in attributes_str.split("."): attribute = getattr(attribute, attribute_str) except AttributeError: - raise LoadCallableError( + raise LoadClientAppError( f"Unable to load attribute {attributes_str} from module {module_str}", ) from None # Check type - if not isinstance(attribute, Flower): - raise LoadCallableError( - f"Attribute {attributes_str} is not of type {Flower}", + if not isinstance(attribute, ClientApp): + raise LoadClientAppError( + f"Attribute {attributes_str} is not of type {ClientApp}", ) from None - return cast(Flower, attribute) + return cast(ClientApp, attribute) diff --git a/src/py/flwr/client/dpfedavg_numpy_client.py b/src/py/flwr/client/dpfedavg_numpy_client.py index c39b89b31da3..ab31a289d29b 100644 --- a/src/py/flwr/client/dpfedavg_numpy_client.py +++ b/src/py/flwr/client/dpfedavg_numpy_client.py @@ -22,13 +22,20 @@ from flwr.client.numpy_client import NumPyClient from flwr.common.dp import add_gaussian_noise, clip_by_l2 +from flwr.common.logger import warn_deprecated_feature from flwr.common.typing import Config, NDArrays, Scalar class DPFedAvgNumPyClient(NumPyClient): - """Wrapper for configuring a Flower client for DP.""" + """Wrapper for configuring a Flower client for DP. 
+ + Warning + ------- + This class is deprecated and will be removed in a future release. + """ def __init__(self, client: NumPyClient) -> None: + warn_deprecated_feature("`DPFedAvgNumPyClient` wrapper") super().__init__() self.client = client diff --git a/src/py/flwr/client/grpc_client/connection.py b/src/py/flwr/client/grpc_client/connection.py index 481f32c77859..e6d21963fcbf 100644 --- a/src/py/flwr/client/grpc_client/connection.py +++ b/src/py/flwr/client/grpc_client/connection.py @@ -20,15 +20,28 @@ from logging import DEBUG from pathlib import Path from queue import Queue -from typing import Callable, Iterator, Optional, Tuple, Union +from typing import Callable, Iterator, Optional, Tuple, Union, cast from flwr.common import GRPC_MAX_MESSAGE_LENGTH +from flwr.common import recordset_compat as compat +from flwr.common import serde +from flwr.common.configsrecord import ConfigsRecord +from flwr.common.constant import ( + MESSAGE_TYPE_EVALUATE, + MESSAGE_TYPE_FIT, + MESSAGE_TYPE_GET_PARAMETERS, + MESSAGE_TYPE_GET_PROPERTIES, +) from flwr.common.grpc import create_channel from flwr.common.logger import log -from flwr.proto.node_pb2 import Node -from flwr.proto.task_pb2 import Task, TaskIns, TaskRes -from flwr.proto.transport_pb2 import ClientMessage, ServerMessage -from flwr.proto.transport_pb2_grpc import FlowerServiceStub +from flwr.common.message import Message, Metadata +from flwr.common.recordset import RecordSet +from flwr.proto.transport_pb2 import ( # pylint: disable=E0611 + ClientMessage, + Reason, + ServerMessage, +) +from flwr.proto.transport_pb2_grpc import FlowerServiceStub # pylint: disable=E0611 # The following flags can be uncommented for debugging. 
Other possible values: # https://github.com/grpc/grpc/blob/master/doc/environment_variables.md @@ -43,15 +56,15 @@ def on_channel_state_change(channel_connectivity: str) -> None: @contextmanager -def grpc_connection( +def grpc_connection( # pylint: disable=R0915 server_address: str, insecure: bool, max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, root_certificates: Optional[Union[bytes, str]] = None, ) -> Iterator[ Tuple[ - Callable[[], Optional[TaskIns]], - Callable[[TaskRes], None], + Callable[[], Optional[Message]], + Callable[[Message], None], Optional[Callable[[], None]], Optional[Callable[[], None]], ] @@ -114,23 +127,90 @@ def grpc_connection( server_message_iterator: Iterator[ServerMessage] = stub.Join(iter(queue.get, None)) - def receive() -> TaskIns: - server_message = next(server_message_iterator) - return TaskIns( - task_id=str(uuid.uuid4()), - group_id="", - run_id=0, - task=Task( - producer=Node(node_id=0, anonymous=True), - consumer=Node(node_id=0, anonymous=True), - ancestry=[], - legacy_server_message=server_message, + def receive() -> Message: + # Receive ServerMessage proto + proto = next(server_message_iterator) + + # ServerMessage proto --> *Ins --> RecordSet + field = proto.WhichOneof("msg") + message_type = "" + if field == "get_properties_ins": + recordset = compat.getpropertiesins_to_recordset( + serde.get_properties_ins_from_proto(proto.get_properties_ins) + ) + message_type = MESSAGE_TYPE_GET_PROPERTIES + elif field == "get_parameters_ins": + recordset = compat.getparametersins_to_recordset( + serde.get_parameters_ins_from_proto(proto.get_parameters_ins) + ) + message_type = MESSAGE_TYPE_GET_PARAMETERS + elif field == "fit_ins": + recordset = compat.fitins_to_recordset( + serde.fit_ins_from_proto(proto.fit_ins), False + ) + message_type = MESSAGE_TYPE_FIT + elif field == "evaluate_ins": + recordset = compat.evaluateins_to_recordset( + serde.evaluate_ins_from_proto(proto.evaluate_ins), False + ) + message_type = MESSAGE_TYPE_EVALUATE + 
elif field == "reconnect_ins": + recordset = RecordSet() + recordset.set_configs( + "config", ConfigsRecord({"seconds": proto.reconnect_ins.seconds}) + ) + message_type = "reconnect" + else: + raise ValueError( + "Unsupported instruction in ServerMessage, " + "cannot deserialize from ProtoBuf" + ) + + # Construct Message + return Message( + metadata=Metadata( + run_id=0, + message_id=str(uuid.uuid4()), + group_id="", + ttl="", + node_id=0, + message_type=message_type, ), + content=recordset, ) - def send(task_res: TaskRes) -> None: - msg = task_res.task.legacy_client_message - return queue.put(msg, block=False) + def send(message: Message) -> None: + # Retrieve RecordSet and message_type + recordset = message.content + message_type = message.metadata.message_type + + # RecordSet --> *Res --> *Res proto -> ClientMessage proto + if message_type == MESSAGE_TYPE_GET_PROPERTIES: + getpropres = compat.recordset_to_getpropertiesres(recordset) + msg_proto = ClientMessage( + get_properties_res=serde.get_properties_res_to_proto(getpropres) + ) + elif message_type == MESSAGE_TYPE_GET_PARAMETERS: + getparamres = compat.recordset_to_getparametersres(recordset, False) + msg_proto = ClientMessage( + get_parameters_res=serde.get_parameters_res_to_proto(getparamres) + ) + elif message_type == MESSAGE_TYPE_FIT: + fitres = compat.recordset_to_fitres(recordset, False) + msg_proto = ClientMessage(fit_res=serde.fit_res_to_proto(fitres)) + elif message_type == MESSAGE_TYPE_EVALUATE: + evalres = compat.recordset_to_evaluateres(recordset) + msg_proto = ClientMessage(evaluate_res=serde.evaluate_res_to_proto(evalres)) + elif message_type == "reconnect": + reason = cast(Reason.ValueType, recordset.get_configs("config")["reason"]) + msg_proto = ClientMessage( + disconnect_res=ClientMessage.DisconnectRes(reason=reason) + ) + else: + raise ValueError(f"Invalid task type: {message_type}") + + # Send ClientMessage proto + return queue.put(msg_proto, block=False) try: # Yield methods diff --git 
a/src/py/flwr/client/grpc_client/connection_test.py b/src/py/flwr/client/grpc_client/connection_test.py index e5944230e5af..127e27356f64 100644 --- a/src/py/flwr/client/grpc_client/connection_test.py +++ b/src/py/flwr/client/grpc_client/connection_test.py @@ -23,20 +23,50 @@ import grpc -from flwr.proto.task_pb2 import Task, TaskRes -from flwr.proto.transport_pb2 import ClientMessage, ServerMessage +from flwr.common import recordset_compat as compat +from flwr.common.configsrecord import ConfigsRecord +from flwr.common.constant import MESSAGE_TYPE_GET_PROPERTIES +from flwr.common.message import Message, Metadata +from flwr.common.recordset import RecordSet +from flwr.common.typing import Code, GetPropertiesRes, Status +from flwr.proto.transport_pb2 import ( # pylint: disable=E0611 + ClientMessage, + ServerMessage, +) from flwr.server.client_manager import SimpleClientManager -from flwr.server.fleet.grpc_bidi.grpc_server import start_grpc_server +from flwr.server.superlink.fleet.grpc_bidi.grpc_server import start_grpc_server from .connection import grpc_connection EXPECTED_NUM_SERVER_MESSAGE = 10 -SERVER_MESSAGE = ServerMessage() +SERVER_MESSAGE = ServerMessage(get_properties_ins=ServerMessage.GetPropertiesIns()) SERVER_MESSAGE_RECONNECT = ServerMessage(reconnect_ins=ServerMessage.ReconnectIns()) -CLIENT_MESSAGE = ClientMessage() -CLIENT_MESSAGE_DISCONNECT = ClientMessage(disconnect_res=ClientMessage.DisconnectRes()) +MESSAGE_GET_PROPERTIES = Message( + metadata=Metadata( + run_id=0, + message_id="", + group_id="", + node_id=0, + ttl="", + message_type=MESSAGE_TYPE_GET_PROPERTIES, + ), + content=compat.getpropertiesres_to_recordset( + GetPropertiesRes(Status(Code.OK, ""), {}) + ), +) +MESSAGE_DISCONNECT = Message( + metadata=Metadata( + run_id=0, + message_id="", + group_id="", + node_id=0, + ttl="", + message_type="reconnect", + ), + content=RecordSet(configs={"config": ConfigsRecord({"reason": 0})}), +) def unused_tcp_port() -> int: @@ -72,7 +102,9 @@ def 
mock_join( # type: ignore # pylint: disable=invalid-name @patch( - "flwr.server.fleet.grpc_bidi.flower_service_servicer.FlowerServiceServicer.Join", + # pylint: disable=line-too-long + "flwr.server.superlink.fleet.grpc_bidi.flower_service_servicer.FlowerServiceServicer.Join", # noqa: E501 + # pylint: enable=line-too-long mock_join, ) def test_integration_connection() -> None: @@ -99,34 +131,15 @@ def run_client() -> int: # Setup processing loop while True: # Block until server responds with a message - task_ins = receive() - - if task_ins is None: - raise ValueError("Unexpected None value") - - # pylint: disable=no-member - if task_ins.HasField("task") and task_ins.task.HasField( - "legacy_server_message" - ): - server_message = task_ins.task.legacy_server_message - else: - server_message = None - # pylint: enable=no-member - - if server_message is None: - raise ValueError("Unexpected None value") + message = receive() messages_received += 1 - if server_message.HasField("reconnect_ins"): - task_res = TaskRes( - task=Task(legacy_client_message=CLIENT_MESSAGE_DISCONNECT) - ) - send(task_res) + if message.metadata.message_type == "reconnect": # type: ignore + send(MESSAGE_DISCONNECT) break # Process server_message and send client_message... 
- task_res = TaskRes(task=Task(legacy_client_message=CLIENT_MESSAGE)) - send(task_res) + send(MESSAGE_GET_PROPERTIES) return messages_received diff --git a/src/py/flwr/client/grpc_rere_client/connection.py b/src/py/flwr/client/grpc_rere_client/connection.py index 30d407a52c53..07635d002721 100644 --- a/src/py/flwr/client/grpc_rere_client/connection.py +++ b/src/py/flwr/client/grpc_rere_client/connection.py @@ -29,15 +29,17 @@ from flwr.common import GRPC_MAX_MESSAGE_LENGTH from flwr.common.grpc import create_channel from flwr.common.logger import log, warn_experimental_feature -from flwr.proto.fleet_pb2 import ( +from flwr.common.message import Message +from flwr.common.serde import message_from_taskins, message_to_taskres +from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 CreateNodeRequest, DeleteNodeRequest, PullTaskInsRequest, PushTaskResRequest, ) -from flwr.proto.fleet_pb2_grpc import FleetStub -from flwr.proto.node_pb2 import Node -from flwr.proto.task_pb2 import TaskIns, TaskRes +from flwr.proto.fleet_pb2_grpc import FleetStub # pylint: disable=E0611 +from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.task_pb2 import TaskIns # pylint: disable=E0611 KEY_NODE = "node" KEY_TASK_INS = "current_task_ins" @@ -56,8 +58,8 @@ def grpc_request_response( root_certificates: Optional[Union[bytes, str]] = None, ) -> Iterator[ Tuple[ - Callable[[], Optional[TaskIns]], - Callable[[TaskRes], None], + Callable[[], Optional[Message]], + Callable[[Message], None], Optional[Callable[[], None]], Optional[Callable[[], None]], ] @@ -132,7 +134,7 @@ def delete_node() -> None: del node_store[KEY_NODE] - def receive() -> Optional[TaskIns]: + def receive() -> Optional[Message]: """Receive next task from server.""" # Get Node if node_store[KEY_NODE] is None: @@ -148,18 +150,16 @@ def receive() -> Optional[TaskIns]: task_ins: Optional[TaskIns] = get_task_ins(response) # Discard the current TaskIns if not valid - if task_ins is not None and not 
validate_task_ins( - task_ins, discard_reconnect_ins=True - ): + if task_ins is not None and not validate_task_ins(task_ins): task_ins = None # Remember `task_ins` until `task_res` is available state[KEY_TASK_INS] = task_ins - # Return the TaskIns if available - return task_ins + # Return the message if available + return message_from_taskins(task_ins) if task_ins is not None else None - def send(task_res: TaskRes) -> None: + def send(message: Message) -> None: """Send task result back to server.""" # Get Node if node_store[KEY_NODE] is None: @@ -173,6 +173,9 @@ def send(task_res: TaskRes) -> None: return task_ins: TaskIns = cast(TaskIns, state[KEY_TASK_INS]) + # Construct TaskRes + task_res = message_to_taskres(message) + # Check if fields to be set are not initialized if not validate_task_res(task_res): state[KEY_TASK_INS] = None diff --git a/src/py/flwr/client/message_handler/message_handler.py b/src/py/flwr/client/message_handler/message_handler.py index 3f30db2a4ea2..93de7d7d8821 100644 --- a/src/py/flwr/client/message_handler/message_handler.py +++ b/src/py/flwr/client/message_handler/message_handler.py @@ -15,25 +15,40 @@ """Client-side message handler.""" -from typing import Optional, Tuple +from typing import Optional, Tuple, cast from flwr.client.client import ( - Client, maybe_call_evaluate, maybe_call_fit, maybe_call_get_parameters, maybe_call_get_properties, ) -from flwr.client.message_handler.task_handler import ( - get_server_message_from_task_ins, - wrap_client_message_in_task_res, -) -from flwr.client.run_state import RunState -from flwr.client.secure_aggregation import SecureAggregationHandler from flwr.client.typing import ClientFn -from flwr.common import serde -from flwr.proto.task_pb2 import SecureAggregation, Task, TaskIns, TaskRes -from flwr.proto.transport_pb2 import ClientMessage, Reason, ServerMessage +from flwr.common.configsrecord import ConfigsRecord +from flwr.common.constant import ( + MESSAGE_TYPE_EVALUATE, + MESSAGE_TYPE_FIT, + 
MESSAGE_TYPE_GET_PARAMETERS, + MESSAGE_TYPE_GET_PROPERTIES, +) +from flwr.common.context import Context +from flwr.common.message import Message, Metadata +from flwr.common.recordset import RecordSet +from flwr.common.recordset_compat import ( + evaluateres_to_recordset, + fitres_to_recordset, + getparametersres_to_recordset, + getpropertiesres_to_recordset, + recordset_to_evaluateins, + recordset_to_fitins, + recordset_to_getparametersins, + recordset_to_getpropertiesins, +) +from flwr.proto.transport_pb2 import ( # pylint: disable=E0611 + ClientMessage, + Reason, + ServerMessage, +) class UnexpectedServerMessage(Exception): @@ -44,128 +59,108 @@ class UnknownServerMessage(Exception): """Exception indicating that the received message is unknown.""" -def handle_control_message(task_ins: TaskIns) -> Tuple[Optional[TaskRes], int]: +def handle_control_message(message: Message) -> Tuple[Optional[Message], int]: """Handle control part of the incoming message. Parameters ---------- - task_ins : TaskIns - The task instruction coming from the server, to be processed by the client. + message : Message + The Message coming from the server, to be processed by the client. Returns ------- + message : Optional[Message] + Message to be sent back to the server. If None, the client should + continue to process messages from the server. sleep_duration : int Number of seconds that the client should disconnect from the server. - keep_going : bool - Flag that indicates whether the client should continue to process the - next message from the server (True) or disconnect and optionally - reconnect later (False). 
""" - server_msg = get_server_message_from_task_ins(task_ins, exclude_reconnect_ins=False) - - # SecAgg message - if server_msg is None: - return None, 0 - - # ReconnectIns message - field = server_msg.WhichOneof("msg") - if field == "reconnect_ins": - disconnect_msg, sleep_duration = _reconnect(server_msg.reconnect_ins) - task_res = wrap_client_message_in_task_res(disconnect_msg) - return task_res, sleep_duration + if message.metadata.message_type == "reconnect": + # Retrieve ReconnectIns from recordset + recordset = message.content + seconds = cast(int, recordset.get_configs("config")["seconds"]) + # Construct ReconnectIns and call _reconnect + disconnect_msg, sleep_duration = _reconnect( + ServerMessage.ReconnectIns(seconds=seconds) + ) + # Store DisconnectRes in recordset + reason = cast(int, disconnect_msg.disconnect_res.reason) + recordset = RecordSet() + recordset.set_configs("config", ConfigsRecord({"reason": reason})) + out_message = Message( + metadata=Metadata( + run_id=0, + message_id="", + group_id="", + node_id=0, + ttl="", + message_type="reconnect", + ), + content=recordset, + ) + # Return TaskRes and sleep duration + return out_message, sleep_duration # Any other message return None, 0 -def handle( - client_fn: ClientFn, state: RunState, task_ins: TaskIns -) -> Tuple[TaskRes, RunState]: - """Handle incoming TaskIns from the server. - - Parameters - ---------- - client_fn : ClientFn - A callable that instantiates a Client. - state : RunState - A dataclass storing the state for the run being executed by the client. - task_ins: TaskIns - The task instruction coming from the server, to be processed by the client. - - Returns - ------- - task_res : TaskRes - The task response that should be returned to the server. 
- """ - server_msg = get_server_message_from_task_ins(task_ins, exclude_reconnect_ins=False) - if server_msg is None: - # Instantiate the client - client = client_fn("-1") - client.set_state(state) - # Secure Aggregation - if task_ins.task.HasField("sa") and isinstance( - client, SecureAggregationHandler - ): - # pylint: disable-next=invalid-name - named_values = serde.named_values_from_proto(task_ins.task.sa.named_values) - res = client.handle_secure_aggregation(named_values) - task_res = TaskRes( - task_id="", - group_id="", - run_id=0, - task=Task( - ancestry=[], - sa=SecureAggregation(named_values=serde.named_values_to_proto(res)), - ), - ) - return task_res, client.get_state() - raise NotImplementedError() - client_msg, updated_state = handle_legacy_message(client_fn, state, server_msg) - task_res = wrap_client_message_in_task_res(client_msg) - return task_res, updated_state - - -def handle_legacy_message( - client_fn: ClientFn, state: RunState, server_msg: ServerMessage -) -> Tuple[ClientMessage, RunState]: - """Handle incoming messages from the server. - - Parameters - ---------- - client_fn : ClientFn - A callable that instantiates a Client. - state : RunState - A dataclass storing the state for the run being executed by the client. - server_msg: ServerMessage - The message coming from the server, to be processed by the client. - - Returns - ------- - client_msg : ClientMessage - The result message that should be returned to the server. 
- """ - field = server_msg.WhichOneof("msg") - - # Must be handled elsewhere - if field == "reconnect_ins": - raise UnexpectedServerMessage() - - # Instantiate the client +def handle_legacy_message_from_msgtype( + client_fn: ClientFn, message: Message, context: Context +) -> Message: + """Handle legacy message in the inner most mod.""" client = client_fn("-1") - client.set_state(state) - # Execute task - message = None - if field == "get_properties_ins": - message = _get_properties(client, server_msg.get_properties_ins) - if field == "get_parameters_ins": - message = _get_parameters(client, server_msg.get_parameters_ins) - if field == "fit_ins": - message = _fit(client, server_msg.fit_ins) - if field == "evaluate_ins": - message = _evaluate(client, server_msg.evaluate_ins) - if message: - return message, client.get_state() - raise UnknownServerMessage() + + client.set_context(context) + + message_type = message.metadata.message_type + + # Handle GetPropertiesIns + if message_type == MESSAGE_TYPE_GET_PROPERTIES: + get_properties_res = maybe_call_get_properties( + client=client, + get_properties_ins=recordset_to_getpropertiesins(message.content), + ) + out_recordset = getpropertiesres_to_recordset(get_properties_res) + # Handle GetParametersIns + elif message_type == MESSAGE_TYPE_GET_PARAMETERS: + get_parameters_res = maybe_call_get_parameters( + client=client, + get_parameters_ins=recordset_to_getparametersins(message.content), + ) + out_recordset = getparametersres_to_recordset( + get_parameters_res, keep_input=False + ) + # Handle FitIns + elif message_type == MESSAGE_TYPE_FIT: + fit_res = maybe_call_fit( + client=client, + fit_ins=recordset_to_fitins(message.content, keep_input=True), + ) + out_recordset = fitres_to_recordset(fit_res, keep_input=False) + # Handle EvaluateIns + elif message_type == MESSAGE_TYPE_EVALUATE: + evaluate_res = maybe_call_evaluate( + client=client, + evaluate_ins=recordset_to_evaluateins(message.content, keep_input=True), + ) + 
out_recordset = evaluateres_to_recordset(evaluate_res) + else: + raise ValueError(f"Invalid task type: {message_type}") + + # Return Message + out_message = Message( + metadata=Metadata( + run_id=0, + message_id="", + group_id="", + node_id=0, + ttl="", + message_type=message_type, + ), + content=out_recordset, + ) + return out_message def _reconnect( @@ -180,67 +175,3 @@ def _reconnect( # Build DisconnectRes message disconnect_res = ClientMessage.DisconnectRes(reason=reason) return ClientMessage(disconnect_res=disconnect_res), sleep_duration - - -def _get_properties( - client: Client, get_properties_msg: ServerMessage.GetPropertiesIns -) -> ClientMessage: - # Deserialize `get_properties` instruction - get_properties_ins = serde.get_properties_ins_from_proto(get_properties_msg) - - # Request properties - get_properties_res = maybe_call_get_properties( - client=client, - get_properties_ins=get_properties_ins, - ) - - # Serialize response - get_properties_res_proto = serde.get_properties_res_to_proto(get_properties_res) - return ClientMessage(get_properties_res=get_properties_res_proto) - - -def _get_parameters( - client: Client, get_parameters_msg: ServerMessage.GetParametersIns -) -> ClientMessage: - # Deserialize `get_parameters` instruction - get_parameters_ins = serde.get_parameters_ins_from_proto(get_parameters_msg) - - # Request parameters - get_parameters_res = maybe_call_get_parameters( - client=client, - get_parameters_ins=get_parameters_ins, - ) - - # Serialize response - get_parameters_res_proto = serde.get_parameters_res_to_proto(get_parameters_res) - return ClientMessage(get_parameters_res=get_parameters_res_proto) - - -def _fit(client: Client, fit_msg: ServerMessage.FitIns) -> ClientMessage: - # Deserialize fit instruction - fit_ins = serde.fit_ins_from_proto(fit_msg) - - # Perform fit - fit_res = maybe_call_fit( - client=client, - fit_ins=fit_ins, - ) - - # Serialize fit result - fit_res_proto = serde.fit_res_to_proto(fit_res) - return 
ClientMessage(fit_res=fit_res_proto) - - -def _evaluate(client: Client, evaluate_msg: ServerMessage.EvaluateIns) -> ClientMessage: - # Deserialize evaluate instruction - evaluate_ins = serde.evaluate_ins_from_proto(evaluate_msg) - - # Perform evaluation - evaluate_res = maybe_call_evaluate( - client=client, - evaluate_ins=evaluate_ins, - ) - - # Serialize evaluate result - evaluate_res_proto = serde.evaluate_res_to_proto(evaluate_res) - return ClientMessage(evaluate_res=evaluate_res_proto) diff --git a/src/py/flwr/client/message_handler/message_handler_test.py b/src/py/flwr/client/message_handler/message_handler_test.py index cd810ae220e9..c4c65d98b833 100644 --- a/src/py/flwr/client/message_handler/message_handler_test.py +++ b/src/py/flwr/client/message_handler/message_handler_test.py @@ -18,9 +18,9 @@ import uuid from flwr.client import Client -from flwr.client.run_state import RunState from flwr.client.typing import ClientFn from flwr.common import ( + Code, EvaluateIns, EvaluateRes, FitIns, @@ -30,14 +30,16 @@ GetPropertiesIns, GetPropertiesRes, Parameters, - serde, - typing, + Status, ) -from flwr.proto.node_pb2 import Node -from flwr.proto.task_pb2 import Task, TaskIns, TaskRes -from flwr.proto.transport_pb2 import ClientMessage, Code, ServerMessage, Status +from flwr.common import recordset_compat as compat +from flwr.common import typing +from flwr.common.constant import MESSAGE_TYPE_GET_PROPERTIES +from flwr.common.context import Context +from flwr.common.message import Message, Metadata +from flwr.common.recordset import RecordSet -from .message_handler import handle, handle_control_message +from .message_handler import handle_legacy_message_from_msgtype class ClientWithoutProps(Client): @@ -116,137 +118,75 @@ def test_client_without_get_properties() -> None: """Test client implementing get_properties.""" # Prepare client = ClientWithoutProps() - ins = ServerMessage.GetPropertiesIns() - - task_ins: TaskIns = TaskIns( - task_id=str(uuid.uuid4()), - 
group_id="", - run_id=0, - task=Task( - producer=Node(node_id=0, anonymous=True), - consumer=Node(node_id=0, anonymous=True), - ancestry=[], - legacy_server_message=ServerMessage(get_properties_ins=ins), + recordset = compat.getpropertiesins_to_recordset(GetPropertiesIns({})) + message = Message( + metadata=Metadata( + run_id=0, + message_id=str(uuid.uuid4()), + group_id="", + node_id=0, + ttl="", + message_type=MESSAGE_TYPE_GET_PROPERTIES, ), + content=recordset, ) # Execute - disconnect_task_res, actual_sleep_duration = handle_control_message( - task_ins=task_ins - ) - task_res, _ = handle( + actual_msg = handle_legacy_message_from_msgtype( client_fn=_get_client_fn(client), - state=RunState(state={}), - task_ins=task_ins, + message=message, + context=Context(state=RecordSet()), ) - if not task_res.HasField("task"): - raise ValueError("Task value not found") - - # pylint: disable=no-member - if not task_res.task.HasField("legacy_client_message"): - raise ValueError("Unexpected None value") - # pylint: enable=no-member - - task_res.MergeFrom( - TaskRes( - task_id=str(uuid.uuid4()), - group_id="", - run_id=0, - ) - ) - # pylint: disable=no-member - task_res.task.MergeFrom( - Task( - producer=Node(node_id=0, anonymous=True), - consumer=Node(node_id=0, anonymous=True), - ancestry=[task_ins.task_id], - ) - ) - - actual_msg = task_res.task.legacy_client_message - # pylint: enable=no-member - # Assert - expected_get_properties_res = ClientMessage.GetPropertiesRes( + expected_get_properties_res = GetPropertiesRes( status=Status( code=Code.GET_PROPERTIES_NOT_IMPLEMENTED, message="Client does not implement `get_properties`", - ) + ), + properties={}, ) - expected_msg = ClientMessage(get_properties_res=expected_get_properties_res) + expected_rs = compat.getpropertiesres_to_recordset(expected_get_properties_res) + expected_msg = Message(message.metadata, expected_rs) - assert actual_msg == expected_msg - assert not disconnect_task_res - assert actual_sleep_duration == 0 + 
assert actual_msg.content == expected_msg.content + assert actual_msg.metadata.message_type == expected_msg.metadata.message_type def test_client_with_get_properties() -> None: """Test client not implementing get_properties.""" # Prepare client = ClientWithProps() - ins = ServerMessage.GetPropertiesIns() - task_ins = TaskIns( - task_id=str(uuid.uuid4()), - group_id="", - run_id=0, - task=Task( - producer=Node(node_id=0, anonymous=True), - consumer=Node(node_id=0, anonymous=True), - ancestry=[], - legacy_server_message=ServerMessage(get_properties_ins=ins), + recordset = compat.getpropertiesins_to_recordset(GetPropertiesIns({})) + message = Message( + metadata=Metadata( + run_id=0, + message_id=str(uuid.uuid4()), + group_id="", + node_id=0, + ttl="", + message_type=MESSAGE_TYPE_GET_PROPERTIES, ), + content=recordset, ) # Execute - disconnect_task_res, actual_sleep_duration = handle_control_message( - task_ins=task_ins - ) - task_res, _ = handle( + actual_msg = handle_legacy_message_from_msgtype( client_fn=_get_client_fn(client), - state=RunState(state={}), - task_ins=task_ins, - ) - - if not task_res.HasField("task"): - raise ValueError("Task value not found") - - # pylint: disable=no-member - if not task_res.task.HasField("legacy_client_message"): - raise ValueError("Unexpected None value") - # pylint: enable=no-member - - task_res.MergeFrom( - TaskRes( - task_id=str(uuid.uuid4()), - group_id="", - run_id=0, - ) + message=message, + context=Context(state=RecordSet()), ) - # pylint: disable=no-member - task_res.task.MergeFrom( - Task( - producer=Node(node_id=0, anonymous=True), - consumer=Node(node_id=0, anonymous=True), - ancestry=[task_ins.task_id], - ) - ) - - actual_msg = task_res.task.legacy_client_message - # pylint: enable=no-member # Assert - expected_get_properties_res = ClientMessage.GetPropertiesRes( + expected_get_properties_res = GetPropertiesRes( status=Status( code=Code.OK, message="Success", ), - properties=serde.properties_to_proto( - 
properties={"str_prop": "val", "int_prop": 1} - ), + properties={"str_prop": "val", "int_prop": 1}, ) - expected_msg = ClientMessage(get_properties_res=expected_get_properties_res) + expected_rs = compat.getpropertiesres_to_recordset(expected_get_properties_res) + expected_msg = Message(message.metadata, expected_rs) - assert actual_msg == expected_msg - assert not disconnect_task_res - assert actual_sleep_duration == 0 + assert actual_msg.content == expected_msg.content + assert actual_msg.metadata.message_type == expected_msg.metadata.message_type diff --git a/src/py/flwr/client/message_handler/task_handler.py b/src/py/flwr/client/message_handler/task_handler.py index 3599e1dfb254..daac1be77138 100644 --- a/src/py/flwr/client/message_handler/task_handler.py +++ b/src/py/flwr/client/message_handler/task_handler.py @@ -17,21 +17,18 @@ from typing import Optional -from flwr.proto.fleet_pb2 import PullTaskInsResponse -from flwr.proto.node_pb2 import Node -from flwr.proto.task_pb2 import Task, TaskIns, TaskRes -from flwr.proto.transport_pb2 import ClientMessage, ServerMessage +from flwr.proto.fleet_pb2 import PullTaskInsResponse # pylint: disable=E0611 +from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.task_pb2 import Task, TaskIns, TaskRes # pylint: disable=E0611 -def validate_task_ins(task_ins: TaskIns, discard_reconnect_ins: bool) -> bool: +def validate_task_ins(task_ins: TaskIns) -> bool: """Validate a TaskIns before it entering the message handling process. Parameters ---------- task_ins: TaskIns The task instruction coming from the server. - discard_reconnect_ins: bool - If True, ReconnectIns will not be considered as valid content. Returns ------- @@ -39,23 +36,8 @@ def validate_task_ins(task_ins: TaskIns, discard_reconnect_ins: bool) -> bool: True if the TaskIns is deemed valid and therefore suitable for undergoing the message handling process, False otherwise. """ - # Check if the task_ins contains legacy_server_message or sa. 
- # If legacy_server_message is set, check if ServerMessage is one of - # {GetPropertiesIns, GetParametersIns, FitIns, EvaluateIns, ReconnectIns*} - # Discard ReconnectIns if discard_reconnect_ins is true. - if ( - not task_ins.HasField("task") - or ( - not task_ins.task.HasField("legacy_server_message") - and not task_ins.task.HasField("sa") - ) - or ( - discard_reconnect_ins - and task_ins.task.legacy_server_message.WhichOneof("msg") == "reconnect_ins" - ) - ): + if not (task_ins.HasField("task") and task_ins.task.HasField("recordset")): return False - return True @@ -107,32 +89,6 @@ def get_task_ins( return task_ins -def get_server_message_from_task_ins( - task_ins: TaskIns, exclude_reconnect_ins: bool -) -> Optional[ServerMessage]: - """Get ServerMessage from TaskIns, if available.""" - # Return the message if it is in - # {GetPropertiesIns, GetParametersIns, FitIns, EvaluateIns} - # Return the message if it is ReconnectIns and exclude_reconnect_ins is False. - if not validate_task_ins( - task_ins, discard_reconnect_ins=exclude_reconnect_ins - ) or not task_ins.task.HasField("legacy_server_message"): - return None - - return task_ins.task.legacy_server_message - - -def wrap_client_message_in_task_res(client_message: ClientMessage) -> TaskRes: - """Wrap ClientMessage in TaskRes.""" - # Instantiate a TaskRes, only filling client_message field. 
- return TaskRes( - task_id="", - group_id="", - run_id=0, - task=Task(ancestry=[], legacy_client_message=client_message), - ) - - def configure_task_res( task_res: TaskRes, ref_task_ins: TaskIns, producer: Node ) -> TaskRes: diff --git a/src/py/flwr/client/message_handler/task_handler_test.py b/src/py/flwr/client/message_handler/task_handler_test.py index 748ef63e72ef..9a668231d509 100644 --- a/src/py/flwr/client/message_handler/task_handler_test.py +++ b/src/py/flwr/client/message_handler/task_handler_test.py @@ -16,67 +16,35 @@ from flwr.client.message_handler.task_handler import ( - get_server_message_from_task_ins, get_task_ins, validate_task_ins, validate_task_res, - wrap_client_message_in_task_res, ) -from flwr.proto.fleet_pb2 import PullTaskInsResponse -from flwr.proto.task_pb2 import SecureAggregation, Task, TaskIns, TaskRes -from flwr.proto.transport_pb2 import ClientMessage, ServerMessage +from flwr.common import serde +from flwr.common.recordset import RecordSet +from flwr.proto.fleet_pb2 import PullTaskInsResponse # pylint: disable=E0611 +from flwr.proto.task_pb2 import Task, TaskIns, TaskRes # pylint: disable=E0611 def test_validate_task_ins_no_task() -> None: """Test validate_task_ins.""" task_ins = TaskIns(task=None) - assert not validate_task_ins(task_ins, discard_reconnect_ins=True) - assert not validate_task_ins(task_ins, discard_reconnect_ins=False) + assert not validate_task_ins(task_ins) def test_validate_task_ins_no_content() -> None: """Test validate_task_ins.""" - task_ins = TaskIns(task=Task(legacy_server_message=None, sa=None)) + task_ins = TaskIns(task=Task(recordset=None)) - assert not validate_task_ins(task_ins, discard_reconnect_ins=True) - assert not validate_task_ins(task_ins, discard_reconnect_ins=False) + assert not validate_task_ins(task_ins) -def test_validate_task_ins_with_reconnect_ins() -> None: +def test_validate_task_ins_valid() -> None: """Test validate_task_ins.""" - task_ins = TaskIns( - task=Task( - 
legacy_server_message=ServerMessage( - reconnect_ins=ServerMessage.ReconnectIns(seconds=3) - ) - ) - ) - - assert not validate_task_ins(task_ins, discard_reconnect_ins=True) - assert validate_task_ins(task_ins, discard_reconnect_ins=False) - - -def test_validate_task_ins_valid_legacy_server_message() -> None: - """Test validate_task_ins.""" - task_ins = TaskIns( - task=Task( - legacy_server_message=ServerMessage( - get_properties_ins=ServerMessage.GetPropertiesIns() - ) - ) - ) + task_ins = TaskIns(task=Task(recordset=serde.recordset_to_proto(RecordSet()))) - assert validate_task_ins(task_ins, discard_reconnect_ins=True) - assert validate_task_ins(task_ins, discard_reconnect_ins=False) - - -def test_validate_task_ins_valid_sa() -> None: - """Test validate_task_ins.""" - task_ins = TaskIns(task=Task(sa=SecureAggregation())) - - assert validate_task_ins(task_ins, discard_reconnect_ins=True) - assert validate_task_ins(task_ins, discard_reconnect_ins=False) + assert validate_task_ins(task_ins) def test_validate_task_res() -> None: @@ -134,61 +102,3 @@ def test_get_task_ins_multiple_ins() -> None: ) actual_task_ins = get_task_ins(res) assert actual_task_ins == expected_task_ins - - -def test_get_server_message_from_task_ins_invalid() -> None: - """Test get_server_message_from_task_ins.""" - task_ins = TaskIns(task=Task(legacy_server_message=None)) - msg_t = get_server_message_from_task_ins(task_ins, exclude_reconnect_ins=True) - msg_f = get_server_message_from_task_ins(task_ins, exclude_reconnect_ins=False) - - assert msg_t is None - assert msg_f is None - - -def test_get_server_message_from_task_ins_reconnect_ins() -> None: - """Test get_server_message_from_task_ins.""" - expected_server_message = ServerMessage( - reconnect_ins=ServerMessage.ReconnectIns(seconds=3) - ) - task_ins = TaskIns(task=Task(legacy_server_message=expected_server_message)) - msg_t = get_server_message_from_task_ins(task_ins, exclude_reconnect_ins=True) - msg_f = 
get_server_message_from_task_ins(task_ins, exclude_reconnect_ins=False) - - assert msg_t is None - assert msg_f == expected_server_message - - -def test_get_server_message_from_task_ins_sa() -> None: - """Test get_server_message_from_task_ins.""" - task_ins = TaskIns(task=Task(sa=SecureAggregation())) - msg_t = get_server_message_from_task_ins(task_ins, exclude_reconnect_ins=True) - msg_f = get_server_message_from_task_ins(task_ins, exclude_reconnect_ins=False) - - assert msg_t is None - assert msg_f is None - - -def test_get_server_message_from_task_ins_valid_legacy_server_message() -> None: - """Test get_server_message_from_task_ins.""" - expected_server_message = ServerMessage( - get_properties_ins=ServerMessage.GetPropertiesIns() - ) - task_ins = TaskIns(task=Task(legacy_server_message=expected_server_message)) - msg_t = get_server_message_from_task_ins(task_ins, exclude_reconnect_ins=True) - msg_f = get_server_message_from_task_ins(task_ins, exclude_reconnect_ins=False) - - assert msg_t == expected_server_message - assert msg_f == expected_server_message - - -def test_wrap_client_message_in_task_res() -> None: - """Test wrap_client_message_in_task_res.""" - expected_client_message = ClientMessage( - get_properties_res=ClientMessage.GetPropertiesRes() - ) - task_res = wrap_client_message_in_task_res(expected_client_message) - - assert validate_task_res(task_res) - # pylint: disable-next=no-member - assert task_res.task.legacy_client_message == expected_client_message diff --git a/src/py/flwr/client/middleware/utils_test.py b/src/py/flwr/client/middleware/utils_test.py deleted file mode 100644 index aa4358be5a51..000000000000 --- a/src/py/flwr/client/middleware/utils_test.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright 2023 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for the utility functions.""" - - -import unittest -from typing import List - -from flwr.client.run_state import RunState -from flwr.client.typing import Bwd, FlowerCallable, Fwd, Layer -from flwr.proto.task_pb2 import TaskIns, TaskRes - -from .utils import make_ffn - - -def make_mock_middleware(name: str, footprint: List[str]) -> Layer: - """Make a mock middleware layer.""" - - def middleware(fwd: Fwd, app: FlowerCallable) -> Bwd: - footprint.append(name) - fwd.task_ins.task_id += f"{name}" - bwd = app(fwd) - footprint.append(name) - bwd.task_res.task_id += f"{name}" - return bwd - - return middleware - - -def make_mock_app(name: str, footprint: List[str]) -> FlowerCallable: - """Make a mock app.""" - - def app(fwd: Fwd) -> Bwd: - footprint.append(name) - fwd.task_ins.task_id += f"{name}" - return Bwd(task_res=TaskRes(task_id=name), state=RunState({})) - - return app - - -class TestMakeApp(unittest.TestCase): - """Tests for the `make_app` function.""" - - def test_multiple_middlewares(self) -> None: - """Test if multiple middlewares are called in the correct order.""" - # Prepare - footprint: List[str] = [] - mock_app = make_mock_app("app", footprint) - mock_middleware_names = [f"middleware{i}" for i in range(1, 15)] - mock_middleware_layers = [ - make_mock_middleware(name, footprint) for name in mock_middleware_names - ] - task_ins = TaskIns() - - # Execute - wrapped_app = make_ffn(mock_app, mock_middleware_layers) - task_res = wrapped_app(Fwd(task_ins=task_ins, 
state=RunState({}))).task_res - - # Assert - trace = mock_middleware_names + ["app"] - self.assertEqual(footprint, trace + list(reversed(mock_middleware_names))) - # pylint: disable-next=no-member - self.assertEqual(task_ins.task_id, "".join(trace)) - self.assertEqual(task_res.task_id, "".join(reversed(trace))) - - def test_filter(self) -> None: - """Test if a middleware can filter incoming TaskIns.""" - # Prepare - footprint: List[str] = [] - mock_app = make_mock_app("app", footprint) - task_ins = TaskIns() - - def filter_layer(fwd: Fwd, _: FlowerCallable) -> Bwd: - footprint.append("filter") - fwd.task_ins.task_id += "filter" - # Skip calling app - return Bwd(task_res=TaskRes(task_id="filter"), state=RunState({})) - - # Execute - wrapped_app = make_ffn(mock_app, [filter_layer]) - task_res = wrapped_app(Fwd(task_ins=task_ins, state=RunState({}))).task_res - - # Assert - self.assertEqual(footprint, ["filter"]) - # pylint: disable-next=no-member - self.assertEqual(task_ins.task_id, "filter") - self.assertEqual(task_res.task_id, "filter") diff --git a/src/py/flwr/client/middleware/__init__.py b/src/py/flwr/client/mod/__init__.py similarity index 88% rename from src/py/flwr/client/middleware/__init__.py rename to src/py/flwr/client/mod/__init__.py index 58b31296fbbe..a181865614df 100644 --- a/src/py/flwr/client/middleware/__init__.py +++ b/src/py/flwr/client/mod/__init__.py @@ -12,11 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""Middleware layers.""" +"""Mods.""" +from .secure_aggregation.secaggplus_mod import secaggplus_mod from .utils import make_ffn __all__ = [ "make_ffn", + "secaggplus_mod", ] diff --git a/src/py/flwr/client/run_state.py b/src/py/flwr/client/mod/secure_aggregation/__init__.py similarity index 79% rename from src/py/flwr/client/run_state.py rename to src/py/flwr/client/mod/secure_aggregation/__init__.py index c2755eb995eb..863c149f952e 100644 --- a/src/py/flwr/client/run_state.py +++ b/src/py/flwr/client/mod/secure_aggregation/__init__.py @@ -12,14 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Run state.""" +"""Secure Aggregation mods.""" -from dataclasses import dataclass -from typing import Dict +from .secaggplus_mod import secaggplus_mod -@dataclass -class RunState: - """State of a run executed by a client node.""" - - state: Dict[str, str] +__all__ = [ + "secaggplus_mod", +] diff --git a/src/py/flwr/client/secure_aggregation/secaggplus_handler.py b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py similarity index 70% rename from src/py/flwr/client/secure_aggregation/secaggplus_handler.py rename to src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py index 4b74c1ace3de..fa5a9fd24109 100644 --- a/src/py/flwr/client/secure_aggregation/secaggplus_handler.py +++ b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,18 +17,18 @@ import os from dataclasses import dataclass, field -from logging import ERROR, INFO, WARNING -from typing import Any, Dict, List, Optional, Tuple, Union, cast - -from flwr.client.client import Client -from flwr.client.numpy_client import NumPyClient -from flwr.common import ( - bytes_to_ndarray, - ndarray_to_bytes, - ndarrays_to_parameters, - parameters_to_ndarrays, -) +from logging import INFO, WARNING +from typing import Any, Callable, Dict, List, Tuple, cast + +from flwr.client.typing import ClientAppCallable +from flwr.common import ndarray_to_bytes, parameters_to_ndarrays +from flwr.common import recordset_compat as compat +from flwr.common.configsrecord import ConfigsRecord +from flwr.common.constant import MESSAGE_TYPE_FIT +from flwr.common.context import Context from flwr.common.logger import log +from flwr.common.message import Message, Metadata +from flwr.common.recordset import RecordSet from flwr.common.secure_aggregation.crypto.shamir import create_shares from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( bytes_to_private_key, @@ -56,7 +56,6 @@ KEY_DESTINATION_LIST, KEY_MASKED_PARAMETERS, KEY_MOD_RANGE, - KEY_PARAMETERS, KEY_PUBLIC_KEY_1, KEY_PUBLIC_KEY_2, KEY_SAMPLE_NUMBER, @@ -68,6 +67,8 @@ KEY_STAGE, KEY_TARGET_RANGE, KEY_THRESHOLD, + RECORD_KEY_CONFIGS, + RECORD_KEY_STATE, STAGE_COLLECT_MASKED_INPUT, STAGE_SETUP, STAGE_SHARE_KEYS, @@ -79,9 +80,7 @@ share_keys_plaintext_concat, share_keys_plaintext_separate, ) -from flwr.common.typing import FitIns, Value - -from .handler import SecureAggregationHandler +from flwr.common.typing import ConfigsRecordValues, FitRes @dataclass @@ -89,6 +88,8 @@ class SecAggPlusState: """State of the SecAgg+ protocol.""" + current_stage: str = STAGE_UNMASK + sid: int = 0 sample_num: int = 0 share_num: int = 0 @@ -112,70 +113,122 @@ class SecAggPlusState: ss2_dict: Dict[int, bytes] = field(default_factory=dict) public_keys_dict: Dict[int, Tuple[bytes, bytes]] = 
field(default_factory=dict) - client: Optional[Union[Client, NumPyClient]] = None - - -class SecAggPlusHandler(SecureAggregationHandler): - """Message handler for the SecAgg+ protocol.""" - - _shared_state = SecAggPlusState() - _current_stage = STAGE_UNMASK - - def handle_secure_aggregation( - self, named_values: Dict[str, Value] - ) -> Dict[str, Value]: - """Handle incoming message and return results, following the SecAgg+ protocol. - - Parameters - ---------- - named_values : Dict[str, Value] - The named values retrieved from the SecureAggregation sub-message - of Task message in the server's TaskIns. - - Returns - ------- - Dict[str, Value] - The final/intermediate results of the SecAgg+ protocol. - """ - # Check if self is a client - if not isinstance(self, (Client, NumPyClient)): - raise TypeError( - "The subclass of SecAggPlusHandler must be " - "the subclass of Client or NumPyClient." - ) - - # Check the validity of the next stage - check_stage(self._current_stage, named_values) - - # Update the current stage - self._current_stage = cast(str, named_values.pop(KEY_STAGE)) + def __init__(self, **kwargs: ConfigsRecordValues) -> None: + for k, v in kwargs.items(): + if k.endswith(":V"): + continue + new_v: Any = v + if k.endswith(":K"): + k = k[:-2] + keys = cast(List[int], v) + values = cast(List[bytes], kwargs[f"{k}:V"]) + if len(values) > len(keys): + updated_values = [ + tuple(values[i : i + 2]) for i in range(0, len(values), 2) + ] + new_v = dict(zip(keys, updated_values)) + else: + new_v = dict(zip(keys, values)) + self.__setattr__(k, new_v) + + def to_dict(self) -> Dict[str, ConfigsRecordValues]: + """Convert the state to a dictionary.""" + ret = vars(self) + for k in list(ret.keys()): + if isinstance(ret[k], dict): + # Replace dict with two lists + v = cast(Dict[str, Any], ret.pop(k)) + ret[f"{k}:K"] = list(v.keys()) + if k == "public_keys_dict": + v_list: List[bytes] = [] + for b1_b2 in cast(List[Tuple[bytes, bytes]], v.values()): + v_list.extend(b1_b2) 
+ ret[f"{k}:V"] = v_list + else: + ret[f"{k}:V"] = list(v.values()) + return ret + + +def _get_fit_fn( + msg: Message, ctxt: Context, call_next: ClientAppCallable +) -> Callable[[], FitRes]: + """Get the fit function.""" + + def fit() -> FitRes: + out_msg = call_next(msg, ctxt) + return compat.recordset_to_fitres(out_msg.content, keep_input=False) + + return fit + + +def secaggplus_mod( + msg: Message, + ctxt: Context, + call_next: ClientAppCallable, +) -> Message: + """Handle incoming message and return results, following the SecAgg+ protocol.""" + # Ignore non-fit messages + if msg.metadata.message_type != MESSAGE_TYPE_FIT: + return call_next(msg, ctxt) + + # Retrieve local state + if RECORD_KEY_STATE not in ctxt.state.configs: + ctxt.state.set_configs(RECORD_KEY_STATE, ConfigsRecord({})) + state_dict = ctxt.state.get_configs(RECORD_KEY_STATE).data + state = SecAggPlusState(**state_dict) + + # Retrieve incoming configs + configs = msg.content.get_configs(RECORD_KEY_CONFIGS).data - # Check the validity of the `named_values` based on the current stage - check_named_values(self._current_stage, named_values) - - # Execute - if self._current_stage == STAGE_SETUP: - self._shared_state = SecAggPlusState(client=self) - return _setup(self._shared_state, named_values) - if self._current_stage == STAGE_SHARE_KEYS: - return _share_keys(self._shared_state, named_values) - if self._current_stage == STAGE_COLLECT_MASKED_INPUT: - return _collect_masked_input(self._shared_state, named_values) - if self._current_stage == STAGE_UNMASK: - return _unmask(self._shared_state, named_values) - raise ValueError(f"Unknown secagg stage: {self._current_stage}") + # Check the validity of the next stage + check_stage(state.current_stage, configs) + + # Update the current stage + state.current_stage = cast(str, configs.pop(KEY_STAGE)) + + # Check the validity of the configs based on the current stage + check_configs(state.current_stage, configs) + + # Execute + if state.current_stage == 
STAGE_SETUP: + res = _setup(state, configs) + elif state.current_stage == STAGE_SHARE_KEYS: + res = _share_keys(state, configs) + elif state.current_stage == STAGE_COLLECT_MASKED_INPUT: + fit = _get_fit_fn(msg, ctxt, call_next) + res = _collect_masked_input(state, configs, fit) + elif state.current_stage == STAGE_UNMASK: + res = _unmask(state, configs) + else: + raise ValueError(f"Unknown secagg stage: {state.current_stage}") + + # Save state + ctxt.state.set_configs(RECORD_KEY_STATE, ConfigsRecord(state.to_dict())) + + # Return message + return Message( + metadata=Metadata( + run_id=0, + message_id="", + group_id="", + node_id=0, + ttl="", + message_type=MESSAGE_TYPE_FIT, + ), + content=RecordSet(configs={RECORD_KEY_CONFIGS: ConfigsRecord(res, False)}), + ) -def check_stage(current_stage: str, named_values: Dict[str, Value]) -> None: +def check_stage(current_stage: str, configs: Dict[str, ConfigsRecordValues]) -> None: """Check the validity of the next stage.""" # Check the existence of KEY_STAGE - if KEY_STAGE not in named_values: + if KEY_STAGE not in configs: raise KeyError( f"The required key '{KEY_STAGE}' is missing from the input `named_values`." 
) # Check the value type of the KEY_STAGE - next_stage = named_values[KEY_STAGE] + next_stage = configs[KEY_STAGE] if not isinstance(next_stage, str): raise TypeError( f"The value for the key '{KEY_STAGE}' must be of type {str}, " @@ -198,8 +251,8 @@ def check_stage(current_stage: str, named_values: Dict[str, Value]) -> None: # pylint: disable-next=too-many-branches -def check_named_values(stage: str, named_values: Dict[str, Value]) -> None: - """Check the validity of the input `named_values`.""" +def check_configs(stage: str, configs: Dict[str, ConfigsRecordValues]) -> None: + """Check the validity of the configs.""" # Check `named_values` for the setup stage if stage == STAGE_SETUP: key_type_pairs = [ @@ -212,7 +265,7 @@ def check_named_values(stage: str, named_values: Dict[str, Value]) -> None: (KEY_MOD_RANGE, int), ] for key, expected_type in key_type_pairs: - if key not in named_values: + if key not in configs: raise KeyError( f"Stage {STAGE_SETUP}: the required key '{key}' is " "missing from the input `named_values`." @@ -220,14 +273,14 @@ def check_named_values(stage: str, named_values: Dict[str, Value]) -> None: # Bool is a subclass of int in Python, # so `isinstance(v, int)` will return True even if v is a boolean. # pylint: disable-next=unidiomatic-typecheck - if type(named_values[key]) is not expected_type: + if type(configs[key]) is not expected_type: raise TypeError( f"Stage {STAGE_SETUP}: The value for the key '{key}' " f"must be of type {expected_type}, " - f"but got {type(named_values[key])} instead." + f"but got {type(configs[key])} instead." 
) elif stage == STAGE_SHARE_KEYS: - for key, value in named_values.items(): + for key, value in configs.items(): if ( not isinstance(value, list) or len(value) != 2 @@ -242,18 +295,17 @@ def check_named_values(stage: str, named_values: Dict[str, Value]) -> None: key_type_pairs = [ (KEY_CIPHERTEXT_LIST, bytes), (KEY_SOURCE_LIST, int), - (KEY_PARAMETERS, bytes), ] for key, expected_type in key_type_pairs: - if key not in named_values: + if key not in configs: raise KeyError( f"Stage {STAGE_COLLECT_MASKED_INPUT}: " f"the required key '{key}' is " "missing from the input `named_values`." ) - if not isinstance(named_values[key], list) or any( + if not isinstance(configs[key], list) or any( elm - for elm in cast(List[Any], named_values[key]) + for elm in cast(List[Any], configs[key]) # pylint: disable-next=unidiomatic-typecheck if type(elm) is not expected_type ): @@ -268,15 +320,15 @@ def check_named_values(stage: str, named_values: Dict[str, Value]) -> None: (KEY_DEAD_SECURE_ID_LIST, int), ] for key, expected_type in key_type_pairs: - if key not in named_values: + if key not in configs: raise KeyError( f"Stage {STAGE_UNMASK}: " f"the required key '{key}' is " "missing from the input `named_values`." 
) - if not isinstance(named_values[key], list) or any( + if not isinstance(configs[key], list) or any( elm - for elm in cast(List[Any], named_values[key]) + for elm in cast(List[Any], configs[key]) # pylint: disable-next=unidiomatic-typecheck if type(elm) is not expected_type ): @@ -289,9 +341,11 @@ def check_named_values(stage: str, named_values: Dict[str, Value]) -> None: raise ValueError(f"Unknown secagg stage: {stage}") -def _setup(state: SecAggPlusState, named_values: Dict[str, Value]) -> Dict[str, Value]: +def _setup( + state: SecAggPlusState, configs: Dict[str, ConfigsRecordValues] +) -> Dict[str, ConfigsRecordValues]: # Assigning parameter values to object fields - sec_agg_param_dict = named_values + sec_agg_param_dict = configs state.sample_num = cast(int, sec_agg_param_dict[KEY_SAMPLE_NUMBER]) state.sid = cast(int, sec_agg_param_dict[KEY_SECURE_ID]) log(INFO, "Client %d: starting stage 0...", state.sid) @@ -324,9 +378,9 @@ def _setup(state: SecAggPlusState, named_values: Dict[str, Value]) -> Dict[str, # pylint: disable-next=too-many-locals def _share_keys( - state: SecAggPlusState, named_values: Dict[str, Value] -) -> Dict[str, Value]: - named_bytes_tuples = cast(Dict[str, Tuple[bytes, bytes]], named_values) + state: SecAggPlusState, configs: Dict[str, ConfigsRecordValues] +) -> Dict[str, ConfigsRecordValues]: + named_bytes_tuples = cast(Dict[str, Tuple[bytes, bytes]], configs) key_dict = {int(sid): (pk1, pk2) for sid, (pk1, pk2) in named_bytes_tuples.items()} log(INFO, "Client %d: starting stage 1...", state.sid) state.public_keys_dict = key_dict @@ -386,12 +440,14 @@ def _share_keys( # pylint: disable-next=too-many-locals def _collect_masked_input( - state: SecAggPlusState, named_values: Dict[str, Value] -) -> Dict[str, Value]: + state: SecAggPlusState, + configs: Dict[str, ConfigsRecordValues], + fit: Callable[[], FitRes], +) -> Dict[str, ConfigsRecordValues]: log(INFO, "Client %d: starting stage 2...", state.sid) available_clients: List[int] = [] - 
ciphertexts = cast(List[bytes], named_values[KEY_CIPHERTEXT_LIST]) - srcs = cast(List[int], named_values[KEY_SOURCE_LIST]) + ciphertexts = cast(List[bytes], configs[KEY_CIPHERTEXT_LIST]) + srcs = cast(List[int], configs[KEY_SOURCE_LIST]) if len(ciphertexts) + 1 < state.threshold: raise ValueError("Not enough available neighbour clients.") @@ -417,18 +473,9 @@ def _collect_masked_input( state.sk1_share_dict[src] = sk1_share # Fit client - parameters_bytes = cast(List[bytes], named_values[KEY_PARAMETERS]) - parameters = [bytes_to_ndarray(w) for w in parameters_bytes] - if isinstance(state.client, Client): - fit_res = state.client.fit( - FitIns(parameters=ndarrays_to_parameters(parameters), config={}) - ) - parameters_factor = fit_res.num_examples - parameters = parameters_to_ndarrays(fit_res.parameters) - elif isinstance(state.client, NumPyClient): - parameters, parameters_factor, _ = state.client.fit(parameters, {}) - else: - log(ERROR, "Client %d: fit function is missing.", state.sid) + fit_res = fit() + parameters_factor = fit_res.num_examples + parameters = parameters_to_ndarrays(fit_res.parameters) # Quantize parameter update (vector) quantized_parameters = quantize( @@ -468,11 +515,13 @@ def _collect_masked_input( } -def _unmask(state: SecAggPlusState, named_values: Dict[str, Value]) -> Dict[str, Value]: +def _unmask( + state: SecAggPlusState, configs: Dict[str, ConfigsRecordValues] +) -> Dict[str, ConfigsRecordValues]: log(INFO, "Client %d: starting stage 3...", state.sid) - active_sids = cast(List[int], named_values[KEY_ACTIVE_SECURE_ID_LIST]) - dead_sids = cast(List[int], named_values[KEY_DEAD_SECURE_ID_LIST]) + active_sids = cast(List[int], configs[KEY_ACTIVE_SECURE_ID_LIST]) + dead_sids = cast(List[int], configs[KEY_DEAD_SECURE_ID_LIST]) # Send private mask seed share for every avaliable client (including itclient) # Send first private key share for building pairwise mask for every dropped client if len(active_sids) < state.threshold: diff --git 
a/src/py/flwr/client/secure_aggregation/secaggplus_handler_test.py b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py similarity index 55% rename from src/py/flwr/client/secure_aggregation/secaggplus_handler_test.py rename to src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py index 9693a46af989..4033306d0845 100644 --- a/src/py/flwr/client/secure_aggregation/secaggplus_handler_test.py +++ b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,16 +16,20 @@ import unittest from itertools import product -from typing import Any, Dict, List, cast - -from flwr.client import NumPyClient +from typing import Callable, Dict, List + +from flwr.client.mod import make_ffn +from flwr.common.configsrecord import ConfigsRecord +from flwr.common.constant import MESSAGE_TYPE_FIT +from flwr.common.context import Context +from flwr.common.message import Message, Metadata +from flwr.common.recordset import RecordSet from flwr.common.secure_aggregation.secaggplus_constants import ( KEY_ACTIVE_SECURE_ID_LIST, KEY_CIPHERTEXT_LIST, KEY_CLIPPING_RANGE, KEY_DEAD_SECURE_ID_LIST, KEY_MOD_RANGE, - KEY_PARAMETERS, KEY_SAMPLE_NUMBER, KEY_SECURE_ID, KEY_SHARE_NUMBER, @@ -33,34 +37,82 @@ KEY_STAGE, KEY_TARGET_RANGE, KEY_THRESHOLD, + RECORD_KEY_CONFIGS, + RECORD_KEY_STATE, STAGE_COLLECT_MASKED_INPUT, STAGE_SETUP, STAGE_SHARE_KEYS, STAGE_UNMASK, STAGES, ) -from flwr.common.typing import Value +from flwr.common.typing import ConfigsRecordValues + +from .secaggplus_mod import SecAggPlusState, check_configs, secaggplus_mod + + +def get_test_handler( + ctxt: Context, +) -> Callable[[Dict[str, ConfigsRecordValues]], Dict[str, ConfigsRecordValues]]: + """.""" + + def empty_ffn(_: Message, _2: 
Context) -> Message: + return Message( + metadata=Metadata( + run_id=0, + message_id="", + group_id="", + node_id=0, + ttl="", + message_type=MESSAGE_TYPE_FIT, + ), + content=RecordSet(), + ) -from .secaggplus_handler import SecAggPlusHandler, check_named_values + app = make_ffn(empty_ffn, [secaggplus_mod]) + + def func(configs: Dict[str, ConfigsRecordValues]) -> Dict[str, ConfigsRecordValues]: + in_msg = Message( + metadata=Metadata( + run_id=0, + message_id="", + group_id="", + node_id=0, + ttl="", + message_type=MESSAGE_TYPE_FIT, + ), + content=RecordSet(configs={RECORD_KEY_CONFIGS: ConfigsRecord(configs)}), + ) + out_msg = app(in_msg, ctxt) + return out_msg.content.get_configs(RECORD_KEY_CONFIGS).data + return func -class EmptyFlowerNumPyClient(NumPyClient, SecAggPlusHandler): - """Empty NumPyClient.""" +def _make_ctxt() -> Context: + cfg = ConfigsRecord(SecAggPlusState().to_dict()) + return Context(RecordSet(configs={RECORD_KEY_STATE: cfg})) -class TestSecAggPlusHandler(unittest.TestCase): - """Test the SecAgg+ protocol handler.""" - def test_invalid_handler(self) -> None: - """Test invalid handler.""" - handler = SecAggPlusHandler() +def _make_set_state_fn( + ctxt: Context, +) -> Callable[[str], None]: + def set_stage(stage: str) -> None: + state_dict = ctxt.state.get_configs(RECORD_KEY_STATE).data + state = SecAggPlusState(**state_dict) + state.current_stage = stage + ctxt.state.set_configs(RECORD_KEY_STATE, ConfigsRecord(state.to_dict())) + + return set_stage - with self.assertRaises(TypeError): - handler.handle_secure_aggregation({}) + +class TestSecAggPlusHandler(unittest.TestCase): + """Test the SecAgg+ protocol handler.""" def test_stage_transition(self) -> None: """Test stage transition.""" - handler = EmptyFlowerNumPyClient() + ctxt = _make_ctxt() + handler = get_test_handler(ctxt) + set_stage = _make_set_state_fn(ctxt) assert STAGES == ( STAGE_SETUP, @@ -88,28 +140,24 @@ def test_stage_transition(self) -> None: # If the next stage is valid, the 
function should update the current stage # and then raise KeyError or other exceptions when trying to execute SA. for current_stage, next_stage in valid_transitions: - # pylint: disable-next=protected-access - handler._current_stage = current_stage + set_stage(current_stage) with self.assertRaises(KeyError): - handler.handle_secure_aggregation({KEY_STAGE: next_stage}) - # pylint: disable-next=protected-access - assert handler._current_stage == next_stage + handler({KEY_STAGE: next_stage}) # Test invalid transitions # If the next stage is invalid, the function should raise ValueError for current_stage, next_stage in invalid_transitions: - # pylint: disable-next=protected-access - handler._current_stage = current_stage + set_stage(current_stage) with self.assertRaises(ValueError): - handler.handle_secure_aggregation({KEY_STAGE: next_stage}) - # pylint: disable-next=protected-access - assert handler._current_stage == current_stage + handler({KEY_STAGE: next_stage}) def test_stage_setup_check(self) -> None: """Test content checking for the setup stage.""" - handler = EmptyFlowerNumPyClient() + ctxt = _make_ctxt() + handler = get_test_handler(ctxt) + set_stage = _make_set_state_fn(ctxt) valid_key_type_pairs = [ (KEY_SAMPLE_NUMBER, int), @@ -121,7 +169,7 @@ def test_stage_setup_check(self) -> None: (KEY_MOD_RANGE, int), ] - type_to_test_value: Dict[type, Value] = { + type_to_test_value: Dict[type, ConfigsRecordValues] = { int: 10, bool: True, float: 1.0, @@ -129,47 +177,49 @@ def test_stage_setup_check(self) -> None: bytes: b"test", } - valid_named_values: Dict[str, Value] = { + valid_configs: Dict[str, ConfigsRecordValues] = { key: type_to_test_value[value_type] for key, value_type in valid_key_type_pairs } # Test valid `named_values` try: - check_named_values(STAGE_SETUP, valid_named_values.copy()) + check_configs(STAGE_SETUP, valid_configs.copy()) # pylint: disable-next=broad-except except Exception as exc: self.fail(f"check_named_values() raised {type(exc)} 
unexpectedly!") # Set the stage - valid_named_values[KEY_STAGE] = STAGE_SETUP + valid_configs[KEY_STAGE] = STAGE_SETUP # Test invalid `named_values` for key, value_type in valid_key_type_pairs: - invalid_named_values = valid_named_values.copy() + invalid_configs = valid_configs.copy() # Test wrong value type for the key for other_type, other_value in type_to_test_value.items(): if other_type == value_type: continue - invalid_named_values[key] = other_value - # pylint: disable-next=protected-access - handler._current_stage = STAGE_UNMASK + invalid_configs[key] = other_value + + set_stage(STAGE_UNMASK) with self.assertRaises(TypeError): - handler.handle_secure_aggregation(invalid_named_values.copy()) + handler(invalid_configs.copy()) # Test missing key - invalid_named_values.pop(key) - # pylint: disable-next=protected-access - handler._current_stage = STAGE_UNMASK + invalid_configs.pop(key) + + set_stage(STAGE_UNMASK) with self.assertRaises(KeyError): - handler.handle_secure_aggregation(invalid_named_values.copy()) + handler(invalid_configs.copy()) def test_stage_share_keys_check(self) -> None: """Test content checking for the share keys stage.""" - handler = EmptyFlowerNumPyClient() + ctxt = _make_ctxt() + handler = get_test_handler(ctxt) + set_stage = _make_set_state_fn(ctxt) - valid_named_values: Dict[str, Value] = { + valid_configs: Dict[str, ConfigsRecordValues] = { "1": [b"public key 1", b"public key 2"], "2": [b"public key 1", b"public key 2"], "3": [b"public key 1", b"public key 2"], @@ -177,111 +227,113 @@ def test_stage_share_keys_check(self) -> None: # Test valid `named_values` try: - check_named_values(STAGE_SHARE_KEYS, valid_named_values.copy()) + check_configs(STAGE_SHARE_KEYS, valid_configs.copy()) # pylint: disable-next=broad-except except Exception as exc: self.fail(f"check_named_values() raised {type(exc)} unexpectedly!") # Set the stage - valid_named_values[KEY_STAGE] = STAGE_SHARE_KEYS + valid_configs[KEY_STAGE] = STAGE_SHARE_KEYS # Test invalid 
`named_values` - invalid_values: List[Value] = [ + invalid_values: List[ConfigsRecordValues] = [ b"public key 1", [b"public key 1"], [b"public key 1", b"public key 2", b"public key 3"], ] for value in invalid_values: - invalid_named_values = valid_named_values.copy() - invalid_named_values["1"] = value + invalid_configs = valid_configs.copy() + invalid_configs["1"] = value - # pylint: disable-next=protected-access - handler._current_stage = STAGE_SETUP + set_stage(STAGE_SETUP) with self.assertRaises(TypeError): - handler.handle_secure_aggregation(invalid_named_values.copy()) + handler(invalid_configs.copy()) def test_stage_collect_masked_input_check(self) -> None: """Test content checking for the collect masked input stage.""" - handler = EmptyFlowerNumPyClient() + ctxt = _make_ctxt() + handler = get_test_handler(ctxt) + set_stage = _make_set_state_fn(ctxt) - valid_named_values: Dict[str, Value] = { + valid_configs: Dict[str, ConfigsRecordValues] = { KEY_CIPHERTEXT_LIST: [b"ctxt!", b"ctxt@", b"ctxt#", b"ctxt?"], KEY_SOURCE_LIST: [32, 51324, 32324123, -3], - KEY_PARAMETERS: [b"params1", b"params2"], } # Test valid `named_values` try: - check_named_values(STAGE_COLLECT_MASKED_INPUT, valid_named_values.copy()) + check_configs(STAGE_COLLECT_MASKED_INPUT, valid_configs.copy()) # pylint: disable-next=broad-except except Exception as exc: self.fail(f"check_named_values() raised {type(exc)} unexpectedly!") # Set the stage - valid_named_values[KEY_STAGE] = STAGE_COLLECT_MASKED_INPUT + valid_configs[KEY_STAGE] = STAGE_COLLECT_MASKED_INPUT # Test invalid `named_values` # Test missing keys - for key in list(valid_named_values.keys()): + for key in list(valid_configs.keys()): if key == KEY_STAGE: continue - invalid_named_values = valid_named_values.copy() - invalid_named_values.pop(key) - # pylint: disable-next=protected-access - handler._current_stage = STAGE_SHARE_KEYS + invalid_configs = valid_configs.copy() + invalid_configs.pop(key) + + set_stage(STAGE_SHARE_KEYS) with 
self.assertRaises(KeyError): - handler.handle_secure_aggregation(invalid_named_values) + handler(invalid_configs) # Test wrong value type for the key - for key in valid_named_values: + for key in valid_configs: if key == KEY_STAGE: continue - invalid_named_values = valid_named_values.copy() - cast(List[Any], invalid_named_values[key]).append(3.1415926) - # pylint: disable-next=protected-access - handler._current_stage = STAGE_SHARE_KEYS + invalid_configs = valid_configs.copy() + invalid_configs[key] = [3.1415926] + + set_stage(STAGE_SHARE_KEYS) with self.assertRaises(TypeError): - handler.handle_secure_aggregation(invalid_named_values) + handler(invalid_configs) def test_stage_unmask_check(self) -> None: """Test content checking for the unmasking stage.""" - handler = EmptyFlowerNumPyClient() + ctxt = _make_ctxt() + handler = get_test_handler(ctxt) + set_stage = _make_set_state_fn(ctxt) - valid_named_values: Dict[str, Value] = { + valid_configs: Dict[str, ConfigsRecordValues] = { KEY_ACTIVE_SECURE_ID_LIST: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], KEY_DEAD_SECURE_ID_LIST: [32, 51324, 32324123, -3], } # Test valid `named_values` try: - check_named_values(STAGE_UNMASK, valid_named_values.copy()) + check_configs(STAGE_UNMASK, valid_configs.copy()) # pylint: disable-next=broad-except except Exception as exc: self.fail(f"check_named_values() raised {type(exc)} unexpectedly!") # Set the stage - valid_named_values[KEY_STAGE] = STAGE_UNMASK + valid_configs[KEY_STAGE] = STAGE_UNMASK # Test invalid `named_values` # Test missing keys - for key in list(valid_named_values.keys()): + for key in list(valid_configs.keys()): if key == KEY_STAGE: continue - invalid_named_values = valid_named_values.copy() - invalid_named_values.pop(key) - # pylint: disable-next=protected-access - handler._current_stage = STAGE_COLLECT_MASKED_INPUT + invalid_configs = valid_configs.copy() + invalid_configs.pop(key) + + set_stage(STAGE_COLLECT_MASKED_INPUT) with self.assertRaises(KeyError): - 
handler.handle_secure_aggregation(invalid_named_values) + handler(invalid_configs) # Test wrong value type for the key - for key in valid_named_values: + for key in valid_configs: if key == KEY_STAGE: continue - invalid_named_values = valid_named_values.copy() - cast(List[Any], invalid_named_values[key]).append(True) - # pylint: disable-next=protected-access - handler._current_stage = STAGE_COLLECT_MASKED_INPUT + invalid_configs = valid_configs.copy() + invalid_configs[key] = [True, False, True, False] + + set_stage(STAGE_COLLECT_MASKED_INPUT) with self.assertRaises(TypeError): - handler.handle_secure_aggregation(invalid_named_values) + handler(invalid_configs) diff --git a/src/py/flwr/client/middleware/utils.py b/src/py/flwr/client/mod/utils.py similarity index 60% rename from src/py/flwr/client/middleware/utils.py rename to src/py/flwr/client/mod/utils.py index d93132403c1e..3db5da563c23 100644 --- a/src/py/flwr/client/middleware/utils.py +++ b/src/py/flwr/client/mod/utils.py @@ -12,24 +12,26 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""Utility functions for middleware layers.""" +"""Utility functions for mods.""" from typing import List -from flwr.client.typing import Bwd, FlowerCallable, Fwd, Layer +from flwr.client.typing import ClientAppCallable, Mod +from flwr.common.context import Context +from flwr.common.message import Message -def make_ffn(ffn: FlowerCallable, layers: List[Layer]) -> FlowerCallable: +def make_ffn(ffn: ClientAppCallable, mods: List[Mod]) -> ClientAppCallable: """.""" - def wrap_ffn(_ffn: FlowerCallable, _layer: Layer) -> FlowerCallable: - def new_ffn(fwd: Fwd) -> Bwd: - return _layer(fwd, _ffn) + def wrap_ffn(_ffn: ClientAppCallable, _mod: Mod) -> ClientAppCallable: + def new_ffn(message: Message, context: Context) -> Message: + return _mod(message, context, _ffn) return new_ffn - for layer in reversed(layers): - ffn = wrap_ffn(ffn, layer) + for mod in reversed(mods): + ffn = wrap_ffn(ffn, mod) return ffn diff --git a/src/py/flwr/client/mod/utils_test.py b/src/py/flwr/client/mod/utils_test.py new file mode 100644 index 000000000000..4a086d9ae3f7 --- /dev/null +++ b/src/py/flwr/client/mod/utils_test.py @@ -0,0 +1,141 @@ +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+"""Tests for the utility functions."""
+
+
+import unittest
+from typing import List
+
+from flwr.client.typing import ClientAppCallable, Mod
+from flwr.common.configsrecord import ConfigsRecord
+from flwr.common.context import Context
+from flwr.common.message import Message, Metadata
+from flwr.common.metricsrecord import MetricsRecord
+from flwr.common.recordset import RecordSet
+
+from .utils import make_ffn
+
+METRIC = "context"
+COUNTER = "counter"
+
+
+def _increment_context_counter(context: Context) -> None:
+    # Read from context
+    current_counter: int = context.state.get_metrics(METRIC)[COUNTER]  # type: ignore
+    # update and override context
+    current_counter += 1
+    context.state.set_metrics(METRIC, record=MetricsRecord({COUNTER: current_counter}))
+
+
+def make_mock_mod(name: str, footprint: List[str]) -> Mod:
+    """Make a mock mod."""
+
+    def mod(message: Message, context: Context, app: ClientAppCallable) -> Message:
+        footprint.append(name)
+        # add empty ConfigsRecord to in_message for this mod
+        message.content.set_configs(name=name, record=ConfigsRecord())
+        _increment_context_counter(context)
+        out_message: Message = app(message, context)
+        footprint.append(name)
+        _increment_context_counter(context)
+        # add empty ConfigsRecord to out_message for this mod
+        out_message.content.set_configs(name=name, record=ConfigsRecord())
+        return out_message
+
+    return mod
+
+
+def make_mock_app(name: str, footprint: List[str]) -> ClientAppCallable:
+    """Make a mock app."""
+
+    def app(message: Message, context: Context) -> Message:
+        footprint.append(name)
+        message.content.set_configs(name=name, record=ConfigsRecord())
+        out_message = Message(metadata=message.metadata, content=RecordSet())
+        out_message.content.set_configs(name=name, record=ConfigsRecord())
+        print(context)
+        return out_message
+
+    return app
+
+
+def _get_dummy_flower_message() -> Message:
+    return Message(
content=RecordSet(), + metadata=Metadata( + run_id=0, message_id="", group_id="", node_id=0, ttl="", message_type="mock" + ), + ) + + +class TestMakeApp(unittest.TestCase): + """Tests for the `make_app` function.""" + + def test_multiple_mods(self) -> None: + """Test if multiple mods are called in the correct order.""" + # Prepare + footprint: List[str] = [] + mock_app = make_mock_app("app", footprint) + mock_mod_names = [f"mod{i}" for i in range(1, 15)] + mock_mods = [make_mock_mod(name, footprint) for name in mock_mod_names] + + state = RecordSet() + state.set_metrics(METRIC, record=MetricsRecord({COUNTER: 0.0})) + context = Context(state=state) + message = _get_dummy_flower_message() + + # Execute + wrapped_app = make_ffn(mock_app, mock_mods) + out_message = wrapped_app(message, context) + + # Assert + trace = mock_mod_names + ["app"] + self.assertEqual(footprint, trace + list(reversed(mock_mod_names))) + # pylint: disable-next=no-member + self.assertEqual("".join(message.content.configs.keys()), "".join(trace)) + self.assertEqual( + "".join(out_message.content.configs.keys()), "".join(reversed(trace)) + ) + self.assertEqual(state.get_metrics(METRIC)[COUNTER], 2 * len(mock_mods)) + + def test_filter(self) -> None: + """Test if a mod can filter incoming TaskIns.""" + # Prepare + footprint: List[str] = [] + mock_app = make_mock_app("app", footprint) + context = Context(state=RecordSet()) + message = _get_dummy_flower_message() + + def filter_mod( + message: Message, + _1: Context, + _2: ClientAppCallable, + ) -> Message: + footprint.append("filter") + message.content.set_configs(name="filter", record=ConfigsRecord()) + out_message = Message(metadata=message.metadata, content=RecordSet()) + out_message.content.set_configs(name="filter", record=ConfigsRecord()) + # Skip calling app + return out_message + + # Execute + wrapped_app = make_ffn(mock_app, [filter_mod]) + out_message = wrapped_app(message, context) + + # Assert + self.assertEqual(footprint, ["filter"]) + 
# pylint: disable-next=no-member + self.assertEqual(list(message.content.configs.keys())[0], "filter") + self.assertEqual(list(out_message.content.configs.keys())[0], "filter") diff --git a/src/py/flwr/client/node_state.py b/src/py/flwr/client/node_state.py index 0a29be511806..465bbd356c1c 100644 --- a/src/py/flwr/client/node_state.py +++ b/src/py/flwr/client/node_state.py @@ -17,7 +17,8 @@ from typing import Any, Dict -from flwr.client.run_state import RunState +from flwr.common.context import Context +from flwr.common.recordset import RecordSet class NodeState: @@ -25,24 +26,24 @@ class NodeState: def __init__(self) -> None: self._meta: Dict[str, Any] = {} # holds metadata about the node - self.run_states: Dict[int, RunState] = {} + self.run_contexts: Dict[int, Context] = {} - def register_runstate(self, run_id: int) -> None: - """Register new run state for this node.""" - if run_id not in self.run_states: - self.run_states[run_id] = RunState({}) + def register_context(self, run_id: int) -> None: + """Register new run context for this node.""" + if run_id not in self.run_contexts: + self.run_contexts[run_id] = Context(state=RecordSet()) - def retrieve_runstate(self, run_id: int) -> RunState: - """Get run state given a run_id.""" - if run_id in self.run_states: - return self.run_states[run_id] + def retrieve_context(self, run_id: int) -> Context: + """Get run context given a run_id.""" + if run_id in self.run_contexts: + return self.run_contexts[run_id] raise RuntimeError( - f"RunState for run_id={run_id} doesn't exist." - " A run must be registered before it can be retrieved or updated " + f"Context for run_id={run_id} doesn't exist." + " A run context must be registered before it can be retrieved or updated " " by a client." 
) - def update_runstate(self, run_id: int, run_state: RunState) -> None: - """Update run state.""" - self.run_states[run_id] = run_state + def update_context(self, run_id: int, context: Context) -> None: + """Update run context.""" + self.run_contexts[run_id] = context diff --git a/src/py/flwr/client/node_state_tests.py b/src/py/flwr/client/node_state_tests.py index 7a6bfcd31f08..11e5e74a31ec 100644 --- a/src/py/flwr/client/node_state_tests.py +++ b/src/py/flwr/client/node_state_tests.py @@ -16,17 +16,22 @@ from flwr.client.node_state import NodeState -from flwr.client.run_state import RunState -from flwr.proto.task_pb2 import TaskIns +from flwr.common.configsrecord import ConfigsRecord +from flwr.common.context import Context +from flwr.proto.task_pb2 import TaskIns # pylint: disable=E0611 -def _run_dummy_task(state: RunState) -> RunState: - if "counter" in state.state: - state.state["counter"] += "1" - else: - state.state["counter"] = "1" +def _run_dummy_task(context: Context) -> Context: + counter_value: str = "1" + if "counter" in context.state.configs.keys(): + counter_value = context.get_configs("counter")["count"] # type: ignore + counter_value += "1" - return state + context.state.set_configs( + name="counter", record=ConfigsRecord({"count": counter_value}) + ) + + return context def test_multirun_in_node_state() -> None: @@ -43,17 +48,17 @@ def test_multirun_in_node_state() -> None: run_id = task.run_id # Register - node_state.register_runstate(run_id=run_id) + node_state.register_context(run_id=run_id) # Get run state - state = node_state.retrieve_runstate(run_id=run_id) + context = node_state.retrieve_context(run_id=run_id) # Run "task" - updated_state = _run_dummy_task(state) + updated_state = _run_dummy_task(context) # Update run state - node_state.update_runstate(run_id=run_id, run_state=updated_state) + node_state.update_context(run_id=run_id, context=updated_state) # Verify values - for run_id, state in node_state.run_states.items(): - assert 
state.state["counter"] == expected_values[run_id] + for run_id, context in node_state.run_contexts.items(): + assert context.state.get_configs("counter")["count"] == expected_values[run_id] diff --git a/src/py/flwr/client/numpy_client.py b/src/py/flwr/client/numpy_client.py index d67fb90512d4..a77889912a09 100644 --- a/src/py/flwr/client/numpy_client.py +++ b/src/py/flwr/client/numpy_client.py @@ -19,7 +19,6 @@ from typing import Callable, Dict, Tuple from flwr.client.client import Client -from flwr.client.run_state import RunState from flwr.common import ( Config, NDArrays, @@ -27,6 +26,7 @@ ndarrays_to_parameters, parameters_to_ndarrays, ) +from flwr.common.context import Context from flwr.common.typing import ( Code, EvaluateIns, @@ -70,7 +70,7 @@ class NumPyClient(ABC): """Abstract base class for Flower clients using NumPy.""" - state: RunState + context: Context def get_properties(self, config: Config) -> Dict[str, Scalar]: """Return a client's set of properties. @@ -174,13 +174,13 @@ def evaluate( _ = (self, parameters, config) return 0.0, 0, {} - def get_state(self) -> RunState: - """Get the run state from this client.""" - return self.state + def get_context(self) -> Context: + """Get the run context from this client.""" + return self.context - def set_state(self, state: RunState) -> None: - """Apply a run state to this client.""" - self.state = state + def set_context(self, context: Context) -> None: + """Apply a run context to this client.""" + self.context = context def to_client(self) -> Client: """Convert to object to Client type and return it.""" @@ -278,21 +278,21 @@ def _evaluate(self: Client, ins: EvaluateIns) -> EvaluateRes: ) -def _get_state(self: Client) -> RunState: - """Return state of underlying NumPyClient.""" - return self.numpy_client.get_state() # type: ignore +def _get_context(self: Client) -> Context: + """Return context of underlying NumPyClient.""" + return self.numpy_client.get_context() # type: ignore -def _set_state(self: Client, 
state: RunState) -> None: - """Apply state to underlying NumPyClient.""" - self.numpy_client.set_state(state) # type: ignore +def _set_context(self: Client, context: Context) -> None: + """Apply context to underlying NumPyClient.""" + self.numpy_client.set_context(context) # type: ignore def _wrap_numpy_client(client: NumPyClient) -> Client: member_dict: Dict[str, Callable] = { # type: ignore "__init__": _constructor, - "get_state": _get_state, - "set_state": _set_state, + "get_context": _get_context, + "set_context": _set_context, } # Add wrapper type methods (if overridden) diff --git a/src/py/flwr/client/rest_client/connection.py b/src/py/flwr/client/rest_client/connection.py index 87b06dd0be4e..a5c8ea0957d2 100644 --- a/src/py/flwr/client/rest_client/connection.py +++ b/src/py/flwr/client/rest_client/connection.py @@ -29,7 +29,9 @@ from flwr.common import GRPC_MAX_MESSAGE_LENGTH from flwr.common.constant import MISSING_EXTRA_REST from flwr.common.logger import log -from flwr.proto.fleet_pb2 import ( +from flwr.common.message import Message +from flwr.common.serde import message_from_taskins, message_to_taskres +from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 CreateNodeRequest, CreateNodeResponse, DeleteNodeRequest, @@ -38,8 +40,8 @@ PushTaskResRequest, PushTaskResResponse, ) -from flwr.proto.node_pb2 import Node -from flwr.proto.task_pb2 import TaskIns, TaskRes +from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.task_pb2 import TaskIns # pylint: disable=E0611 try: import requests @@ -68,8 +70,8 @@ def http_request_response( ] = None, # pylint: disable=unused-argument ) -> Iterator[ Tuple[ - Callable[[], Optional[TaskIns]], - Callable[[TaskRes], None], + Callable[[], Optional[Message]], + Callable[[Message], None], Optional[Callable[[], None]], Optional[Callable[[], None]], ] @@ -206,7 +208,7 @@ def delete_node() -> None: PATH_PULL_TASK_INS, ) - def receive() -> Optional[TaskIns]: + def receive() -> Optional[Message]: 
"""Receive next task from server.""" # Get Node if node_store[KEY_NODE] is None: @@ -256,20 +258,20 @@ def receive() -> Optional[TaskIns]: task_ins: Optional[TaskIns] = get_task_ins(pull_task_ins_response_proto) # Discard the current TaskIns if not valid - if task_ins is not None and not validate_task_ins( - task_ins, discard_reconnect_ins=True - ): + if task_ins is not None and not validate_task_ins(task_ins): task_ins = None # Remember `task_ins` until `task_res` is available state[KEY_TASK_INS] = task_ins - # Return the TaskIns if available + # Return the Message if available + message = None if task_ins is not None: + message = message_from_taskins(task_ins) log(INFO, "[Node] POST /%s: success", PATH_PULL_TASK_INS) - return task_ins + return message - def send(task_res: TaskRes) -> None: + def send(message: Message) -> None: """Send task result back to server.""" # Get Node if node_store[KEY_NODE] is None: @@ -283,6 +285,9 @@ def send(task_res: TaskRes) -> None: task_ins: TaskIns = cast(TaskIns, state[KEY_TASK_INS]) + # Construct TaskRes + task_res = message_to_taskres(message) + # Check if fields to be set are not initialized if not validate_task_res(task_res): state[KEY_TASK_INS] = None diff --git a/src/py/flwr/client/secure_aggregation/handler.py b/src/py/flwr/client/secure_aggregation/handler.py deleted file mode 100644 index 487ed842c93f..000000000000 --- a/src/py/flwr/client/secure_aggregation/handler.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Message Handler for Secure Aggregation (abstract base class).""" - - -from abc import ABC, abstractmethod -from typing import Dict - -from flwr.common.typing import Value - - -class SecureAggregationHandler(ABC): - """Abstract base class for secure aggregation message handlers.""" - - @abstractmethod - def handle_secure_aggregation( - self, named_values: Dict[str, Value] - ) -> Dict[str, Value]: - """Handle incoming Secure Aggregation message and return results. - - Parameters - ---------- - named_values : Dict[str, Value] - The named values retrieved from the SecureAggregation sub-message - of Task message in the server's TaskIns. - - Returns - ------- - Dict[str, Value] - The final/intermediate results of the Secure Aggregation protocol. - """ diff --git a/src/py/flwr/client/typing.py b/src/py/flwr/client/typing.py index 1652ee57674a..7aef2b30e0fc 100644 --- a/src/py/flwr/client/typing.py +++ b/src/py/flwr/client/typing.py @@ -14,31 +14,16 @@ # ============================================================================== """Custom types for Flower clients.""" -from dataclasses import dataclass + from typing import Callable -from flwr.client.run_state import RunState -from flwr.proto.task_pb2 import TaskIns, TaskRes +from flwr.common.context import Context +from flwr.common.message import Message from .client import Client as Client - -@dataclass -class Fwd: - """.""" - - task_ins: TaskIns - state: RunState - - -@dataclass -class Bwd: - """.""" - - task_res: TaskRes - state: RunState - - -FlowerCallable = Callable[[Fwd], Bwd] +# Compatibility ClientFn = Callable[[str], Client] -Layer = Callable[[Fwd, FlowerCallable], Bwd] + +ClientAppCallable = Callable[[Message, Context], Message] +Mod = Callable[[Message, Context, ClientAppCallable], Message] diff --git 
a/src/py/flwr/common/configsrecord.py b/src/py/flwr/common/configsrecord.py new file mode 100644 index 000000000000..b0480841e06c --- /dev/null +++ b/src/py/flwr/common/configsrecord.py @@ -0,0 +1,116 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""ConfigsRecord.""" + + +from dataclasses import dataclass, field +from typing import Dict, Optional, get_args + +from .typing import ConfigsRecordValues, ConfigsScalar + + +@dataclass +class ConfigsRecord: + """Configs record.""" + + data: Dict[str, ConfigsRecordValues] = field(default_factory=dict) + + def __init__( + self, + configs_dict: Optional[Dict[str, ConfigsRecordValues]] = None, + keep_input: bool = True, + ): + """Construct a ConfigsRecord object. + + Parameters + ---------- + configs_dict : Optional[Dict[str, ConfigsRecordValues]] + A dictionary that stores basic types (i.e. `str`, `int`, `float`, `bytes` as + defined in `ConfigsScalar`) and lists of such types (see + `ConfigsScalarList`). + keep_input : bool (default: True) + A boolean indicating whether config passed should be deleted from the input + dictionary immediately after adding them to the record. When set + to True, the data is duplicated in memory. If memory is a concern, set + it to False. 
+ """ + self.data = {} + if configs_dict: + self.set_configs(configs_dict, keep_input=keep_input) + + def set_configs( + self, configs_dict: Dict[str, ConfigsRecordValues], keep_input: bool = True + ) -> None: + """Add configs to the record. + + Parameters + ---------- + configs_dict : Dict[str, ConfigsRecordValues] + A dictionary that stores basic types (i.e. `str`,`int`, `float`, `bytes` as + defined in `ConfigsRecordValues`) and list of such types (see + `ConfigsScalarList`). + keep_input : bool (default: True) + A boolean indicating whether config passed should be deleted from the input + dictionary immediately after adding them to the record. When set + to True, the data is duplicated in memory. If memory is a concern, set + it to False. + """ + if any(not isinstance(k, str) for k in configs_dict.keys()): + raise TypeError(f"Not all keys are of valid type. Expected {str}") + + def is_valid(value: ConfigsScalar) -> None: + """Check if value is of expected type.""" + if not isinstance(value, get_args(ConfigsScalar)): + raise TypeError( + "Not all values are of valid type." + f" Expected {ConfigsRecordValues} but you passed {type(value)}." + ) + + # Check types of values + # Split between those values that are list and those that aren't + # then process in the same way + for value in configs_dict.values(): + if isinstance(value, list): + # If your lists are large (e.g. 1M+ elements) this will be slow + # 1s to check 10M element list on a M2 Pro + # In such settings, you'd be better of treating such config as + # an array and pass it to a ParametersRecord. + # Empty lists are valid + if len(value) > 0: + is_valid(value[0]) + # all elements in the list must be of the same valid type + # this is needed for protobuf + value_type = type(value[0]) + if not all(isinstance(v, value_type) for v in value): + raise TypeError( + "All values in a list must be of the same valid type. " + f"One of {ConfigsScalar}." 
+ ) + else: + is_valid(value) + + # Add configs to record + if keep_input: + # Copy + self.data = configs_dict.copy() + else: + # Add entries to dataclass without duplicating memory + for key in list(configs_dict.keys()): + self.data[key] = configs_dict[key] + del configs_dict[key] + + def __getitem__(self, key: str) -> ConfigsRecordValues: + """Retrieve an element stored in record.""" + return self.data[key] diff --git a/src/py/flwr/common/constant.py b/src/py/flwr/common/constant.py index 49802f2815be..fcafd853a349 100644 --- a/src/py/flwr/common/constant.py +++ b/src/py/flwr/common/constant.py @@ -31,3 +31,8 @@ TRANSPORT_TYPE_GRPC_RERE, TRANSPORT_TYPE_REST, ] + +MESSAGE_TYPE_GET_PROPERTIES = "get_properties" +MESSAGE_TYPE_GET_PARAMETERS = "get_parameters" +MESSAGE_TYPE_FIT = "fit" +MESSAGE_TYPE_EVALUATE = "evaluate" diff --git a/src/py/flwr/common/context.py b/src/py/flwr/common/context.py new file mode 100644 index 000000000000..30c1131a206f --- /dev/null +++ b/src/py/flwr/common/context.py @@ -0,0 +1,38 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Context.""" + + +from dataclasses import dataclass + +from .recordset import RecordSet + + +@dataclass +class Context: + """State of your run. + + Parameters + ---------- + state : RecordSet + Holds records added by the entity in a given run and that will stay local. 
+ This means that the data it holds will never leave the system it's running from. + This can be used as an intermediate storage or scratchpad when + executing mods. It can also be used as a memory to access + at different points during the lifecycle of this entity (e.g. across + multiple rounds) + """ + + state: RecordSet diff --git a/src/py/flwr/common/dp.py b/src/py/flwr/common/dp.py index 5030ad34805b..83a72b8ce749 100644 --- a/src/py/flwr/common/dp.py +++ b/src/py/flwr/common/dp.py @@ -19,11 +19,13 @@ import numpy as np +from flwr.common.logger import warn_deprecated_feature from flwr.common.typing import NDArrays # Calculates the L2-norm of a potentially ragged array def _get_update_norm(update: NDArrays) -> float: + warn_deprecated_feature("`_get_update_norm` method") flattened_update = update[0] for i in range(1, len(update)): flattened_update = np.append(flattened_update, update[i]) @@ -32,6 +34,7 @@ def _get_update_norm(update: NDArrays) -> float: def add_gaussian_noise(update: NDArrays, std_dev: float) -> NDArrays: """Add iid Gaussian noise to each floating point value in the update.""" + warn_deprecated_feature("`add_gaussian_noise` method") update_noised = [ layer + np.random.normal(0, std_dev, layer.shape) for layer in update ] @@ -40,6 +43,7 @@ def add_gaussian_noise(update: NDArrays, std_dev: float) -> NDArrays: def clip_by_l2(update: NDArrays, threshold: float) -> Tuple[NDArrays, bool]: """Scales the update so thats its L2 norm is upper-bound to threshold.""" + warn_deprecated_feature("`clip_by_l2` method") update_norm = _get_update_norm(update) scaling_factor = min(1, threshold / update_norm) update_clipped: NDArrays = [layer * scaling_factor for layer in update] diff --git a/src/py/flwr/common/logger.py b/src/py/flwr/common/logger.py index 29d1562a86d3..50c902da38b5 100644 --- a/src/py/flwr/common/logger.py +++ b/src/py/flwr/common/logger.py @@ -111,3 +111,17 @@ def warn_experimental_feature(name: str) -> None: """, name, ) + + +def 
warn_deprecated_feature(name: str) -> None: + """Warn the user when they use a deprecated feature.""" + log( + WARN, + """ + DEPRECATED FEATURE: %s + + This is a deprecated feature. It will be removed + entirely in future versions of Flower. + """, + name, + ) diff --git a/src/py/flwr/common/message.py b/src/py/flwr/common/message.py new file mode 100644 index 000000000000..9258edccbcd5 --- /dev/null +++ b/src/py/flwr/common/message.py @@ -0,0 +1,67 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Message.""" + + +from dataclasses import dataclass + +from .recordset import RecordSet + + +@dataclass +class Metadata: + """A dataclass holding metadata associated with the current message. + + Parameters + ---------- + run_id : int + An identifier for the current run. + message_id : str + An identifier for the current message. + group_id : str + An identifier for grouping messages. In some settings + this is used as the FL round. + node_id : int + An identifier for the node running a message. + ttl : str + Time-to-live for this message. + message_type : str + A string that encodes the action to be executed on + the receiving end. 
+ """ + + run_id: int + message_id: str + group_id: str + node_id: int + ttl: str + message_type: str + + +@dataclass +class Message: + """State of your application from the viewpoint of the entity using it. + + Parameters + ---------- + metadata : Metadata + A dataclass including information about the message to be executed. + content : RecordSet + Holds records either sent by another entity (e.g. sent by the server-side + logic to a client, or vice-versa) or that will be sent to it. + """ + + metadata: Metadata + content: RecordSet diff --git a/src/py/flwr/common/metricsrecord.py b/src/py/flwr/common/metricsrecord.py new file mode 100644 index 000000000000..e70b0cb31d55 --- /dev/null +++ b/src/py/flwr/common/metricsrecord.py @@ -0,0 +1,116 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""MetricsRecord.""" + + +from dataclasses import dataclass, field +from typing import Dict, Optional, get_args + +from .typing import MetricsRecordValues, MetricsScalar + + +@dataclass +class MetricsRecord: + """Metrics record.""" + + data: Dict[str, MetricsRecordValues] = field(default_factory=dict) + + def __init__( + self, + metrics_dict: Optional[Dict[str, MetricsRecordValues]] = None, + keep_input: bool = True, + ): + """Construct a MetricsRecord object. 
+ + Parameters + ---------- + metrics_dict : Optional[Dict[str, MetricsRecordValues]] + A dictionary that stores basic types (i.e. `int`, `float` as defined + in `MetricsScalar`) and list of such types (see `MetricsScalarList`). + keep_input : bool (default: True) + A boolean indicating whether metrics should be deleted from the input + dictionary immediately after adding them to the record. When set + to True, the data is duplicated in memory. If memory is a concern, set + it to False. + """ + self.data = {} + if metrics_dict: + self.set_metrics(metrics_dict, keep_input=keep_input) + + def set_metrics( + self, metrics_dict: Dict[str, MetricsRecordValues], keep_input: bool = True + ) -> None: + """Add metrics to the record. + + Parameters + ---------- + metrics_dict : Dict[str, MetricsRecordValues] + A dictionary that stores basic types (i.e. `int`, `float` as defined + in `MetricsScalar`) and list of such types (see `MetricsScalarList`). + keep_input : bool (default: True) + A boolean indicating whether metrics should be deleted from the input + dictionary immediately after adding them to the record. When set + to True, the data is duplicated in memory. If memory is a concern, set + it to False. + """ + if any(not isinstance(k, str) for k in metrics_dict.keys()): + raise TypeError(f"Not all keys are of valid type. Expected {str}.") + + def is_valid(value: MetricsScalar) -> None: + """Check if value is of expected type.""" + if not isinstance(value, get_args(MetricsScalar)) or isinstance( + value, bool + ): + raise TypeError( + "Not all values are of valid type." + f" Expected {MetricsRecordValues} but you passed {type(value)}." + ) + + # Check types of values + # Split between those values that are list and those that aren't + # then process in the same way + for value in metrics_dict.values(): + if isinstance(value, list): + # If your lists are large (e.g. 
1M+ elements) this will be slow + # 1s to check 10M element list on a M2 Pro + # In such settings, you'd be better of treating such metric as + # an array and pass it to a ParametersRecord. + # Empty lists are valid + if len(value) > 0: + is_valid(value[0]) + # all elements in the list must be of the same valid type + # this is needed for protobuf + value_type = type(value[0]) + if not all(isinstance(v, value_type) for v in value): + raise TypeError( + "All values in a list must be of the same valid type. " + f"One of {MetricsScalar}." + ) + else: + is_valid(value) + + # Add metrics to record + if keep_input: + # Copy + self.data = metrics_dict.copy() + else: + # Add entries to dataclass without duplicating memory + for key in list(metrics_dict.keys()): + self.data[key] = metrics_dict[key] + del metrics_dict[key] + + def __getitem__(self, key: str) -> MetricsRecordValues: + """Retrieve an element stored in record.""" + return self.data[key] diff --git a/src/py/flwr/common/parametersrecord.py b/src/py/flwr/common/parametersrecord.py index 3d40c0488baa..ef02a0789ddf 100644 --- a/src/py/flwr/common/parametersrecord.py +++ b/src/py/flwr/common/parametersrecord.py @@ -59,7 +59,6 @@ class ParametersRecord: PyTorch's state_dict, but holding serialised tensors instead. """ - keep_input: bool data: OrderedDict[str, Array] = field(default_factory=OrderedDict[str, Array]) def __init__( @@ -82,25 +81,29 @@ def __init__( parameters after adding it to the record, set this flag to True. When set to True, the data is duplicated in memory. """ - self.keep_input = keep_input self.data = OrderedDict() if array_dict: - self.set_parameters(array_dict) + self.set_parameters(array_dict, keep_input=keep_input) - def set_parameters(self, array_dict: OrderedDict[str, Array]) -> None: + def set_parameters( + self, array_dict: OrderedDict[str, Array], keep_input: bool = False + ) -> None: """Add parameters to record. 
Parameters ---------- array_dict : OrderedDict[str, Array] A dictionary that stores serialized array-like or tensor-like objects. + keep_input : bool (default: False) + A boolean indicating whether parameters should be deleted from the input + dictionary immediately after adding them to the record. """ if any(not isinstance(k, str) for k in array_dict.keys()): raise TypeError(f"Not all keys are of valid type. Expected {str}") if any(not isinstance(v, Array) for v in array_dict.values()): raise TypeError(f"Not all values are of valid type. Expected {Array}") - if self.keep_input: + if keep_input: # Copy self.data = OrderedDict(array_dict) else: @@ -108,3 +111,7 @@ def set_parameters(self, array_dict: OrderedDict[str, Array]) -> None: for key in list(array_dict.keys()): self.data[key] = array_dict[key] del array_dict[key] + + def __getitem__(self, key: str) -> Array: + """Retrieve an element stored in record.""" + return self.data[key] diff --git a/src/py/flwr/common/recordset.py b/src/py/flwr/common/recordset.py index dc723a2cea86..61c880c970b8 100644 --- a/src/py/flwr/common/recordset.py +++ b/src/py/flwr/common/recordset.py @@ -14,22 +14,15 @@ # ============================================================================== """RecordSet.""" + from dataclasses import dataclass, field from typing import Dict +from .configsrecord import ConfigsRecord +from .metricsrecord import MetricsRecord from .parametersrecord import ParametersRecord -@dataclass -class MetricsRecord: - """Metrics record.""" - - -@dataclass -class ConfigsRecord: - """Configs record.""" - - @dataclass class RecordSet: """Definition of RecordSet.""" diff --git a/src/py/flwr/common/recordset_compat.py b/src/py/flwr/common/recordset_compat.py new file mode 100644 index 000000000000..e0e591048820 --- /dev/null +++ b/src/py/flwr/common/recordset_compat.py @@ -0,0 +1,409 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""RecordSet utilities.""" + + +from typing import Dict, Mapping, OrderedDict, Tuple, Union, cast, get_args + +from .configsrecord import ConfigsRecord +from .metricsrecord import MetricsRecord +from .parametersrecord import Array, ParametersRecord +from .recordset import RecordSet +from .typing import ( + Code, + ConfigsRecordValues, + EvaluateIns, + EvaluateRes, + FitIns, + FitRes, + GetParametersIns, + GetParametersRes, + GetPropertiesIns, + GetPropertiesRes, + MetricsRecordValues, + Parameters, + Scalar, + Status, +) + + +def parametersrecord_to_parameters( + record: ParametersRecord, keep_input: bool +) -> Parameters: + """Convert ParameterRecord to legacy Parameters. + + Warnings + -------- + Because `Arrays` in `ParametersRecord` encode more information of the + array-like or tensor-like data (e.g their datatype, shape) than `Parameters` it + might not be possible to reconstruct such data structures from `Parameters` objects + alone. Additional information or metadata must be provided from elsewhere. + + Parameters + ---------- + record : ParametersRecord + The record to be conveted into Parameters. + keep_input : bool + A boolean indicating whether entries in the record should be deleted from the + input dictionary immediately after adding them to the record. 
+ """ + parameters = Parameters(tensors=[], tensor_type="") + + for key in list(record.data.keys()): + parameters.tensors.append(record[key].data) + + if not parameters.tensor_type: + # Setting from first array in record. Recall the warning in the docstrings + # of this function. + parameters.tensor_type = record[key].stype + + if not keep_input: + del record.data[key] + + return parameters + + +def parameters_to_parametersrecord( + parameters: Parameters, keep_input: bool +) -> ParametersRecord: + """Convert legacy Parameters into a single ParametersRecord. + + Because there is no concept of names in the legacy Parameters, arbitrary keys will + be used when constructing the ParametersRecord. Similarly, the shape and data type + won't be recorded in the Array objects. + + Parameters + ---------- + parameters : Parameters + Parameters object to be represented as a ParametersRecord. + keep_input : bool + A boolean indicating whether parameters should be deleted from the input + Parameters object (i.e. a list of serialized NumPy arrays) immediately after + adding them to the record. 
+ """ + tensor_type = parameters.tensor_type + + p_record = ParametersRecord() + + num_arrays = len(parameters.tensors) + ordered_dict = OrderedDict() + for idx in range(num_arrays): + if keep_input: + tensor = parameters.tensors[idx] + else: + tensor = parameters.tensors.pop(0) + ordered_dict[str(idx)] = Array( + data=tensor, dtype="", stype=tensor_type, shape=[] + ) + + p_record.set_parameters(ordered_dict, keep_input=keep_input) + return p_record + + +def _check_mapping_from_recordscalartype_to_scalar( + record_data: Mapping[str, Union[ConfigsRecordValues, MetricsRecordValues]] +) -> Dict[str, Scalar]: + """Check mapping `common.*RecordValues` into `common.Scalar` is possible.""" + for value in record_data.values(): + if not isinstance(value, get_args(Scalar)): + raise TypeError( + "There is not a 1:1 mapping between `common.Scalar` types and those " + "supported in `common.ConfigsRecordValues` or " + "`common.ConfigsRecordValues`. Consider casting your values to a type " + "supported by the `common.RecordSet` infrastructure. 
" + f"You used type: {type(value)}" + ) + return cast(Dict[str, Scalar], record_data) + + +def _recordset_to_fit_or_evaluate_ins_components( + recordset: RecordSet, + ins_str: str, + keep_input: bool, +) -> Tuple[Parameters, Dict[str, Scalar]]: + """Derive Fit/Evaluate Ins from a RecordSet.""" + # get Array and construct Parameters + parameters_record = recordset.get_parameters(f"{ins_str}.parameters") + + parameters = parametersrecord_to_parameters( + parameters_record, keep_input=keep_input + ) + + # get config dict + config_record = recordset.get_configs(f"{ins_str}.config") + + config_dict = _check_mapping_from_recordscalartype_to_scalar(config_record.data) + + return parameters, config_dict + + +def _fit_or_evaluate_ins_to_recordset( + ins: Union[FitIns, EvaluateIns], keep_input: bool +) -> RecordSet: + recordset = RecordSet() + + ins_str = "fitins" if isinstance(ins, FitIns) else "evaluateins" + recordset.set_parameters( + name=f"{ins_str}.parameters", + record=parameters_to_parametersrecord(ins.parameters, keep_input=keep_input), + ) + + recordset.set_configs( + name=f"{ins_str}.config", record=ConfigsRecord(ins.config) # type: ignore + ) + + return recordset + + +def _embed_status_into_recordset( + res_str: str, status: Status, recordset: RecordSet +) -> RecordSet: + status_dict: Dict[str, ConfigsRecordValues] = { + "code": int(status.code.value), + "message": status.message, + } + # we add it to a `ConfigsRecord`` because the `status.message`` is a string + # and `str` values aren't supported in `MetricsRecords` + recordset.set_configs(f"{res_str}.status", record=ConfigsRecord(status_dict)) + return recordset + + +def _extract_status_from_recordset(res_str: str, recordset: RecordSet) -> Status: + status = recordset.get_configs(f"{res_str}.status") + code = cast(int, status["code"]) + return Status(code=Code(code), message=str(status["message"])) + + +def recordset_to_fitins(recordset: RecordSet, keep_input: bool) -> FitIns: + """Derive FitIns from a 
RecordSet object.""" + parameters, config = _recordset_to_fit_or_evaluate_ins_components( + recordset, + ins_str="fitins", + keep_input=keep_input, + ) + + return FitIns(parameters=parameters, config=config) + + +def fitins_to_recordset(fitins: FitIns, keep_input: bool) -> RecordSet: + """Construct a RecordSet from a FitIns object.""" + return _fit_or_evaluate_ins_to_recordset(fitins, keep_input) + + +def recordset_to_fitres(recordset: RecordSet, keep_input: bool) -> FitRes: + """Derive FitRes from a RecordSet object.""" + ins_str = "fitres" + parameters = parametersrecord_to_parameters( + recordset.get_parameters(f"{ins_str}.parameters"), keep_input=keep_input + ) + + num_examples = cast( + int, recordset.get_metrics(f"{ins_str}.num_examples")["num_examples"] + ) + configs_record = recordset.get_configs(f"{ins_str}.metrics") + + metrics = _check_mapping_from_recordscalartype_to_scalar(configs_record.data) + status = _extract_status_from_recordset(ins_str, recordset) + + return FitRes( + status=status, parameters=parameters, num_examples=num_examples, metrics=metrics + ) + + +def fitres_to_recordset(fitres: FitRes, keep_input: bool) -> RecordSet: + """Construct a RecordSet from a FitRes object.""" + recordset = RecordSet() + + res_str = "fitres" + + recordset.set_configs( + name=f"{res_str}.metrics", record=ConfigsRecord(fitres.metrics) # type: ignore + ) + recordset.set_metrics( + name=f"{res_str}.num_examples", + record=MetricsRecord({"num_examples": fitres.num_examples}), + ) + recordset.set_parameters( + name=f"{res_str}.parameters", + record=parameters_to_parametersrecord(fitres.parameters, keep_input), + ) + + # status + recordset = _embed_status_into_recordset(res_str, fitres.status, recordset) + + return recordset + + +def recordset_to_evaluateins(recordset: RecordSet, keep_input: bool) -> EvaluateIns: + """Derive EvaluateIns from a RecordSet object.""" + parameters, config = _recordset_to_fit_or_evaluate_ins_components( + recordset, + 
ins_str="evaluateins", + keep_input=keep_input, + ) + + return EvaluateIns(parameters=parameters, config=config) + + +def evaluateins_to_recordset(evaluateins: EvaluateIns, keep_input: bool) -> RecordSet: + """Construct a RecordSet from a EvaluateIns object.""" + return _fit_or_evaluate_ins_to_recordset(evaluateins, keep_input) + + +def recordset_to_evaluateres(recordset: RecordSet) -> EvaluateRes: + """Derive EvaluateRes from a RecordSet object.""" + ins_str = "evaluateres" + + loss = cast(int, recordset.get_metrics(f"{ins_str}.loss")["loss"]) + + num_examples = cast( + int, recordset.get_metrics(f"{ins_str}.num_examples")["num_examples"] + ) + configs_record = recordset.get_configs(f"{ins_str}.metrics") + + metrics = _check_mapping_from_recordscalartype_to_scalar(configs_record.data) + status = _extract_status_from_recordset(ins_str, recordset) + + return EvaluateRes( + status=status, loss=loss, num_examples=num_examples, metrics=metrics + ) + + +def evaluateres_to_recordset(evaluateres: EvaluateRes) -> RecordSet: + """Construct a RecordSet from a EvaluateRes object.""" + recordset = RecordSet() + + res_str = "evaluateres" + # loss + recordset.set_metrics( + name=f"{res_str}.loss", + record=MetricsRecord({"loss": evaluateres.loss}), + ) + + # num_examples + recordset.set_metrics( + name=f"{res_str}.num_examples", + record=MetricsRecord({"num_examples": evaluateres.num_examples}), + ) + + # metrics + recordset.set_configs( + name=f"{res_str}.metrics", + record=ConfigsRecord(evaluateres.metrics), # type: ignore + ) + + # status + recordset = _embed_status_into_recordset( + f"{res_str}", evaluateres.status, recordset + ) + + return recordset + + +def recordset_to_getparametersins(recordset: RecordSet) -> GetParametersIns: + """Derive GetParametersIns from a RecordSet object.""" + config_record = recordset.get_configs("getparametersins.config") + + config_dict = _check_mapping_from_recordscalartype_to_scalar(config_record.data) + + return 
GetParametersIns(config=config_dict) + + +def getparametersins_to_recordset(getparameters_ins: GetParametersIns) -> RecordSet: + """Construct a RecordSet from a GetParametersIns object.""" + recordset = RecordSet() + + recordset.set_configs( + name="getparametersins.config", + record=ConfigsRecord(getparameters_ins.config), # type: ignore + ) + return recordset + + +def getparametersres_to_recordset( + getparametersres: GetParametersRes, keep_input: bool +) -> RecordSet: + """Construct a RecordSet from a GetParametersRes object.""" + recordset = RecordSet() + res_str = "getparametersres" + parameters_record = parameters_to_parametersrecord( + getparametersres.parameters, keep_input=keep_input + ) + recordset.set_parameters(f"{res_str}.parameters", parameters_record) + + # status + recordset = _embed_status_into_recordset( + res_str, getparametersres.status, recordset + ) + + return recordset + + +def recordset_to_getparametersres( + recordset: RecordSet, keep_input: bool +) -> GetParametersRes: + """Derive GetParametersRes from a RecordSet object.""" + res_str = "getparametersres" + parameters = parametersrecord_to_parameters( + recordset.get_parameters(f"{res_str}.parameters"), keep_input=keep_input + ) + + status = _extract_status_from_recordset(res_str, recordset) + return GetParametersRes(status=status, parameters=parameters) + + +def recordset_to_getpropertiesins(recordset: RecordSet) -> GetPropertiesIns: + """Derive GetPropertiesIns from a RecordSet object.""" + config_record = recordset.get_configs("getpropertiesins.config") + config_dict = _check_mapping_from_recordscalartype_to_scalar(config_record.data) + + return GetPropertiesIns(config=config_dict) + + +def getpropertiesins_to_recordset(getpropertiesins: GetPropertiesIns) -> RecordSet: + """Construct a RecordSet from a GetPropertiesRes object.""" + recordset = RecordSet() + recordset.set_configs( + name="getpropertiesins.config", + record=ConfigsRecord(getpropertiesins.config), # type: ignore + ) + 
return recordset + + +def recordset_to_getpropertiesres(recordset: RecordSet) -> GetPropertiesRes: + """Derive GetPropertiesRes from a RecordSet object.""" + res_str = "getpropertiesres" + config_record = recordset.get_configs(f"{res_str}.properties") + properties = _check_mapping_from_recordscalartype_to_scalar(config_record.data) + + status = _extract_status_from_recordset(res_str, recordset=recordset) + + return GetPropertiesRes(status=status, properties=properties) + + +def getpropertiesres_to_recordset(getpropertiesres: GetPropertiesRes) -> RecordSet: + """Construct a RecordSet from a GetPropertiesRes object.""" + recordset = RecordSet() + res_str = "getpropertiesres" + recordset.set_configs( + name=f"{res_str}.properties", + record=ConfigsRecord(getpropertiesres.properties), # type: ignore + ) + # status + recordset = _embed_status_into_recordset( + res_str, getpropertiesres.status, recordset + ) + + return recordset diff --git a/src/py/flwr/common/recordset_compat_test.py b/src/py/flwr/common/recordset_compat_test.py new file mode 100644 index 000000000000..288326dc9e83 --- /dev/null +++ b/src/py/flwr/common/recordset_compat_test.py @@ -0,0 +1,303 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""RecordSet from legacy messages tests.""" + +from copy import deepcopy +from typing import Callable, Dict + +import numpy as np +import pytest + +from .parameter import ndarrays_to_parameters +from .recordset_compat import ( + evaluateins_to_recordset, + evaluateres_to_recordset, + fitins_to_recordset, + fitres_to_recordset, + getparametersins_to_recordset, + getparametersres_to_recordset, + getpropertiesins_to_recordset, + getpropertiesres_to_recordset, + recordset_to_evaluateins, + recordset_to_evaluateres, + recordset_to_fitins, + recordset_to_fitres, + recordset_to_getparametersins, + recordset_to_getparametersres, + recordset_to_getpropertiesins, + recordset_to_getpropertiesres, +) +from .typing import ( + Code, + EvaluateIns, + EvaluateRes, + FitIns, + FitRes, + GetParametersIns, + GetParametersRes, + GetPropertiesIns, + GetPropertiesRes, + NDArrays, + Scalar, + Status, +) + + +def get_ndarrays() -> NDArrays: + """Return list of NumPy arrays.""" + arr1 = np.array([[1.0, 2.0], [3.0, 4], [5.0, 6.0]]) + arr2 = np.eye(2, 7, 3) + + return [arr1, arr2] + + +################################################## +# Testing conversion: *Ins --> RecordSet --> *Ins +# Testing conversion: *Res <-- RecordSet <-- *Res +################################################## + + +def _get_valid_fitins() -> FitIns: + arrays = get_ndarrays() + return FitIns(parameters=ndarrays_to_parameters(arrays), config={"a": 1.0, "b": 0}) + + +def _get_valid_fitres() -> FitRes: + """Returnn Valid parameters but potentially invalid config.""" + arrays = get_ndarrays() + metrics: Dict[str, Scalar] = {"a": 1.0, "b": 0} + return FitRes( + parameters=ndarrays_to_parameters(arrays), + num_examples=1, + status=Status(code=Code(0), message=""), + metrics=metrics, + ) + + +def _get_valid_evaluateins() -> EvaluateIns: + fit_ins = _get_valid_fitins() + return EvaluateIns(parameters=fit_ins.parameters, config=fit_ins.config) 
+ + +def _get_valid_evaluateres() -> EvaluateRes: + """Return potentially invalid config.""" + metrics: Dict[str, Scalar] = {"a": 1.0, "b": 0} + return EvaluateRes( + num_examples=1, + loss=0.1, + status=Status(code=Code(0), message=""), + metrics=metrics, + ) + + +def _get_valid_getparametersins() -> GetParametersIns: + config_dict: Dict[str, Scalar] = { + "a": 1.0, + "b": 3, + "c": True, + } # valid since both Ins/Res communicate over ConfigsRecord + + return GetParametersIns(config_dict) + + +def _get_valid_getparametersres() -> GetParametersRes: + arrays = get_ndarrays() + return GetParametersRes( + status=Status(code=Code(0), message=""), + parameters=ndarrays_to_parameters(arrays), + ) + + +def _get_valid_getpropertiesins() -> GetPropertiesIns: + getparamsins = _get_valid_getparametersins() + return GetPropertiesIns(config=getparamsins.config) + + +def _get_valid_getpropertiesres() -> GetPropertiesRes: + config_dict: Dict[str, Scalar] = { + "a": 1.0, + "b": 3, + "c": True, + } # valid since both Ins/Res communicate over ConfigsRecord + + return GetPropertiesRes( + status=Status(code=Code(0), message=""), properties=config_dict + ) + + +@pytest.mark.parametrize( + "keep_input, validate_freed_fn", + [ + ( + False, + lambda x, x_copy, y: len(x.parameters.tensors) == 0 and x_copy == y, + ), # check tensors were freed + ( + True, + lambda x, x_copy, y: x == y, + ), + ], +) +def test_fitins_to_recordset_and_back( + keep_input: bool, validate_freed_fn: Callable[[FitIns, FitIns, FitIns], bool] +) -> None: + """Test conversion FitIns --> RecordSet --> FitIns.""" + fitins = _get_valid_fitins() + + fitins_copy = deepcopy(fitins) + + recordset = fitins_to_recordset(fitins, keep_input=keep_input) + + fitins_ = recordset_to_fitins(recordset, keep_input=keep_input) + + assert validate_freed_fn(fitins, fitins_copy, fitins_) + + +@pytest.mark.parametrize( + "keep_input, validate_freed_fn", + [ + ( + False, + lambda x, x_copy, y: len(x.parameters.tensors) == 0 and x_copy == y, 
+ ), # check tensors were freed + ( + True, + lambda x, x_copy, y: x == y, + ), + ], +) +def test_fitres_to_recordset_and_back( + keep_input: bool, validate_freed_fn: Callable[[FitRes, FitRes, FitRes], bool] +) -> None: + """Test conversion FitRes --> RecordSet --> FitRes.""" + fitres = _get_valid_fitres() + + fitres_copy = deepcopy(fitres) + + recordset = fitres_to_recordset(fitres, keep_input=keep_input) + fitres_ = recordset_to_fitres(recordset, keep_input=keep_input) + + assert validate_freed_fn(fitres, fitres_copy, fitres_) + + +@pytest.mark.parametrize( + "keep_input, validate_freed_fn", + [ + ( + False, + lambda x, x_copy, y: len(x.parameters.tensors) == 0 and x_copy == y, + ), # check tensors were freed + ( + True, + lambda x, x_copy, y: x == y, + ), + ], +) +def test_evaluateins_to_recordset_and_back( + keep_input: bool, + validate_freed_fn: Callable[[EvaluateIns, EvaluateIns, EvaluateIns], bool], +) -> None: + """Test conversion EvaluateIns --> RecordSet --> EvaluateIns.""" + evaluateins = _get_valid_evaluateins() + + evaluateins_copy = deepcopy(evaluateins) + + recordset = evaluateins_to_recordset(evaluateins, keep_input=keep_input) + + evaluateins_ = recordset_to_evaluateins(recordset, keep_input=keep_input) + + assert validate_freed_fn(evaluateins, evaluateins_copy, evaluateins_) + + +def test_evaluateres_to_recordset_and_back() -> None: + """Test conversion EvaluateRes --> RecordSet --> EvaluateRes.""" + evaluateres = _get_valid_evaluateres() + + evaluateres_copy = deepcopy(evaluateres) + + recordset = evaluateres_to_recordset(evaluateres) + evaluateres_ = recordset_to_evaluateres(recordset) + + assert evaluateres_copy == evaluateres_ + + +def test_get_properties_ins_to_recordset_and_back() -> None: + """Test conversion GetPropertiesIns --> RecordSet --> GetPropertiesIns.""" + getproperties_ins = _get_valid_getpropertiesins() + + getproperties_ins_copy = deepcopy(getproperties_ins) + + recordset = getpropertiesins_to_recordset(getproperties_ins) + 
getproperties_ins_ = recordset_to_getpropertiesins(recordset) + + assert getproperties_ins_copy == getproperties_ins_ + + +def test_get_properties_res_to_recordset_and_back() -> None: + """Test conversion GetPropertiesRes --> RecordSet --> GetPropertiesRes.""" + getproperties_res = _get_valid_getpropertiesres() + + getproperties_res_copy = deepcopy(getproperties_res) + + recordset = getpropertiesres_to_recordset(getproperties_res) + getproperties_res_ = recordset_to_getpropertiesres(recordset) + + assert getproperties_res_copy == getproperties_res_ + + +def test_get_parameters_ins_to_recordset_and_back() -> None: + """Test conversion GetParametersIns --> RecordSet --> GetParametersIns.""" + getparameters_ins = _get_valid_getparametersins() + + getparameters_ins_copy = deepcopy(getparameters_ins) + + recordset = getparametersins_to_recordset(getparameters_ins) + getparameters_ins_ = recordset_to_getparametersins(recordset) + + assert getparameters_ins_copy == getparameters_ins_ + + +@pytest.mark.parametrize( + "keep_input, validate_freed_fn", + [ + ( + False, + lambda x, x_copy, y: len(x.parameters.tensors) == 0 and x_copy == y, + ), # check tensors were freed + ( + True, + lambda x, x_copy, y: x == y, + ), + ], +) +def test_get_parameters_res_to_recordset_and_back( + keep_input: bool, + validate_freed_fn: Callable[ + [GetParametersRes, GetParametersRes, GetParametersRes], bool + ], +) -> None: + """Test conversion GetParametersRes --> RecordSet --> GetParametersRes.""" + getparameteres_res = _get_valid_getparametersres() + + getparameters_res_copy = deepcopy(getparameteres_res) + + recordset = getparametersres_to_recordset(getparameteres_res, keep_input=keep_input) + getparameteres_res_ = recordset_to_getparametersres( + recordset, keep_input=keep_input + ) + + assert validate_freed_fn( + getparameteres_res, getparameters_res_copy, getparameteres_res_ + ) diff --git a/src/py/flwr/common/recordset_test.py b/src/py/flwr/common/recordset_test.py index 
90c06dcdb109..cb199813f450 100644 --- a/src/py/flwr/common/recordset_test.py +++ b/src/py/flwr/common/recordset_test.py @@ -14,19 +14,27 @@ # ============================================================================== """RecordSet tests.""" - -from typing import Callable, List, OrderedDict, Type, Union +from copy import deepcopy +from typing import Callable, Dict, List, OrderedDict, Type, Union import numpy as np import pytest +from .configsrecord import ConfigsRecord +from .metricsrecord import MetricsRecord from .parameter import ndarrays_to_parameters, parameters_to_ndarrays from .parametersrecord import Array, ParametersRecord -from .recordset_utils import ( +from .recordset_compat import ( parameters_to_parametersrecord, parametersrecord_to_parameters, ) -from .typing import NDArray, NDArrays, Parameters +from .typing import ( + ConfigsRecordValues, + MetricsRecordValues, + NDArray, + NDArrays, + Parameters, +) def get_ndarrays() -> NDArrays: @@ -80,20 +88,37 @@ def test_parameters_to_array_and_back() -> None: assert np.array_equal(ndarray, ndarray_) -def test_parameters_to_parametersrecord_and_back() -> None: +@pytest.mark.parametrize( + "keep_input, validate_freed_fn", + [ + (False, lambda x, x_copy, y: len(x.tensors) == 0), # check tensors were freed + (True, lambda x, x_copy, y: x.tensors == y.tensors), # check they are equal + ], +) +def test_parameters_to_parametersrecord_and_back( + keep_input: bool, + validate_freed_fn: Callable[[Parameters, Parameters, Parameters], bool], +) -> None: """Test conversion between legacy Parameters and ParametersRecords.""" ndarrays = get_ndarrays() parameters = ndarrays_to_parameters(ndarrays) + parameters_copy = deepcopy(parameters) - params_record = parameters_to_parametersrecord(parameters=parameters) + params_record = parameters_to_parametersrecord( + parameters=parameters, keep_input=keep_input + ) - parameters_ = parametersrecord_to_parameters(params_record) + parameters_ = 
parametersrecord_to_parameters(params_record, keep_input=keep_input) ndarrays_ = parameters_to_ndarrays(parameters=parameters_) + # Validate returned NDArrays match those at the beginning for arr, arr_ in zip(ndarrays, ndarrays_): - assert np.array_equal(arr, arr_) + assert np.array_equal(arr, arr_), "no" + + # Validate initial Parameters object has been handled according to `keep_input` + assert validate_freed_fn(parameters, parameters_copy, parameters_) def test_set_parameters_while_keeping_intputs() -> None: @@ -103,7 +128,7 @@ def test_set_parameters_while_keeping_intputs() -> None: array_dict = OrderedDict( {str(i): ndarray_to_array(ndarray) for i, ndarray in enumerate(get_ndarrays())} ) - p_record.set_parameters(array_dict) + p_record.set_parameters(array_dict, keep_input=True) # Creating a second parametersrecord passing the same `array_dict` (not erased) p_record_2 = ParametersRecord(array_dict) @@ -145,3 +170,196 @@ def test_set_parameters_with_incorrect_types( with pytest.raises(TypeError): p_record.set_parameters(array_dict) # type: ignore + + +@pytest.mark.parametrize( + "key_type, value_fn", + [ + (str, lambda x: int(x.flatten()[0])), # str: int + (str, lambda x: float(x.flatten()[0])), # str: float + (str, lambda x: x.flatten().astype("int").tolist()), # str: List[int] + (str, lambda x: x.flatten().astype("float").tolist()), # str: List[float] + (str, lambda x: []), # str: empty list + ], +) +def test_set_metrics_to_metricsrecord_with_correct_types( + key_type: Type[str], + value_fn: Callable[[NDArray], MetricsRecordValues], +) -> None: + """Test adding metrics of various types to a MetricsRecord.""" + m_record = MetricsRecord() + + labels = [1, 2.0] + arrays = get_ndarrays() + + my_metrics = OrderedDict( + {key_type(label): value_fn(arr) for label, arr in zip(labels, arrays)} + ) + + # Add metric + m_record.set_metrics(my_metrics) + + # Check metrics are actually added + assert my_metrics == m_record.data + + +@pytest.mark.parametrize( + "key_type, 
value_fn", + [ + (str, lambda x: str(x.flatten()[0])), # str: str (supported: unsupported) + (str, lambda x: bool(x.flatten()[0])), # str: bool (supported: unsupported) + ( + str, + lambda x: x.flatten().astype("str").tolist(), + ), # str: List[str] (supported: unsupported) + (str, lambda x: x), # str: NDArray (supported: unsupported) + ( + str, + lambda x: {str(v): v for v in x.flatten()}, + ), # str: dict[str: float] (supported: unsupported) + ( + str, + lambda x: [{str(v): v for v in x.flatten()}], + ), # str: List[dict[str: float]] (supported: unsupported) + ( + str, + lambda x: [1, 2.0, 3.0, 4], + ), # str: List[mixing valid types] (supported: unsupported) + ( + int, + lambda x: x.flatten().tolist(), + ), # int: List[str] (unsupported: supported) + ( + float, + lambda x: x.flatten().tolist(), + ), # float: List[int] (unsupported: supported) + ], +) +def test_set_metrics_to_metricsrecord_with_incorrect_types( + key_type: Type[Union[str, int, float, bool]], + value_fn: Callable[[NDArray], Union[NDArray, Dict[str, NDArray], List[float]]], +) -> None: + """Test adding metrics of various unsupported types to a MetricsRecord.""" + m_record = MetricsRecord() + + labels = [1, 2.0] + arrays = get_ndarrays() + + my_metrics = OrderedDict( + {key_type(label): value_fn(arr) for label, arr in zip(labels, arrays)} + ) + + with pytest.raises(TypeError): + m_record.set_metrics(my_metrics) # type: ignore + + +@pytest.mark.parametrize( + "keep_input", + [ + (True), + (False), + ], +) +def test_set_metrics_to_metricsrecord_with_and_without_keeping_input( + keep_input: bool, +) -> None: + """Test keep_input functionality for MetricsRecord.""" + m_record = MetricsRecord(keep_input=keep_input) + + # constructing a valid input + labels = [1, 2.0] + arrays = get_ndarrays() + my_metrics = OrderedDict( + {str(label): arr.flatten().tolist() for label, arr in zip(labels, arrays)} + ) + + my_metrics_copy = my_metrics.copy() + + # Add metric + m_record.set_metrics(my_metrics, 
keep_input=keep_input) + + # Check metrics are actually added + # Check that input dict has been emptied when enabled such behaviour + if keep_input: + assert my_metrics == m_record.data + else: + assert my_metrics_copy == m_record.data + assert len(my_metrics) == 0 + + +@pytest.mark.parametrize( + "key_type, value_fn", + [ + (str, lambda x: str(x.flatten()[0])), # str: str + (str, lambda x: int(x.flatten()[0])), # str: int + (str, lambda x: float(x.flatten()[0])), # str: float + (str, lambda x: bool(x.flatten()[0])), # str: bool + (str, lambda x: x.flatten().tobytes()), # str: bytes + (str, lambda x: x.flatten().astype("str").tolist()), # str: List[str] + (str, lambda x: x.flatten().astype("int").tolist()), # str: List[int] + (str, lambda x: x.flatten().astype("float").tolist()), # str: List[float] + (str, lambda x: x.flatten().astype("bool").tolist()), # str: List[bool] + (str, lambda x: [x.flatten().tobytes()]), # str: List[bytes] + (str, lambda x: []), # str: empyt list + ], +) +def test_set_configs_to_configsrecord_with_correct_types( + key_type: Type[str], + value_fn: Callable[[NDArray], ConfigsRecordValues], +) -> None: + """Test adding configs of various types to a ConfigsRecord.""" + labels = [1, 2.0] + arrays = get_ndarrays() + + my_configs = OrderedDict( + {key_type(label): value_fn(arr) for label, arr in zip(labels, arrays)} + ) + + c_record = ConfigsRecord(my_configs) + + # check values are actually there + assert c_record.data == my_configs + + +@pytest.mark.parametrize( + "key_type, value_fn", + [ + (str, lambda x: x), # str: NDArray (supported: unsupported) + ( + str, + lambda x: {str(v): v for v in x.flatten()}, + ), # str: dict[str: float] (supported: unsupported) + ( + str, + lambda x: [{str(v): v for v in x.flatten()}], + ), # str: List[dict[str: float]] (supported: unsupported) + ( + str, + lambda x: [1, 2.0, 3.0, 4], + ), # str: List[mixing valid types] (supported: unsupported) + ( + int, + lambda x: x.flatten().tolist(), + ), # int: List[str] 
(unsupported: supported) + ( + float, + lambda x: x.flatten().tolist(), + ), # float: List[int] (unsupported: supported) + ], +) +def test_set_configs_to_configsrecord_with_incorrect_types( + key_type: Type[Union[str, int, float]], + value_fn: Callable[[NDArray], Union[NDArray, Dict[str, NDArray], List[float]]], +) -> None: + """Test adding configs of various unsupported types to a ConfigsRecord.""" + m_record = ConfigsRecord() + + labels = [1, 2.0] + arrays = get_ndarrays() + + my_metrics = OrderedDict( + {key_type(label): value_fn(arr) for label, arr in zip(labels, arrays)} + ) + + with pytest.raises(TypeError): + m_record.set_configs(my_metrics) # type: ignore diff --git a/src/py/flwr/common/recordset_utils.py b/src/py/flwr/common/recordset_utils.py deleted file mode 100644 index c1e724fa2758..000000000000 --- a/src/py/flwr/common/recordset_utils.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright 2024 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""RecordSet utilities.""" - - -from typing import OrderedDict - -from .parametersrecord import Array, ParametersRecord -from .typing import Parameters - - -def parametersrecord_to_parameters( - record: ParametersRecord, keep_input: bool = False -) -> Parameters: - """Convert ParameterRecord to legacy Parameters. 
- - Warning: Because `Arrays` in `ParametersRecord` encode more information of the - array-like or tensor-like data (e.g their datatype, shape) than `Parameters` it - might not be possible to reconstruct such data structures from `Parameters` objects - alone. Additional information or metadta must be provided from elsewhere. - - Parameters - ---------- - record : ParametersRecord - The record to be conveted into Parameters. - keep_input : bool (default: False) - A boolean indicating whether entries in the record should be deleted from the - input dictionary immediately after adding them to the record. - """ - parameters = Parameters(tensors=[], tensor_type="") - - for key in list(record.data.keys()): - parameters.tensors.append(record.data[key].data) - - if not keep_input: - del record.data[key] - - return parameters - - -def parameters_to_parametersrecord( - parameters: Parameters, keep_input: bool = False -) -> ParametersRecord: - """Convert legacy Parameters into a single ParametersRecord. - - Because there is no concept of names in the legacy Parameters, arbitrary keys will - be used when constructing the ParametersRecord. Similarly, the shape and data type - won't be recorded in the Array objects. - - Parameters - ---------- - parameters : Parameters - Parameters object to be represented as a ParametersRecord. - keep_input : bool (default: False) - A boolean indicating whether parameters should be deleted from the input - Parameters object (i.e. a list of serialized NumPy arrays) immediately after - adding them to the record. 
- """ - tensor_type = parameters.tensor_type - - p_record = ParametersRecord() - - num_arrays = len(parameters.tensors) - for idx in range(num_arrays): - if keep_input: - tensor = parameters.tensors[idx] - else: - tensor = parameters.tensors.pop(0) - p_record.set_parameters( - OrderedDict( - {str(idx): Array(data=tensor, dtype="", stype=tensor_type, shape=[])} - ) - ) - - return p_record diff --git a/src/py/flwr/common/secure_aggregation/secaggplus_constants.py b/src/py/flwr/common/secure_aggregation/secaggplus_constants.py index 8dd21a6016f1..9a2bf26e98e8 100644 --- a/src/py/flwr/common/secure_aggregation/secaggplus_constants.py +++ b/src/py/flwr/common/secure_aggregation/secaggplus_constants.py @@ -14,6 +14,8 @@ # ============================================================================== """Constants for the SecAgg/SecAgg+ protocol.""" +RECORD_KEY_STATE = "secaggplus_state" +RECORD_KEY_CONFIGS = "secaggplus_configs" # Names of stages STAGE_SETUP = "setup" diff --git a/src/py/flwr/common/serde.py b/src/py/flwr/common/serde.py index 59f5387b0a07..2808cb88fb5c 100644 --- a/src/py/flwr/common/serde.py +++ b/src/py/flwr/common/serde.py @@ -15,9 +15,22 @@ """ProtoBuf serialization and deserialization.""" -from typing import Any, Dict, List, MutableMapping, cast - -from flwr.proto.task_pb2 import Value +from typing import Any, Dict, List, MutableMapping, OrderedDict, Type, TypeVar, cast + +from google.protobuf.message import Message as GrpcMessage + +# pylint: disable=E0611 +from flwr.proto.recordset_pb2 import Array as ProtoArray +from flwr.proto.recordset_pb2 import BoolList, BytesList +from flwr.proto.recordset_pb2 import ConfigsRecord as ProtoConfigsRecord +from flwr.proto.recordset_pb2 import ConfigsRecordValue as ProtoConfigsRecordValue +from flwr.proto.recordset_pb2 import DoubleList +from flwr.proto.recordset_pb2 import MetricsRecord as ProtoMetricsRecord +from flwr.proto.recordset_pb2 import MetricsRecordValue as ProtoMetricsRecordValue +from 
flwr.proto.recordset_pb2 import ParametersRecord as ProtoParametersRecord +from flwr.proto.recordset_pb2 import RecordSet as ProtoRecordSet +from flwr.proto.recordset_pb2 import Sint64List, StringList +from flwr.proto.task_pb2 import Task, TaskIns, TaskRes from flwr.proto.transport_pb2 import ( ClientMessage, Code, @@ -28,143 +41,13 @@ Status, ) +# pylint: enable=E0611 from . import typing - -# === ServerMessage message === - - -def server_message_to_proto(server_message: typing.ServerMessage) -> ServerMessage: - """Serialize `ServerMessage` to ProtoBuf.""" - if server_message.get_properties_ins is not None: - return ServerMessage( - get_properties_ins=get_properties_ins_to_proto( - server_message.get_properties_ins, - ) - ) - if server_message.get_parameters_ins is not None: - return ServerMessage( - get_parameters_ins=get_parameters_ins_to_proto( - server_message.get_parameters_ins, - ) - ) - if server_message.fit_ins is not None: - return ServerMessage( - fit_ins=fit_ins_to_proto( - server_message.fit_ins, - ) - ) - if server_message.evaluate_ins is not None: - return ServerMessage( - evaluate_ins=evaluate_ins_to_proto( - server_message.evaluate_ins, - ) - ) - raise ValueError( - "No instruction set in ServerMessage, cannot serialize to ProtoBuf" - ) - - -def server_message_from_proto( - server_message_proto: ServerMessage, -) -> typing.ServerMessage: - """Deserialize `ServerMessage` from ProtoBuf.""" - field = server_message_proto.WhichOneof("msg") - if field == "get_properties_ins": - return typing.ServerMessage( - get_properties_ins=get_properties_ins_from_proto( - server_message_proto.get_properties_ins, - ) - ) - if field == "get_parameters_ins": - return typing.ServerMessage( - get_parameters_ins=get_parameters_ins_from_proto( - server_message_proto.get_parameters_ins, - ) - ) - if field == "fit_ins": - return typing.ServerMessage( - fit_ins=fit_ins_from_proto( - server_message_proto.fit_ins, - ) - ) - if field == "evaluate_ins": - return 
typing.ServerMessage( - evaluate_ins=evaluate_ins_from_proto( - server_message_proto.evaluate_ins, - ) - ) - raise ValueError( - "Unsupported instruction in ServerMessage, cannot deserialize from ProtoBuf" - ) - - -# === ClientMessage message === - - -def client_message_to_proto(client_message: typing.ClientMessage) -> ClientMessage: - """Serialize `ClientMessage` to ProtoBuf.""" - if client_message.get_properties_res is not None: - return ClientMessage( - get_properties_res=get_properties_res_to_proto( - client_message.get_properties_res, - ) - ) - if client_message.get_parameters_res is not None: - return ClientMessage( - get_parameters_res=get_parameters_res_to_proto( - client_message.get_parameters_res, - ) - ) - if client_message.fit_res is not None: - return ClientMessage( - fit_res=fit_res_to_proto( - client_message.fit_res, - ) - ) - if client_message.evaluate_res is not None: - return ClientMessage( - evaluate_res=evaluate_res_to_proto( - client_message.evaluate_res, - ) - ) - raise ValueError( - "No instruction set in ClientMessage, cannot serialize to ProtoBuf" - ) - - -def client_message_from_proto( - client_message_proto: ClientMessage, -) -> typing.ClientMessage: - """Deserialize `ClientMessage` from ProtoBuf.""" - field = client_message_proto.WhichOneof("msg") - if field == "get_properties_res": - return typing.ClientMessage( - get_properties_res=get_properties_res_from_proto( - client_message_proto.get_properties_res, - ) - ) - if field == "get_parameters_res": - return typing.ClientMessage( - get_parameters_res=get_parameters_res_from_proto( - client_message_proto.get_parameters_res, - ) - ) - if field == "fit_res": - return typing.ClientMessage( - fit_res=fit_res_from_proto( - client_message_proto.fit_res, - ) - ) - if field == "evaluate_res": - return typing.ClientMessage( - evaluate_res=evaluate_res_from_proto( - client_message_proto.evaluate_res, - ) - ) - raise ValueError( - "Unsupported instruction in ClientMessage, cannot deserialize from 
ProtoBuf" - ) - +from .configsrecord import ConfigsRecord +from .message import Message, Metadata +from .metricsrecord import MetricsRecord +from .parametersrecord import Array, ParametersRecord +from .recordset import RecordSet # === Parameters message === @@ -190,26 +73,9 @@ def reconnect_ins_to_proto(ins: typing.ReconnectIns) -> ServerMessage.ReconnectI return ServerMessage.ReconnectIns() -def reconnect_ins_from_proto(msg: ServerMessage.ReconnectIns) -> typing.ReconnectIns: - """Deserialize `ReconnectIns` from ProtoBuf.""" - return typing.ReconnectIns(seconds=msg.seconds) - - # === DisconnectRes message === -def disconnect_res_to_proto(res: typing.DisconnectRes) -> ClientMessage.DisconnectRes: - """Serialize `DisconnectRes` to ProtoBuf.""" - reason_proto = Reason.UNKNOWN - if res.reason == "RECONNECT": - reason_proto = Reason.RECONNECT - elif res.reason == "POWER_DISCONNECTED": - reason_proto = Reason.POWER_DISCONNECTED - elif res.reason == "WIFI_UNAVAILABLE": - reason_proto = Reason.WIFI_UNAVAILABLE - return ClientMessage.DisconnectRes(reason=reason_proto) - - def disconnect_res_from_proto(msg: ClientMessage.DisconnectRes) -> typing.DisconnectRes: """Deserialize `DisconnectRes` from ProtoBuf.""" if msg.reason == Reason.RECONNECT: @@ -490,86 +356,250 @@ def scalar_from_proto(scalar_msg: Scalar) -> typing.Scalar: return cast(typing.Scalar, scalar) -# === Value messages === +# === Record messages === -_python_type_to_field_name = { +_type_to_field = { float: "double", int: "sint64", bool: "bool", str: "string", bytes: "bytes", } +_list_type_to_class_and_field = { + float: (DoubleList, "double_list"), + int: (Sint64List, "sint64_list"), + bool: (BoolList, "bool_list"), + str: (StringList, "string_list"), + bytes: (BytesList, "bytes_list"), +} +T = TypeVar("T") -_python_list_type_to_message_and_field_name = { - float: (Value.DoubleList, "double_list"), - int: (Value.Sint64List, "sint64_list"), - bool: (Value.BoolList, "bool_list"), - str: (Value.StringList, 
"string_list"), - bytes: (Value.BytesList, "bytes_list"), -} +def _record_value_to_proto( + value: Any, allowed_types: List[type], proto_class: Type[T] +) -> T: + """Serialize `*RecordValue` to ProtoBuf. + + Note: `bool` MUST be put in the front of allowd_types if it exists. + """ + arg = {} + for t in allowed_types: + # Single element + # Note: `isinstance(False, int) == True`. + if isinstance(value, t): + arg[_type_to_field[t]] = value + return proto_class(**arg) + # List + if isinstance(value, list) and all(isinstance(item, t) for item in value): + list_class, field_name = _list_type_to_class_and_field[t] + arg[field_name] = list_class(vals=value) + return proto_class(**arg) + # Invalid types + raise TypeError( + f"The type of the following value is not allowed " + f"in '{proto_class.__name__}':\n{value}" + ) -def _check_value(value: typing.Value) -> None: - if isinstance(value, tuple(_python_type_to_field_name.keys())): - return - if isinstance(value, list): - if len(value) > 0 and isinstance( - value[0], tuple(_python_type_to_field_name.keys()) - ): - data_type = type(value[0]) - for element in value: - if isinstance(element, data_type): - continue - raise TypeError( - f"Inconsistent type: the types of elements in the list must " - f"be the same (expected {data_type}, but got {type(element)})." - ) +def _record_value_from_proto(value_proto: GrpcMessage) -> Any: + """Deserialize `*RecordValue` from ProtoBuf.""" + value_field = cast(str, value_proto.WhichOneof("value")) + if value_field.endswith("list"): + value = list(getattr(value_proto, value_field).vals) else: - raise TypeError( - f"Accepted types: {bool, bytes, float, int, str} or " - f"list of these types." + value = getattr(value_proto, value_field) + return value + + +def _record_value_dict_to_proto( + value_dict: Dict[str, Any], allowed_types: List[type], value_proto_class: Type[T] +) -> Dict[str, T]: + """Serialize the record value dict to ProtoBuf. 
+ + Note: `bool` MUST be put in the front of allowd_types if it exists. + """ + # Move bool to the front + if bool in allowed_types and allowed_types[0] != bool: + allowed_types.remove(bool) + allowed_types.insert(0, bool) + + def proto(_v: Any) -> T: + return _record_value_to_proto(_v, allowed_types, value_proto_class) + + return {k: proto(v) for k, v in value_dict.items()} + + +def _record_value_dict_from_proto( + value_dict_proto: MutableMapping[str, Any] +) -> Dict[str, Any]: + """Deserialize the record value dict from ProtoBuf.""" + return {k: _record_value_from_proto(v) for k, v in value_dict_proto.items()} + + +def array_to_proto(array: Array) -> ProtoArray: + """Serialize Array to ProtoBuf.""" + return ProtoArray(**vars(array)) + + +def array_from_proto(array_proto: ProtoArray) -> Array: + """Deserialize Array from ProtoBuf.""" + return Array( + dtype=array_proto.dtype, + shape=list(array_proto.shape), + stype=array_proto.stype, + data=array_proto.data, + ) + + +def parameters_record_to_proto(record: ParametersRecord) -> ProtoParametersRecord: + """Serialize ParametersRecord to ProtoBuf.""" + return ProtoParametersRecord( + data_keys=record.data.keys(), + data_values=map(array_to_proto, record.data.values()), + ) + + +def parameters_record_from_proto( + record_proto: ProtoParametersRecord, +) -> ParametersRecord: + """Deserialize ParametersRecord from ProtoBuf.""" + return ParametersRecord( + array_dict=OrderedDict( + zip(record_proto.data_keys, map(array_from_proto, record_proto.data_values)) + ), + keep_input=False, + ) + + +def metrics_record_to_proto(record: MetricsRecord) -> ProtoMetricsRecord: + """Serialize MetricsRecord to ProtoBuf.""" + return ProtoMetricsRecord( + data=_record_value_dict_to_proto( + record.data, [float, int], ProtoMetricsRecordValue ) + ) -def value_to_proto(value: typing.Value) -> Value: - """Serialize `Value` to ProtoBuf.""" - _check_value(value) +def metrics_record_from_proto(record_proto: ProtoMetricsRecord) -> MetricsRecord: 
+ """Deserialize MetricsRecord from ProtoBuf.""" + return MetricsRecord( + metrics_dict=cast( + Dict[str, typing.MetricsRecordValues], + _record_value_dict_from_proto(record_proto.data), + ), + keep_input=False, + ) - arg = {} - if isinstance(value, list): - msg_class, field_name = _python_list_type_to_message_and_field_name[ - type(value[0]) if len(value) > 0 else int - ] - arg[field_name] = msg_class(vals=value) - else: - arg[_python_type_to_field_name[type(value)]] = value - return Value(**arg) + +def configs_record_to_proto(record: ConfigsRecord) -> ProtoConfigsRecord: + """Serialize ConfigsRecord to ProtoBuf.""" + return ProtoConfigsRecord( + data=_record_value_dict_to_proto( + record.data, [bool, int, float, str, bytes], ProtoConfigsRecordValue + ) + ) -def value_from_proto(value_msg: Value) -> typing.Value: - """Deserialize `Value` from ProtoBuf.""" - value_field = cast(str, value_msg.WhichOneof("value")) - if value_field.endswith("list"): - value = list(getattr(value_msg, value_field).vals) - else: - value = getattr(value_msg, value_field) - return cast(typing.Value, value) +def configs_record_from_proto(record_proto: ProtoConfigsRecord) -> ConfigsRecord: + """Deserialize ConfigsRecord from ProtoBuf.""" + return ConfigsRecord( + configs_dict=cast( + Dict[str, typing.ConfigsRecordValues], + _record_value_dict_from_proto(record_proto.data), + ), + keep_input=False, + ) + + +# === RecordSet message === + + +def recordset_to_proto(recordset: RecordSet) -> ProtoRecordSet: + """Serialize RecordSet to ProtoBuf.""" + return ProtoRecordSet( + parameters={ + k: parameters_record_to_proto(v) for k, v in recordset.parameters.items() + }, + metrics={k: metrics_record_to_proto(v) for k, v in recordset.metrics.items()}, + configs={k: configs_record_to_proto(v) for k, v in recordset.configs.items()}, + ) + + +def recordset_from_proto(recordset_proto: ProtoRecordSet) -> RecordSet: + """Deserialize RecordSet from ProtoBuf.""" + return RecordSet( + parameters={ + k: 
parameters_record_from_proto(v) + for k, v in recordset_proto.parameters.items() + }, + metrics={ + k: metrics_record_from_proto(v) for k, v in recordset_proto.metrics.items() + }, + configs={ + k: configs_record_from_proto(v) for k, v in recordset_proto.configs.items() + }, + ) -# === Named Values === +# === Message === -def named_values_to_proto( - named_values: Dict[str, typing.Value], -) -> Dict[str, Value]: - """Serialize named values to ProtoBuf.""" - return {name: value_to_proto(value) for name, value in named_values.items()} +def message_to_taskins(message: Message) -> TaskIns: + """Create a TaskIns from the Message.""" + return TaskIns( + task=Task( + ttl=message.metadata.ttl, + task_type=message.metadata.message_type, + recordset=recordset_to_proto(message.content), + ), + ) + + +def message_from_taskins(taskins: TaskIns) -> Message: + """Create a Message from the TaskIns.""" + # Retrieve the Metadata + metadata = Metadata( + run_id=taskins.run_id, + message_id=taskins.task_id, + group_id=taskins.group_id, + node_id=taskins.task.consumer.node_id, + ttl=taskins.task.ttl, + message_type=taskins.task.task_type, + ) + + # Return the Message + return Message( + metadata=metadata, + content=recordset_from_proto(taskins.task.recordset), + ) -def named_values_from_proto( - named_values_proto: MutableMapping[str, Value] -) -> Dict[str, typing.Value]: - """Deserialize named values from ProtoBuf.""" - return {name: value_from_proto(value) for name, value in named_values_proto.items()} +def message_to_taskres(message: Message) -> TaskRes: + """Create a TaskRes from the Message.""" + return TaskRes( + task=Task( + ttl=message.metadata.ttl, + task_type=message.metadata.message_type, + recordset=recordset_to_proto(message.content), + ), + ) + + +def message_from_taskres(taskres: TaskRes) -> Message: + """Create a Message from the TaskIns.""" + # Retrieve the MetaData + metadata = Metadata( + run_id=taskres.run_id, + message_id=taskres.task_id, + 
group_id=taskres.group_id, + node_id=taskres.task.consumer.node_id, + ttl=taskres.task.ttl, + message_type=taskres.task.task_type, + ) + + # Return the Message + return Message( + metadata=metadata, + content=recordset_from_proto(taskres.task.recordset), + ) diff --git a/src/py/flwr/common/serde_test.py b/src/py/flwr/common/serde_test.py index ba07890f4658..44085e8d9ab8 100644 --- a/src/py/flwr/common/serde_test.py +++ b/src/py/flwr/common/serde_test.py @@ -15,20 +15,44 @@ """(De-)serialization tests.""" -from typing import Dict, Union, cast +import random +import string +from typing import Any, Optional, OrderedDict, Type, TypeVar, Union, cast -from flwr.common import typing +# pylint: disable=E0611 from flwr.proto import transport_pb2 as pb2 +from flwr.proto.recordset_pb2 import Array as ProtoArray +from flwr.proto.recordset_pb2 import ConfigsRecord as ProtoConfigsRecord +from flwr.proto.recordset_pb2 import MetricsRecord as ProtoMetricsRecord +from flwr.proto.recordset_pb2 import ParametersRecord as ProtoParametersRecord +from flwr.proto.recordset_pb2 import RecordSet as ProtoRecordSet +# pylint: enable=E0611 +from . 
import typing +from .configsrecord import ConfigsRecord +from .message import Message, Metadata +from .metricsrecord import MetricsRecord +from .parametersrecord import Array, ParametersRecord +from .recordset import RecordSet from .serde import ( - named_values_from_proto, - named_values_to_proto, + array_from_proto, + array_to_proto, + configs_record_from_proto, + configs_record_to_proto, + message_from_taskins, + message_from_taskres, + message_to_taskins, + message_to_taskres, + metrics_record_from_proto, + metrics_record_to_proto, + parameters_record_from_proto, + parameters_record_to_proto, + recordset_from_proto, + recordset_to_proto, scalar_from_proto, scalar_to_proto, status_from_proto, status_to_proto, - value_from_proto, - value_to_proto, ) @@ -50,8 +74,8 @@ def test_serialisation_deserialisation() -> None: def test_status_to_proto() -> None: """Test status message (de-)serialization.""" # Prepare - code_msg = pb2.Code.OK - status_msg = pb2.Status(code=code_msg, message="Success") + code_msg = pb2.Code.OK # pylint: disable=E1101 + status_msg = pb2.Status(code=code_msg, message="Success") # pylint: disable=E1101 code = typing.Code.OK status = typing.Status(code=code, message="Success") @@ -66,8 +90,8 @@ def test_status_to_proto() -> None: def test_status_from_proto() -> None: """Test status message (de-)serialization.""" # Prepare - code_msg = pb2.Code.OK - status_msg = pb2.Status(code=code_msg, message="Success") + code_msg = pb2.Code.OK # pylint: disable=E1101 + status_msg = pb2.Status(code=code_msg, message="Success") # pylint: disable=E1101 code = typing.Code.OK status = typing.Status(code=code, message="Success") @@ -79,81 +103,258 @@ def test_status_from_proto() -> None: assert actual_status == status -def test_value_serialization_deserialization() -> None: - """Test if values are identical after (de-)serialization.""" - # Prepare - values = [ - # boolean scalar and list - True, - [True, False, False, True], - # bytes scalar and list - b"test 
\x01\x02\x03 !@#$%^&*()", - [b"\x0a\x0b", b"\x0c\x0d\x0e", b"\x0f"], - # float scalar and list - 3.14, - [2.714, -0.012], - # integer scalar and list - 23, - [123456], - # string scalar and list - "abcdefghijklmnopqrstuvwxy", - ["456hgdhfd", "1234567890123456789012345678901", "I'm a string."], - # empty list - [], - ] - - for value in values: - # Execute - serialized = value_to_proto(cast(typing.Value, value)) - deserialized = value_from_proto(serialized) +T = TypeVar("T") - # Assert - if isinstance(value, list): - assert isinstance(deserialized, list) - assert len(value) == len(deserialized) - for elm1, elm2 in zip(value, deserialized): - assert elm1 == elm2 + +class RecordMaker: + """A record maker based on a seeded random number generator.""" + + def __init__(self, state: int = 42) -> None: + self.rng = random.Random(state) + + def randbytes(self, n: int) -> bytes: + """Create a bytes.""" + return self.rng.getrandbits(n * 8).to_bytes(n, "little") + + def get_str(self, length: Optional[int] = None) -> str: + """Create a string.""" + char_pool = ( + string.ascii_letters + string.digits + " !@#$%^&*()_-+=[]|;':,./<>?{}" + ) + if length is None: + length = self.rng.randint(1, 10) + return "".join(self.rng.choices(char_pool, k=length)) + + def get_value(self, dtype: Type[T]) -> T: + """Create a value of a given type.""" + ret: Any = None + if dtype == bool: + ret = self.rng.random() < 0.5 + elif dtype == str: + ret = self.get_str(self.rng.randint(10, 100)) + elif dtype == int: + ret = self.rng.randint(-1 << 30, 1 << 30) + elif dtype == float: + ret = (self.rng.random() - 0.5) * (2.0 ** self.rng.randint(0, 50)) + elif dtype == bytes: + ret = self.randbytes(self.rng.randint(10, 100)) else: - assert value == deserialized + raise NotImplementedError(f"Unsupported dtype: {dtype}") + return cast(T, ret) + + def array(self) -> Array: + """Create a Array.""" + dtypes = ("float", "int") + stypes = ("torch", "tf", "numpy") + max_shape_size = 100 + max_shape_dim = 10 + 
min_max_bytes_size = (10, 1000) + + dtype = self.rng.choice(dtypes) + shape = [ + self.rng.randint(1, max_shape_size) + for _ in range(self.rng.randint(1, max_shape_dim)) + ] + stype = self.rng.choice(stypes) + data = self.randbytes(self.rng.randint(*min_max_bytes_size)) + return Array(dtype=dtype, shape=shape, stype=stype, data=data) + + def parameters_record(self) -> ParametersRecord: + """Create a ParametersRecord.""" + num_arrays = self.rng.randint(1, 5) + arrays = OrderedDict( + [(self.get_str(), self.array()) for i in range(num_arrays)] + ) + return ParametersRecord(arrays, keep_input=False) + + def metrics_record(self) -> MetricsRecord: + """Create a MetricsRecord.""" + num_entries = self.rng.randint(1, 5) + types = (float, int) + return MetricsRecord( + metrics_dict={ + self.get_str(): self.get_value(self.rng.choice(types)) + for _ in range(num_entries) + }, + keep_input=False, + ) + + def configs_record(self) -> ConfigsRecord: + """Create a ConfigsRecord.""" + num_entries = self.rng.randint(1, 5) + types = (str, int, float, bytes, bool) + return ConfigsRecord( + configs_dict={ + self.get_str(): self.get_value(self.rng.choice(types)) + for _ in range(num_entries) + }, + keep_input=False, + ) + + def recordset( + self, + num_params_records: int, + num_metrics_records: int, + num_configs_records: int, + ) -> RecordSet: + """Create a RecordSet.""" + return RecordSet( + parameters={ + self.get_str(): self.parameters_record() + for _ in range(num_params_records) + }, + metrics={ + self.get_str(): self.metrics_record() + for _ in range(num_metrics_records) + }, + configs={ + self.get_str(): self.configs_record() + for _ in range(num_configs_records) + }, + ) + + def metadata(self) -> Metadata: + """Create a Metadata.""" + return Metadata( + run_id=self.rng.randint(0, 1 << 30), + message_id=self.get_str(64), + group_id=self.get_str(30), + node_id=self.rng.randint(0, 1 << 63), + ttl=self.get_str(10), + message_type=self.get_str(10), + ) -def 
test_named_values_serialization_deserialization() -> None: - """Test if named values is identical after (de-)serialization.""" +def test_array_serialization_deserialization() -> None: + """Test serialization and deserialization of Array.""" # Prepare - values = [ - # boolean scalar and list - True, - [True, False, False, True], - # bytes scalar and list - b"test \x01\x02\x03 !@#$%^&*()", - [b"\x0a\x0b", b"\x0c\x0d\x0e", b"\x0f"], - # float scalar and list - 3.14, - [2.714, -0.012], - # integer scalar and list - 23, - [123456], - # string scalar and list - "abcdefghijklmnopqrstuvwxy", - ["456hgdhfd", "1234567890123456789012345678901", "I'm a string."], - # empty list - [], - ] - named_values = {f"value {i}": value for i, value in enumerate(values)} + maker = RecordMaker() + original = maker.array() # Execute - serialized = named_values_to_proto(cast(Dict[str, typing.Value], named_values)) - deserialized = named_values_from_proto(serialized) + proto = array_to_proto(original) + deserialized = array_from_proto(proto) # Assert - assert len(named_values) == len(deserialized) - for name in named_values: - expected = named_values[name] - actual = deserialized[name] - if isinstance(expected, list): - assert isinstance(actual, list) - assert len(expected) == len(actual) - for elm1, elm2 in zip(expected, actual): - assert elm1 == elm2 - else: - assert expected == actual + assert isinstance(proto, ProtoArray) + assert original == deserialized + + +def test_parameters_record_serialization_deserialization() -> None: + """Test serialization and deserialization of ParametersRecord.""" + # Prepare + maker = RecordMaker() + original = maker.parameters_record() + + # Execute + proto = parameters_record_to_proto(original) + deserialized = parameters_record_from_proto(proto) + + # Assert + assert isinstance(proto, ProtoParametersRecord) + assert original.data == deserialized.data + + +def test_metrics_record_serialization_deserialization() -> None: + """Test serialization and 
deserialization of MetricsRecord.""" + # Prepare + maker = RecordMaker() + original = maker.metrics_record() + + # Execute + proto = metrics_record_to_proto(original) + deserialized = metrics_record_from_proto(proto) + + # Assert + assert isinstance(proto, ProtoMetricsRecord) + assert original.data == deserialized.data + + +def test_configs_record_serialization_deserialization() -> None: + """Test serialization and deserialization of ConfigsRecord.""" + # Prepare + maker = RecordMaker() + original = maker.configs_record() + + # Execute + proto = configs_record_to_proto(original) + deserialized = configs_record_from_proto(proto) + + # Assert + assert isinstance(proto, ProtoConfigsRecord) + assert original.data == deserialized.data + + +def test_recordset_serialization_deserialization() -> None: + """Test serialization and deserialization of RecordSet.""" + # Prepare + maker = RecordMaker(state=0) + original = maker.recordset(2, 2, 1) + + # Execute + proto = recordset_to_proto(original) + deserialized = recordset_from_proto(proto) + + # Assert + assert isinstance(proto, ProtoRecordSet) + assert original == deserialized + + +def test_message_to_and_from_taskins() -> None: + """Test Message to and from TaskIns.""" + # Prepare + maker = RecordMaker(state=1) + metadata = maker.metadata() + original = Message( + metadata=Metadata( + run_id=0, + message_id="", + group_id="", + node_id=metadata.node_id, + ttl=metadata.ttl, + message_type=metadata.message_type, + ), + content=maker.recordset(1, 1, 1), + ) + + # Execute + taskins = message_to_taskins(original) + taskins.run_id = metadata.run_id + taskins.task_id = metadata.message_id + taskins.group_id = metadata.group_id + taskins.task.consumer.node_id = metadata.node_id + deserialized = message_from_taskins(taskins) + + # Assert + assert original.content == deserialized.content + assert metadata == deserialized.metadata + + +def test_message_to_and_from_taskres() -> None: + """Test Message to and from TaskRes.""" + # 
Prepare + maker = RecordMaker(state=2) + metadata = maker.metadata() + original = Message( + metadata=Metadata( + run_id=0, + message_id="", + group_id="", + node_id=metadata.node_id, + ttl=metadata.ttl, + message_type=metadata.message_type, + ), + content=maker.recordset(1, 1, 1), + ) + + # Execute + taskres = message_to_taskres(original) + taskres.run_id = metadata.run_id + taskres.task_id = metadata.message_id + taskres.group_id = metadata.group_id + taskres.task.consumer.node_id = metadata.node_id + deserialized = message_from_taskres(taskres) + + # Assert + assert original.content == deserialized.content + assert metadata == deserialized.metadata diff --git a/src/py/flwr/common/telemetry.py b/src/py/flwr/common/telemetry.py index fed8b5a978bc..8eb594085d31 100644 --- a/src/py/flwr/common/telemetry.py +++ b/src/py/flwr/common/telemetry.py @@ -32,7 +32,7 @@ FLWR_TELEMETRY_ENABLED = os.getenv("FLWR_TELEMETRY_ENABLED", "1") FLWR_TELEMETRY_LOGGING = os.getenv("FLWR_TELEMETRY_LOGGING", "0") -TELEMETRY_EVENTS_URL = "https://telemetry.flower.dev/api/v1/event" +TELEMETRY_EVENTS_URL = "https://telemetry.flower.ai/api/v1/event" LOGGER_NAME = "flwr-telemetry" LOGGER_LEVEL = logging.DEBUG @@ -137,8 +137,8 @@ def _generate_next_value_(name: str, start: int, count: int, last_values: List[A RUN_FLEET_API_LEAVE = auto() # Driver API and Fleet API - RUN_SERVER_ENTER = auto() - RUN_SERVER_LEAVE = auto() + RUN_SUPERLINK_ENTER = auto() + RUN_SUPERLINK_LEAVE = auto() # Simulation START_SIMULATION_ENTER = auto() @@ -152,9 +152,13 @@ def _generate_next_value_(name: str, start: int, count: int, last_values: List[A START_DRIVER_ENTER = auto() START_DRIVER_LEAVE = auto() - # SuperNode: flower-client - RUN_CLIENT_ENTER = auto() - RUN_CLIENT_LEAVE = auto() + # flower-client-app + RUN_CLIENT_APP_ENTER = auto() + RUN_CLIENT_APP_LEAVE = auto() + + # flower-server-app + RUN_SERVER_APP_ENTER = auto() + RUN_SERVER_APP_LEAVE = auto() # Use the ThreadPoolExecutor with max_workers=1 to have a 
queue diff --git a/src/py/flwr/common/typing.py b/src/py/flwr/common/typing.py index 6c0266f5eec8..d6b2ec9b158c 100644 --- a/src/py/flwr/common/typing.py +++ b/src/py/flwr/common/typing.py @@ -45,6 +45,15 @@ List[str], ] +# Value types for common.MetricsRecord +MetricsScalar = Union[int, float] +MetricsScalarList = Union[List[int], List[float]] +MetricsRecordValues = Union[MetricsScalar, MetricsScalarList] +# Value types for common.ConfigsRecord +ConfigsScalar = Union[MetricsScalar, str, bytes, bool] +ConfigsScalarList = Union[MetricsScalarList, List[str], List[bytes], List[bool]] +ConfigsRecordValues = Union[ConfigsScalar, ConfigsScalarList] + Metrics = Dict[str, Scalar] MetricsAggregationFn = Callable[[List[Tuple[int, Metrics]]], Metrics] diff --git a/src/py/flwr/driver/driver_client_proxy_test.py b/src/py/flwr/driver/driver_client_proxy_test.py deleted file mode 100644 index e7fb088dbf57..000000000000 --- a/src/py/flwr/driver/driver_client_proxy_test.py +++ /dev/null @@ -1,185 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""DriverClientProxy tests.""" - - -import unittest -from unittest.mock import MagicMock - -import numpy as np - -import flwr -from flwr.common.typing import Config, GetParametersIns -from flwr.driver.driver_client_proxy import DriverClientProxy -from flwr.proto import driver_pb2, node_pb2, task_pb2 -from flwr.proto.transport_pb2 import ClientMessage, Parameters, Scalar - -MESSAGE_PARAMETERS = Parameters(tensors=[b"abc"], tensor_type="np") - -CLIENT_PROPERTIES = {"tensor_type": Scalar(string="numpy.ndarray")} - - -class DriverClientProxyTestCase(unittest.TestCase): - """Tests for DriverClientProxy.""" - - def setUp(self) -> None: - """Set up mocks for tests.""" - self.driver = MagicMock() - self.driver.get_nodes.return_value = driver_pb2.GetNodesResponse( - nodes=[node_pb2.Node(node_id=1, anonymous=False)] - ) - - def test_get_properties(self) -> None: - """Test positive case.""" - # Prepare - self.driver.push_task_ins.return_value = driver_pb2.PushTaskInsResponse( - task_ids=["19341fd7-62e1-4eb4-beb4-9876d3acda32"] - ) - self.driver.pull_task_res.return_value = driver_pb2.PullTaskResResponse( - task_res_list=[ - task_pb2.TaskRes( - task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", - group_id="", - run_id=0, - task=task_pb2.Task( - legacy_client_message=ClientMessage( - get_properties_res=ClientMessage.GetPropertiesRes( - properties=CLIENT_PROPERTIES - ) - ) - ), - ) - ] - ) - client = DriverClientProxy( - node_id=1, driver=self.driver, anonymous=True, run_id=0 - ) - request_properties: Config = {"tensor_type": "str"} - ins: flwr.common.GetPropertiesIns = flwr.common.GetPropertiesIns( - config=request_properties - ) - - # Execute - value: flwr.common.GetPropertiesRes = client.get_properties(ins, timeout=None) - - # Assert - assert value.properties["tensor_type"] == "numpy.ndarray" - - def test_get_parameters(self) -> None: - """Test positive case.""" - # Prepare - 
self.driver.push_task_ins.return_value = driver_pb2.PushTaskInsResponse( - task_ids=["19341fd7-62e1-4eb4-beb4-9876d3acda32"] - ) - self.driver.pull_task_res.return_value = driver_pb2.PullTaskResResponse( - task_res_list=[ - task_pb2.TaskRes( - task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", - group_id="", - run_id=0, - task=task_pb2.Task( - legacy_client_message=ClientMessage( - get_parameters_res=ClientMessage.GetParametersRes( - parameters=MESSAGE_PARAMETERS, - ) - ) - ), - ) - ] - ) - client = DriverClientProxy( - node_id=1, driver=self.driver, anonymous=True, run_id=0 - ) - get_parameters_ins = GetParametersIns(config={}) - - # Execute - value: flwr.common.GetParametersRes = client.get_parameters( - ins=get_parameters_ins, timeout=None - ) - - # Assert - assert value.parameters.tensors[0] == b"abc" - - def test_fit(self) -> None: - """Test positive case.""" - # Prepare - self.driver.push_task_ins.return_value = driver_pb2.PushTaskInsResponse( - task_ids=["19341fd7-62e1-4eb4-beb4-9876d3acda32"] - ) - self.driver.pull_task_res.return_value = driver_pb2.PullTaskResResponse( - task_res_list=[ - task_pb2.TaskRes( - task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", - group_id="", - run_id=0, - task=task_pb2.Task( - legacy_client_message=ClientMessage( - fit_res=ClientMessage.FitRes( - parameters=MESSAGE_PARAMETERS, - num_examples=10, - ) - ) - ), - ) - ] - ) - client = DriverClientProxy( - node_id=1, driver=self.driver, anonymous=True, run_id=0 - ) - parameters = flwr.common.ndarrays_to_parameters([np.ones((2, 2))]) - ins: flwr.common.FitIns = flwr.common.FitIns(parameters, {}) - - # Execute - fit_res = client.fit(ins=ins, timeout=None) - - # Assert - assert fit_res.parameters.tensor_type == "np" - assert fit_res.parameters.tensors[0] == b"abc" - assert fit_res.num_examples == 10 - - def test_evaluate(self) -> None: - """Test positive case.""" - # Prepare - self.driver.push_task_ins.return_value = driver_pb2.PushTaskInsResponse( - 
task_ids=["19341fd7-62e1-4eb4-beb4-9876d3acda32"] - ) - self.driver.pull_task_res.return_value = driver_pb2.PullTaskResResponse( - task_res_list=[ - task_pb2.TaskRes( - task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", - group_id="", - run_id=0, - task=task_pb2.Task( - legacy_client_message=ClientMessage( - evaluate_res=ClientMessage.EvaluateRes( - loss=0.0, num_examples=0 - ) - ) - ), - ) - ] - ) - client = DriverClientProxy( - node_id=1, driver=self.driver, anonymous=True, run_id=0 - ) - parameters = flwr.common.Parameters(tensors=[], tensor_type="np") - evaluate_ins: flwr.common.EvaluateIns = flwr.common.EvaluateIns(parameters, {}) - - # Execute - evaluate_res = client.evaluate(evaluate_ins, timeout=None) - - # Assert - assert 0.0 == evaluate_res.loss - assert 0 == evaluate_res.num_examples diff --git a/src/py/flwr/flower/__init__.py b/src/py/flwr/flower/__init__.py deleted file mode 100644 index 892a7ce5afdc..000000000000 --- a/src/py/flwr/flower/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2020 Adap GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Flower callable package.""" - - -from flwr.client.flower import Flower as Flower -from flwr.client.typing import Bwd as Bwd -from flwr.client.typing import Fwd as Fwd - -__all__ = [ - "Flower", - "Fwd", - "Bwd", -] diff --git a/src/py/flwr/proto/driver_pb2.py b/src/py/flwr/proto/driver_pb2.py index 615bf4672afa..fe9c33da0fa9 100644 --- a/src/py/flwr/proto/driver_pb2.py +++ b/src/py/flwr/proto/driver_pb2.py @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: flwr/proto/driver.proto +# Protobuf Python Version: 4.25.0 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -18,92 +18,27 @@ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17\x66lwr/proto/driver.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\"\x12\n\x10\x43reateRunRequest\"#\n\x11\x43reateRunResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"!\n\x0fGetNodesRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"3\n\x10GetNodesResponse\x12\x1f\n\x05nodes\x18\x01 \x03(\x0b\x32\x10.flwr.proto.Node\"@\n\x12PushTaskInsRequest\x12*\n\rtask_ins_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"\'\n\x13PushTaskInsResponse\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"F\n\x12PullTaskResRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"A\n\x13PullTaskResResponse\x12*\n\rtask_res_list\x18\x01 
\x03(\x0b\x32\x13.flwr.proto.TaskRes2\xc1\x02\n\x06\x44river\x12J\n\tCreateRun\x12\x1c.flwr.proto.CreateRunRequest\x1a\x1d.flwr.proto.CreateRunResponse\"\x00\x12G\n\x08GetNodes\x12\x1b.flwr.proto.GetNodesRequest\x1a\x1c.flwr.proto.GetNodesResponse\"\x00\x12P\n\x0bPushTaskIns\x12\x1e.flwr.proto.PushTaskInsRequest\x1a\x1f.flwr.proto.PushTaskInsResponse\"\x00\x12P\n\x0bPullTaskRes\x12\x1e.flwr.proto.PullTaskResRequest\x1a\x1f.flwr.proto.PullTaskResResponse\"\x00\x62\x06proto3') - - -_CREATERUNREQUEST = DESCRIPTOR.message_types_by_name['CreateRunRequest'] -_CREATERUNRESPONSE = DESCRIPTOR.message_types_by_name['CreateRunResponse'] -_GETNODESREQUEST = DESCRIPTOR.message_types_by_name['GetNodesRequest'] -_GETNODESRESPONSE = DESCRIPTOR.message_types_by_name['GetNodesResponse'] -_PUSHTASKINSREQUEST = DESCRIPTOR.message_types_by_name['PushTaskInsRequest'] -_PUSHTASKINSRESPONSE = DESCRIPTOR.message_types_by_name['PushTaskInsResponse'] -_PULLTASKRESREQUEST = DESCRIPTOR.message_types_by_name['PullTaskResRequest'] -_PULLTASKRESRESPONSE = DESCRIPTOR.message_types_by_name['PullTaskResResponse'] -CreateRunRequest = _reflection.GeneratedProtocolMessageType('CreateRunRequest', (_message.Message,), { - 'DESCRIPTOR' : _CREATERUNREQUEST, - '__module__' : 'flwr.proto.driver_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.CreateRunRequest) - }) -_sym_db.RegisterMessage(CreateRunRequest) - -CreateRunResponse = _reflection.GeneratedProtocolMessageType('CreateRunResponse', (_message.Message,), { - 'DESCRIPTOR' : _CREATERUNRESPONSE, - '__module__' : 'flwr.proto.driver_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.CreateRunResponse) - }) -_sym_db.RegisterMessage(CreateRunResponse) - -GetNodesRequest = _reflection.GeneratedProtocolMessageType('GetNodesRequest', (_message.Message,), { - 'DESCRIPTOR' : _GETNODESREQUEST, - '__module__' : 'flwr.proto.driver_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.GetNodesRequest) - }) -_sym_db.RegisterMessage(GetNodesRequest) - 
-GetNodesResponse = _reflection.GeneratedProtocolMessageType('GetNodesResponse', (_message.Message,), { - 'DESCRIPTOR' : _GETNODESRESPONSE, - '__module__' : 'flwr.proto.driver_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.GetNodesResponse) - }) -_sym_db.RegisterMessage(GetNodesResponse) - -PushTaskInsRequest = _reflection.GeneratedProtocolMessageType('PushTaskInsRequest', (_message.Message,), { - 'DESCRIPTOR' : _PUSHTASKINSREQUEST, - '__module__' : 'flwr.proto.driver_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.PushTaskInsRequest) - }) -_sym_db.RegisterMessage(PushTaskInsRequest) - -PushTaskInsResponse = _reflection.GeneratedProtocolMessageType('PushTaskInsResponse', (_message.Message,), { - 'DESCRIPTOR' : _PUSHTASKINSRESPONSE, - '__module__' : 'flwr.proto.driver_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.PushTaskInsResponse) - }) -_sym_db.RegisterMessage(PushTaskInsResponse) - -PullTaskResRequest = _reflection.GeneratedProtocolMessageType('PullTaskResRequest', (_message.Message,), { - 'DESCRIPTOR' : _PULLTASKRESREQUEST, - '__module__' : 'flwr.proto.driver_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.PullTaskResRequest) - }) -_sym_db.RegisterMessage(PullTaskResRequest) - -PullTaskResResponse = _reflection.GeneratedProtocolMessageType('PullTaskResResponse', (_message.Message,), { - 'DESCRIPTOR' : _PULLTASKRESRESPONSE, - '__module__' : 'flwr.proto.driver_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.PullTaskResResponse) - }) -_sym_db.RegisterMessage(PullTaskResResponse) - -_DRIVER = DESCRIPTOR.services_by_name['Driver'] +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.driver_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS == False: - DESCRIPTOR._options = None - _CREATERUNREQUEST._serialized_start=85 - _CREATERUNREQUEST._serialized_end=103 - _CREATERUNRESPONSE._serialized_start=105 - 
_CREATERUNRESPONSE._serialized_end=140 - _GETNODESREQUEST._serialized_start=142 - _GETNODESREQUEST._serialized_end=175 - _GETNODESRESPONSE._serialized_start=177 - _GETNODESRESPONSE._serialized_end=228 - _PUSHTASKINSREQUEST._serialized_start=230 - _PUSHTASKINSREQUEST._serialized_end=294 - _PUSHTASKINSRESPONSE._serialized_start=296 - _PUSHTASKINSRESPONSE._serialized_end=335 - _PULLTASKRESREQUEST._serialized_start=337 - _PULLTASKRESREQUEST._serialized_end=407 - _PULLTASKRESRESPONSE._serialized_start=409 - _PULLTASKRESRESPONSE._serialized_end=474 - _DRIVER._serialized_start=477 - _DRIVER._serialized_end=798 + _globals['_CREATERUNREQUEST']._serialized_start=85 + _globals['_CREATERUNREQUEST']._serialized_end=103 + _globals['_CREATERUNRESPONSE']._serialized_start=105 + _globals['_CREATERUNRESPONSE']._serialized_end=140 + _globals['_GETNODESREQUEST']._serialized_start=142 + _globals['_GETNODESREQUEST']._serialized_end=175 + _globals['_GETNODESRESPONSE']._serialized_start=177 + _globals['_GETNODESRESPONSE']._serialized_end=228 + _globals['_PUSHTASKINSREQUEST']._serialized_start=230 + _globals['_PUSHTASKINSREQUEST']._serialized_end=294 + _globals['_PUSHTASKINSRESPONSE']._serialized_start=296 + _globals['_PUSHTASKINSRESPONSE']._serialized_end=335 + _globals['_PULLTASKRESREQUEST']._serialized_start=337 + _globals['_PULLTASKRESREQUEST']._serialized_end=407 + _globals['_PULLTASKRESRESPONSE']._serialized_start=409 + _globals['_PULLTASKRESRESPONSE']._serialized_end=474 + _globals['_DRIVER']._serialized_start=477 + _globals['_DRIVER']._serialized_end=798 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/fleet_pb2.py b/src/py/flwr/proto/fleet_pb2.py index e86a53e2139e..e8443c296f0c 100644 --- a/src/py/flwr/proto/fleet_pb2.py +++ b/src/py/flwr/proto/fleet_pb2.py @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: flwr/proto/fleet.proto +# Protobuf Python Version: 4.25.0 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -18,115 +18,33 @@ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16\x66lwr/proto/fleet.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\"\x13\n\x11\x43reateNodeRequest\"4\n\x12\x43reateNodeResponse\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"3\n\x11\x44\x65leteNodeRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"\x14\n\x12\x44\x65leteNodeResponse\"F\n\x12PullTaskInsRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"k\n\x13PullTaskInsResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12*\n\rtask_ins_list\x18\x02 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"@\n\x12PushTaskResRequest\x12*\n\rtask_res_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskRes\"\xae\x01\n\x13PushTaskResResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12=\n\x07results\x18\x02 \x03(\x0b\x32,.flwr.proto.PushTaskResResponse.ResultsEntry\x1a.\n\x0cResultsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\r:\x02\x38\x01\"\x1e\n\tReconnect\x12\x11\n\treconnect\x18\x01 
\x01(\x04\x32\xc9\x02\n\x05\x46leet\x12M\n\nCreateNode\x12\x1d.flwr.proto.CreateNodeRequest\x1a\x1e.flwr.proto.CreateNodeResponse\"\x00\x12M\n\nDeleteNode\x12\x1d.flwr.proto.DeleteNodeRequest\x1a\x1e.flwr.proto.DeleteNodeResponse\"\x00\x12P\n\x0bPullTaskIns\x12\x1e.flwr.proto.PullTaskInsRequest\x1a\x1f.flwr.proto.PullTaskInsResponse\"\x00\x12P\n\x0bPushTaskRes\x12\x1e.flwr.proto.PushTaskResRequest\x1a\x1f.flwr.proto.PushTaskResResponse\"\x00\x62\x06proto3') - - -_CREATENODEREQUEST = DESCRIPTOR.message_types_by_name['CreateNodeRequest'] -_CREATENODERESPONSE = DESCRIPTOR.message_types_by_name['CreateNodeResponse'] -_DELETENODEREQUEST = DESCRIPTOR.message_types_by_name['DeleteNodeRequest'] -_DELETENODERESPONSE = DESCRIPTOR.message_types_by_name['DeleteNodeResponse'] -_PULLTASKINSREQUEST = DESCRIPTOR.message_types_by_name['PullTaskInsRequest'] -_PULLTASKINSRESPONSE = DESCRIPTOR.message_types_by_name['PullTaskInsResponse'] -_PUSHTASKRESREQUEST = DESCRIPTOR.message_types_by_name['PushTaskResRequest'] -_PUSHTASKRESRESPONSE = DESCRIPTOR.message_types_by_name['PushTaskResResponse'] -_PUSHTASKRESRESPONSE_RESULTSENTRY = _PUSHTASKRESRESPONSE.nested_types_by_name['ResultsEntry'] -_RECONNECT = DESCRIPTOR.message_types_by_name['Reconnect'] -CreateNodeRequest = _reflection.GeneratedProtocolMessageType('CreateNodeRequest', (_message.Message,), { - 'DESCRIPTOR' : _CREATENODEREQUEST, - '__module__' : 'flwr.proto.fleet_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.CreateNodeRequest) - }) -_sym_db.RegisterMessage(CreateNodeRequest) - -CreateNodeResponse = _reflection.GeneratedProtocolMessageType('CreateNodeResponse', (_message.Message,), { - 'DESCRIPTOR' : _CREATENODERESPONSE, - '__module__' : 'flwr.proto.fleet_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.CreateNodeResponse) - }) -_sym_db.RegisterMessage(CreateNodeResponse) - -DeleteNodeRequest = _reflection.GeneratedProtocolMessageType('DeleteNodeRequest', (_message.Message,), { - 'DESCRIPTOR' : 
_DELETENODEREQUEST, - '__module__' : 'flwr.proto.fleet_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.DeleteNodeRequest) - }) -_sym_db.RegisterMessage(DeleteNodeRequest) - -DeleteNodeResponse = _reflection.GeneratedProtocolMessageType('DeleteNodeResponse', (_message.Message,), { - 'DESCRIPTOR' : _DELETENODERESPONSE, - '__module__' : 'flwr.proto.fleet_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.DeleteNodeResponse) - }) -_sym_db.RegisterMessage(DeleteNodeResponse) - -PullTaskInsRequest = _reflection.GeneratedProtocolMessageType('PullTaskInsRequest', (_message.Message,), { - 'DESCRIPTOR' : _PULLTASKINSREQUEST, - '__module__' : 'flwr.proto.fleet_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.PullTaskInsRequest) - }) -_sym_db.RegisterMessage(PullTaskInsRequest) - -PullTaskInsResponse = _reflection.GeneratedProtocolMessageType('PullTaskInsResponse', (_message.Message,), { - 'DESCRIPTOR' : _PULLTASKINSRESPONSE, - '__module__' : 'flwr.proto.fleet_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.PullTaskInsResponse) - }) -_sym_db.RegisterMessage(PullTaskInsResponse) - -PushTaskResRequest = _reflection.GeneratedProtocolMessageType('PushTaskResRequest', (_message.Message,), { - 'DESCRIPTOR' : _PUSHTASKRESREQUEST, - '__module__' : 'flwr.proto.fleet_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.PushTaskResRequest) - }) -_sym_db.RegisterMessage(PushTaskResRequest) - -PushTaskResResponse = _reflection.GeneratedProtocolMessageType('PushTaskResResponse', (_message.Message,), { - - 'ResultsEntry' : _reflection.GeneratedProtocolMessageType('ResultsEntry', (_message.Message,), { - 'DESCRIPTOR' : _PUSHTASKRESRESPONSE_RESULTSENTRY, - '__module__' : 'flwr.proto.fleet_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.PushTaskResResponse.ResultsEntry) - }) - , - 'DESCRIPTOR' : _PUSHTASKRESRESPONSE, - '__module__' : 'flwr.proto.fleet_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.PushTaskResResponse) - }) 
-_sym_db.RegisterMessage(PushTaskResResponse) -_sym_db.RegisterMessage(PushTaskResResponse.ResultsEntry) - -Reconnect = _reflection.GeneratedProtocolMessageType('Reconnect', (_message.Message,), { - 'DESCRIPTOR' : _RECONNECT, - '__module__' : 'flwr.proto.fleet_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.Reconnect) - }) -_sym_db.RegisterMessage(Reconnect) - -_FLEET = DESCRIPTOR.services_by_name['Fleet'] +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.fleet_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS == False: - DESCRIPTOR._options = None - _PUSHTASKRESRESPONSE_RESULTSENTRY._options = None - _PUSHTASKRESRESPONSE_RESULTSENTRY._serialized_options = b'8\001' - _CREATENODEREQUEST._serialized_start=84 - _CREATENODEREQUEST._serialized_end=103 - _CREATENODERESPONSE._serialized_start=105 - _CREATENODERESPONSE._serialized_end=157 - _DELETENODEREQUEST._serialized_start=159 - _DELETENODEREQUEST._serialized_end=210 - _DELETENODERESPONSE._serialized_start=212 - _DELETENODERESPONSE._serialized_end=232 - _PULLTASKINSREQUEST._serialized_start=234 - _PULLTASKINSREQUEST._serialized_end=304 - _PULLTASKINSRESPONSE._serialized_start=306 - _PULLTASKINSRESPONSE._serialized_end=413 - _PUSHTASKRESREQUEST._serialized_start=415 - _PUSHTASKRESREQUEST._serialized_end=479 - _PUSHTASKRESRESPONSE._serialized_start=482 - _PUSHTASKRESRESPONSE._serialized_end=656 - _PUSHTASKRESRESPONSE_RESULTSENTRY._serialized_start=610 - _PUSHTASKRESRESPONSE_RESULTSENTRY._serialized_end=656 - _RECONNECT._serialized_start=658 - _RECONNECT._serialized_end=688 - _FLEET._serialized_start=691 - _FLEET._serialized_end=1020 + _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._options = None + _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_options = b'8\001' + _globals['_CREATENODEREQUEST']._serialized_start=84 + _globals['_CREATENODEREQUEST']._serialized_end=103 + 
_globals['_CREATENODERESPONSE']._serialized_start=105 + _globals['_CREATENODERESPONSE']._serialized_end=157 + _globals['_DELETENODEREQUEST']._serialized_start=159 + _globals['_DELETENODEREQUEST']._serialized_end=210 + _globals['_DELETENODERESPONSE']._serialized_start=212 + _globals['_DELETENODERESPONSE']._serialized_end=232 + _globals['_PULLTASKINSREQUEST']._serialized_start=234 + _globals['_PULLTASKINSREQUEST']._serialized_end=304 + _globals['_PULLTASKINSRESPONSE']._serialized_start=306 + _globals['_PULLTASKINSRESPONSE']._serialized_end=413 + _globals['_PUSHTASKRESREQUEST']._serialized_start=415 + _globals['_PUSHTASKRESREQUEST']._serialized_end=479 + _globals['_PUSHTASKRESRESPONSE']._serialized_start=482 + _globals['_PUSHTASKRESRESPONSE']._serialized_end=656 + _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_start=610 + _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_end=656 + _globals['_RECONNECT']._serialized_start=658 + _globals['_RECONNECT']._serialized_end=688 + _globals['_FLEET']._serialized_start=691 + _globals['_FLEET']._serialized_end=1020 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/node_pb2.py b/src/py/flwr/proto/node_pb2.py index 9d91900d8f53..b300f2c562c2 100644 --- a/src/py/flwr/proto/node_pb2.py +++ b/src/py/flwr/proto/node_pb2.py @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: flwr/proto/node.proto +# Protobuf Python Version: 4.25.0 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -16,19 +16,11 @@ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/node.proto\x12\nflwr.proto\"*\n\x04Node\x12\x0f\n\x07node_id\x18\x01 \x01(\x12\x12\x11\n\tanonymous\x18\x02 \x01(\x08\x62\x06proto3') - - -_NODE = DESCRIPTOR.message_types_by_name['Node'] -Node = _reflection.GeneratedProtocolMessageType('Node', (_message.Message,), { - 'DESCRIPTOR' : _NODE, - '__module__' : 'flwr.proto.node_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.Node) - }) -_sym_db.RegisterMessage(Node) - +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.node_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS == False: - DESCRIPTOR._options = None - _NODE._serialized_start=37 - _NODE._serialized_end=79 + _globals['_NODE']._serialized_start=37 + _globals['_NODE']._serialized_end=79 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/recordset_pb2.py b/src/py/flwr/proto/recordset_pb2.py new file mode 100644 index 000000000000..f7f74d72182b --- /dev/null +++ b/src/py/flwr/proto/recordset_pb2.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: flwr/proto/recordset.proto +# Protobuf Python Version: 4.25.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1a\x66lwr/proto/recordset.proto\x12\nflwr.proto\"\x1a\n\nDoubleList\x12\x0c\n\x04vals\x18\x01 \x03(\x01\"\x1a\n\nSint64List\x12\x0c\n\x04vals\x18\x01 \x03(\x12\"\x18\n\x08\x42oolList\x12\x0c\n\x04vals\x18\x01 \x03(\x08\"\x1a\n\nStringList\x12\x0c\n\x04vals\x18\x01 \x03(\t\"\x19\n\tBytesList\x12\x0c\n\x04vals\x18\x01 \x03(\x0c\"B\n\x05\x41rray\x12\r\n\x05\x64type\x18\x01 \x01(\t\x12\r\n\x05shape\x18\x02 \x03(\x05\x12\r\n\x05stype\x18\x03 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\"\x9f\x01\n\x12MetricsRecordValue\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06sint64\x18\x02 \x01(\x12H\x00\x12-\n\x0b\x64ouble_list\x18\x15 \x01(\x0b\x32\x16.flwr.proto.DoubleListH\x00\x12-\n\x0bsint64_list\x18\x16 \x01(\x0b\x32\x16.flwr.proto.Sint64ListH\x00\x42\x07\n\x05value\"\xd9\x02\n\x12\x43onfigsRecordValue\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06sint64\x18\x02 \x01(\x12H\x00\x12\x0e\n\x04\x62ool\x18\x03 \x01(\x08H\x00\x12\x10\n\x06string\x18\x04 \x01(\tH\x00\x12\x0f\n\x05\x62ytes\x18\x05 \x01(\x0cH\x00\x12-\n\x0b\x64ouble_list\x18\x15 \x01(\x0b\x32\x16.flwr.proto.DoubleListH\x00\x12-\n\x0bsint64_list\x18\x16 \x01(\x0b\x32\x16.flwr.proto.Sint64ListH\x00\x12)\n\tbool_list\x18\x17 \x01(\x0b\x32\x14.flwr.proto.BoolListH\x00\x12-\n\x0bstring_list\x18\x18 \x01(\x0b\x32\x16.flwr.proto.StringListH\x00\x12+\n\nbytes_list\x18\x19 \x01(\x0b\x32\x15.flwr.proto.BytesListH\x00\x42\x07\n\x05value\"M\n\x10ParametersRecord\x12\x11\n\tdata_keys\x18\x01 
\x03(\t\x12&\n\x0b\x64\x61ta_values\x18\x02 \x03(\x0b\x32\x11.flwr.proto.Array\"\x8f\x01\n\rMetricsRecord\x12\x31\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32#.flwr.proto.MetricsRecord.DataEntry\x1aK\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.flwr.proto.MetricsRecordValue:\x02\x38\x01\"\x8f\x01\n\rConfigsRecord\x12\x31\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32#.flwr.proto.ConfigsRecord.DataEntry\x1aK\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.flwr.proto.ConfigsRecordValue:\x02\x38\x01\"\x97\x03\n\tRecordSet\x12\x39\n\nparameters\x18\x01 \x03(\x0b\x32%.flwr.proto.RecordSet.ParametersEntry\x12\x33\n\x07metrics\x18\x02 \x03(\x0b\x32\".flwr.proto.RecordSet.MetricsEntry\x12\x33\n\x07\x63onfigs\x18\x03 \x03(\x0b\x32\".flwr.proto.RecordSet.ConfigsEntry\x1aO\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12+\n\x05value\x18\x02 \x01(\x0b\x32\x1c.flwr.proto.ParametersRecord:\x02\x38\x01\x1aI\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.flwr.proto.MetricsRecord:\x02\x38\x01\x1aI\n\x0c\x43onfigsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.flwr.proto.ConfigsRecord:\x02\x38\x01\x62\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.recordset_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + _globals['_METRICSRECORD_DATAENTRY']._options = None + _globals['_METRICSRECORD_DATAENTRY']._serialized_options = b'8\001' + _globals['_CONFIGSRECORD_DATAENTRY']._options = None + _globals['_CONFIGSRECORD_DATAENTRY']._serialized_options = b'8\001' + _globals['_RECORDSET_PARAMETERSENTRY']._options = None + _globals['_RECORDSET_PARAMETERSENTRY']._serialized_options = b'8\001' + _globals['_RECORDSET_METRICSENTRY']._options = None + 
_globals['_RECORDSET_METRICSENTRY']._serialized_options = b'8\001' + _globals['_RECORDSET_CONFIGSENTRY']._options = None + _globals['_RECORDSET_CONFIGSENTRY']._serialized_options = b'8\001' + _globals['_DOUBLELIST']._serialized_start=42 + _globals['_DOUBLELIST']._serialized_end=68 + _globals['_SINT64LIST']._serialized_start=70 + _globals['_SINT64LIST']._serialized_end=96 + _globals['_BOOLLIST']._serialized_start=98 + _globals['_BOOLLIST']._serialized_end=122 + _globals['_STRINGLIST']._serialized_start=124 + _globals['_STRINGLIST']._serialized_end=150 + _globals['_BYTESLIST']._serialized_start=152 + _globals['_BYTESLIST']._serialized_end=177 + _globals['_ARRAY']._serialized_start=179 + _globals['_ARRAY']._serialized_end=245 + _globals['_METRICSRECORDVALUE']._serialized_start=248 + _globals['_METRICSRECORDVALUE']._serialized_end=407 + _globals['_CONFIGSRECORDVALUE']._serialized_start=410 + _globals['_CONFIGSRECORDVALUE']._serialized_end=755 + _globals['_PARAMETERSRECORD']._serialized_start=757 + _globals['_PARAMETERSRECORD']._serialized_end=834 + _globals['_METRICSRECORD']._serialized_start=837 + _globals['_METRICSRECORD']._serialized_end=980 + _globals['_METRICSRECORD_DATAENTRY']._serialized_start=905 + _globals['_METRICSRECORD_DATAENTRY']._serialized_end=980 + _globals['_CONFIGSRECORD']._serialized_start=983 + _globals['_CONFIGSRECORD']._serialized_end=1126 + _globals['_CONFIGSRECORD_DATAENTRY']._serialized_start=1051 + _globals['_CONFIGSRECORD_DATAENTRY']._serialized_end=1126 + _globals['_RECORDSET']._serialized_start=1129 + _globals['_RECORDSET']._serialized_end=1536 + _globals['_RECORDSET_PARAMETERSENTRY']._serialized_start=1307 + _globals['_RECORDSET_PARAMETERSENTRY']._serialized_end=1386 + _globals['_RECORDSET_METRICSENTRY']._serialized_start=1388 + _globals['_RECORDSET_METRICSENTRY']._serialized_end=1461 + _globals['_RECORDSET_CONFIGSENTRY']._serialized_start=1463 + _globals['_RECORDSET_CONFIGSENTRY']._serialized_end=1536 +# 
@@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/recordset_pb2.pyi b/src/py/flwr/proto/recordset_pb2.pyi new file mode 100644 index 000000000000..86244697129c --- /dev/null +++ b/src/py/flwr/proto/recordset_pb2.pyi @@ -0,0 +1,305 @@ +""" +@generated by mypy-protobuf. Do not edit manually! +isort:skip_file +""" +import builtins +import google.protobuf.descriptor +import google.protobuf.internal.containers +import google.protobuf.message +import typing +import typing_extensions + +DESCRIPTOR: google.protobuf.descriptor.FileDescriptor + +class DoubleList(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + VALS_FIELD_NUMBER: builtins.int + @property + def vals(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.float]: ... + def __init__(self, + *, + vals: typing.Optional[typing.Iterable[builtins.float]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... +global___DoubleList = DoubleList + +class Sint64List(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + VALS_FIELD_NUMBER: builtins.int + @property + def vals(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: ... + def __init__(self, + *, + vals: typing.Optional[typing.Iterable[builtins.int]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... +global___Sint64List = Sint64List + +class BoolList(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + VALS_FIELD_NUMBER: builtins.int + @property + def vals(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.bool]: ... + def __init__(self, + *, + vals: typing.Optional[typing.Iterable[builtins.bool]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... 
+global___BoolList = BoolList + +class StringList(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + VALS_FIELD_NUMBER: builtins.int + @property + def vals(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[typing.Text]: ... + def __init__(self, + *, + vals: typing.Optional[typing.Iterable[typing.Text]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... +global___StringList = StringList + +class BytesList(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + VALS_FIELD_NUMBER: builtins.int + @property + def vals(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.bytes]: ... + def __init__(self, + *, + vals: typing.Optional[typing.Iterable[builtins.bytes]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... +global___BytesList = BytesList + +class Array(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + DTYPE_FIELD_NUMBER: builtins.int + SHAPE_FIELD_NUMBER: builtins.int + STYPE_FIELD_NUMBER: builtins.int + DATA_FIELD_NUMBER: builtins.int + dtype: typing.Text + @property + def shape(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: ... + stype: typing.Text + data: builtins.bytes + def __init__(self, + *, + dtype: typing.Text = ..., + shape: typing.Optional[typing.Iterable[builtins.int]] = ..., + stype: typing.Text = ..., + data: builtins.bytes = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["data",b"data","dtype",b"dtype","shape",b"shape","stype",b"stype"]) -> None: ... 
+global___Array = Array + +class MetricsRecordValue(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + DOUBLE_FIELD_NUMBER: builtins.int + SINT64_FIELD_NUMBER: builtins.int + DOUBLE_LIST_FIELD_NUMBER: builtins.int + SINT64_LIST_FIELD_NUMBER: builtins.int + double: builtins.float + """Single element""" + + sint64: builtins.int + @property + def double_list(self) -> global___DoubleList: + """List types""" + pass + @property + def sint64_list(self) -> global___Sint64List: ... + def __init__(self, + *, + double: builtins.float = ..., + sint64: builtins.int = ..., + double_list: typing.Optional[global___DoubleList] = ..., + sint64_list: typing.Optional[global___Sint64List] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["double",b"double","double_list",b"double_list","sint64",b"sint64","sint64_list",b"sint64_list","value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["double",b"double","double_list",b"double_list","sint64",b"sint64","sint64_list",b"sint64_list","value",b"value"]) -> None: ... + def WhichOneof(self, oneof_group: typing_extensions.Literal["value",b"value"]) -> typing.Optional[typing_extensions.Literal["double","sint64","double_list","sint64_list"]]: ... 
+global___MetricsRecordValue = MetricsRecordValue + +class ConfigsRecordValue(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + DOUBLE_FIELD_NUMBER: builtins.int + SINT64_FIELD_NUMBER: builtins.int + BOOL_FIELD_NUMBER: builtins.int + STRING_FIELD_NUMBER: builtins.int + BYTES_FIELD_NUMBER: builtins.int + DOUBLE_LIST_FIELD_NUMBER: builtins.int + SINT64_LIST_FIELD_NUMBER: builtins.int + BOOL_LIST_FIELD_NUMBER: builtins.int + STRING_LIST_FIELD_NUMBER: builtins.int + BYTES_LIST_FIELD_NUMBER: builtins.int + double: builtins.float + """Single element""" + + sint64: builtins.int + bool: builtins.bool + string: typing.Text + bytes: builtins.bytes + @property + def double_list(self) -> global___DoubleList: + """List types""" + pass + @property + def sint64_list(self) -> global___Sint64List: ... + @property + def bool_list(self) -> global___BoolList: ... + @property + def string_list(self) -> global___StringList: ... + @property + def bytes_list(self) -> global___BytesList: ... + def __init__(self, + *, + double: builtins.float = ..., + sint64: builtins.int = ..., + bool: builtins.bool = ..., + string: typing.Text = ..., + bytes: builtins.bytes = ..., + double_list: typing.Optional[global___DoubleList] = ..., + sint64_list: typing.Optional[global___Sint64List] = ..., + bool_list: typing.Optional[global___BoolList] = ..., + string_list: typing.Optional[global___StringList] = ..., + bytes_list: typing.Optional[global___BytesList] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["bool",b"bool","bool_list",b"bool_list","bytes",b"bytes","bytes_list",b"bytes_list","double",b"double","double_list",b"double_list","sint64",b"sint64","sint64_list",b"sint64_list","string",b"string","string_list",b"string_list","value",b"value"]) -> builtins.bool: ... 
+ def ClearField(self, field_name: typing_extensions.Literal["bool",b"bool","bool_list",b"bool_list","bytes",b"bytes","bytes_list",b"bytes_list","double",b"double","double_list",b"double_list","sint64",b"sint64","sint64_list",b"sint64_list","string",b"string","string_list",b"string_list","value",b"value"]) -> None: ... + def WhichOneof(self, oneof_group: typing_extensions.Literal["value",b"value"]) -> typing.Optional[typing_extensions.Literal["double","sint64","bool","string","bytes","double_list","sint64_list","bool_list","string_list","bytes_list"]]: ... +global___ConfigsRecordValue = ConfigsRecordValue + +class ParametersRecord(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + DATA_KEYS_FIELD_NUMBER: builtins.int + DATA_VALUES_FIELD_NUMBER: builtins.int + @property + def data_keys(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[typing.Text]: ... + @property + def data_values(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Array]: ... + def __init__(self, + *, + data_keys: typing.Optional[typing.Iterable[typing.Text]] = ..., + data_values: typing.Optional[typing.Iterable[global___Array]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["data_keys",b"data_keys","data_values",b"data_values"]) -> None: ... +global___ParametersRecord = ParametersRecord + +class MetricsRecord(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + class DataEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: typing.Text + @property + def value(self) -> global___MetricsRecordValue: ... + def __init__(self, + *, + key: typing.Text = ..., + value: typing.Optional[global___MetricsRecordValue] = ..., + ) -> None: ... 
+ def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + + DATA_FIELD_NUMBER: builtins.int + @property + def data(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___MetricsRecordValue]: ... + def __init__(self, + *, + data: typing.Optional[typing.Mapping[typing.Text, global___MetricsRecordValue]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["data",b"data"]) -> None: ... +global___MetricsRecord = MetricsRecord + +class ConfigsRecord(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + class DataEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: typing.Text + @property + def value(self) -> global___ConfigsRecordValue: ... + def __init__(self, + *, + key: typing.Text = ..., + value: typing.Optional[global___ConfigsRecordValue] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + + DATA_FIELD_NUMBER: builtins.int + @property + def data(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___ConfigsRecordValue]: ... + def __init__(self, + *, + data: typing.Optional[typing.Mapping[typing.Text, global___ConfigsRecordValue]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["data",b"data"]) -> None: ... 
+global___ConfigsRecord = ConfigsRecord + +class RecordSet(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + class ParametersEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: typing.Text + @property + def value(self) -> global___ParametersRecord: ... + def __init__(self, + *, + key: typing.Text = ..., + value: typing.Optional[global___ParametersRecord] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + + class MetricsEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: typing.Text + @property + def value(self) -> global___MetricsRecord: ... + def __init__(self, + *, + key: typing.Text = ..., + value: typing.Optional[global___MetricsRecord] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + + class ConfigsEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: typing.Text + @property + def value(self) -> global___ConfigsRecord: ... + def __init__(self, + *, + key: typing.Text = ..., + value: typing.Optional[global___ConfigsRecord] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... 
+ + PARAMETERS_FIELD_NUMBER: builtins.int + METRICS_FIELD_NUMBER: builtins.int + CONFIGS_FIELD_NUMBER: builtins.int + @property + def parameters(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___ParametersRecord]: ... + @property + def metrics(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___MetricsRecord]: ... + @property + def configs(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___ConfigsRecord]: ... + def __init__(self, + *, + parameters: typing.Optional[typing.Mapping[typing.Text, global___ParametersRecord]] = ..., + metrics: typing.Optional[typing.Mapping[typing.Text, global___MetricsRecord]] = ..., + configs: typing.Optional[typing.Mapping[typing.Text, global___ConfigsRecord]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["configs",b"configs","metrics",b"metrics","parameters",b"parameters"]) -> None: ... +global___RecordSet = RecordSet diff --git a/src/py/flwr/proto/recordset_pb2_grpc.py b/src/py/flwr/proto/recordset_pb2_grpc.py new file mode 100644 index 000000000000..2daafffebfc8 --- /dev/null +++ b/src/py/flwr/proto/recordset_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/src/py/flwr/proto/recordset_pb2_grpc.pyi b/src/py/flwr/proto/recordset_pb2_grpc.pyi new file mode 100644 index 000000000000..f3a5a087ef5d --- /dev/null +++ b/src/py/flwr/proto/recordset_pb2_grpc.pyi @@ -0,0 +1,4 @@ +""" +@generated by mypy-protobuf. Do not edit manually! +isort:skip_file +""" diff --git a/src/py/flwr/proto/task_pb2.py b/src/py/flwr/proto/task_pb2.py index ba0e2e3f5218..9294b27eec0e 100644 --- a/src/py/flwr/proto/task_pb2.py +++ b/src/py/flwr/proto/task_pb2.py @@ -1,148 +1,33 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: flwr/proto/task.proto +# Protobuf Python Version: 4.25.0 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from flwr.proto import node_pb2 as flwr_dot_proto_dot_node__pb2 +from flwr.proto import recordset_pb2 as flwr_dot_proto_dot_recordset__pb2 from flwr.proto import transport_pb2 as flwr_dot_proto_dot_transport__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/task.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/transport.proto\"\xbe\x02\n\x04Task\x12\"\n\x08producer\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\"\n\x08\x63onsumer\x18\x02 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x12\n\ncreated_at\x18\x03 \x01(\t\x12\x14\n\x0c\x64\x65livered_at\x18\x04 \x01(\t\x12\x0b\n\x03ttl\x18\x05 \x01(\t\x12\x10\n\x08\x61ncestry\x18\x06 \x03(\t\x12)\n\x02sa\x18\x07 \x01(\x0b\x32\x1d.flwr.proto.SecureAggregation\x12<\n\x15legacy_server_message\x18\x65 \x01(\x0b\x32\x19.flwr.proto.ServerMessageB\x02\x18\x01\x12<\n\x15legacy_client_message\x18\x66 \x01(\x0b\x32\x19.flwr.proto.ClientMessageB\x02\x18\x01\"\\\n\x07TaskIns\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Task\"\\\n\x07TaskRes\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Task\"\xf3\x03\n\x05Value\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06sint64\x18\x02 \x01(\x12H\x00\x12\x0e\n\x04\x62ool\x18\x03 
\x01(\x08H\x00\x12\x10\n\x06string\x18\x04 \x01(\tH\x00\x12\x0f\n\x05\x62ytes\x18\x05 \x01(\x0cH\x00\x12\x33\n\x0b\x64ouble_list\x18\x15 \x01(\x0b\x32\x1c.flwr.proto.Value.DoubleListH\x00\x12\x33\n\x0bsint64_list\x18\x16 \x01(\x0b\x32\x1c.flwr.proto.Value.Sint64ListH\x00\x12/\n\tbool_list\x18\x17 \x01(\x0b\x32\x1a.flwr.proto.Value.BoolListH\x00\x12\x33\n\x0bstring_list\x18\x18 \x01(\x0b\x32\x1c.flwr.proto.Value.StringListH\x00\x12\x31\n\nbytes_list\x18\x19 \x01(\x0b\x32\x1b.flwr.proto.Value.BytesListH\x00\x1a\x1a\n\nDoubleList\x12\x0c\n\x04vals\x18\x01 \x03(\x01\x1a\x1a\n\nSint64List\x12\x0c\n\x04vals\x18\x01 \x03(\x12\x1a\x18\n\x08\x42oolList\x12\x0c\n\x04vals\x18\x01 \x03(\x08\x1a\x1a\n\nStringList\x12\x0c\n\x04vals\x18\x01 \x03(\t\x1a\x19\n\tBytesList\x12\x0c\n\x04vals\x18\x01 \x03(\x0c\x42\x07\n\x05value\"\xa0\x01\n\x11SecureAggregation\x12\x44\n\x0cnamed_values\x18\x01 \x03(\x0b\x32..flwr.proto.SecureAggregation.NamedValuesEntry\x1a\x45\n\x10NamedValuesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.flwr.proto.Value:\x02\x38\x01\x62\x06proto3') - - - -_TASK = DESCRIPTOR.message_types_by_name['Task'] -_TASKINS = DESCRIPTOR.message_types_by_name['TaskIns'] -_TASKRES = DESCRIPTOR.message_types_by_name['TaskRes'] -_VALUE = DESCRIPTOR.message_types_by_name['Value'] -_VALUE_DOUBLELIST = _VALUE.nested_types_by_name['DoubleList'] -_VALUE_SINT64LIST = _VALUE.nested_types_by_name['Sint64List'] -_VALUE_BOOLLIST = _VALUE.nested_types_by_name['BoolList'] -_VALUE_STRINGLIST = _VALUE.nested_types_by_name['StringList'] -_VALUE_BYTESLIST = _VALUE.nested_types_by_name['BytesList'] -_SECUREAGGREGATION = DESCRIPTOR.message_types_by_name['SecureAggregation'] -_SECUREAGGREGATION_NAMEDVALUESENTRY = _SECUREAGGREGATION.nested_types_by_name['NamedValuesEntry'] -Task = _reflection.GeneratedProtocolMessageType('Task', (_message.Message,), { - 'DESCRIPTOR' : _TASK, - '__module__' : 'flwr.proto.task_pb2' - # 
@@protoc_insertion_point(class_scope:flwr.proto.Task) - }) -_sym_db.RegisterMessage(Task) - -TaskIns = _reflection.GeneratedProtocolMessageType('TaskIns', (_message.Message,), { - 'DESCRIPTOR' : _TASKINS, - '__module__' : 'flwr.proto.task_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.TaskIns) - }) -_sym_db.RegisterMessage(TaskIns) - -TaskRes = _reflection.GeneratedProtocolMessageType('TaskRes', (_message.Message,), { - 'DESCRIPTOR' : _TASKRES, - '__module__' : 'flwr.proto.task_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.TaskRes) - }) -_sym_db.RegisterMessage(TaskRes) - -Value = _reflection.GeneratedProtocolMessageType('Value', (_message.Message,), { - - 'DoubleList' : _reflection.GeneratedProtocolMessageType('DoubleList', (_message.Message,), { - 'DESCRIPTOR' : _VALUE_DOUBLELIST, - '__module__' : 'flwr.proto.task_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.Value.DoubleList) - }) - , - - 'Sint64List' : _reflection.GeneratedProtocolMessageType('Sint64List', (_message.Message,), { - 'DESCRIPTOR' : _VALUE_SINT64LIST, - '__module__' : 'flwr.proto.task_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.Value.Sint64List) - }) - , - - 'BoolList' : _reflection.GeneratedProtocolMessageType('BoolList', (_message.Message,), { - 'DESCRIPTOR' : _VALUE_BOOLLIST, - '__module__' : 'flwr.proto.task_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.Value.BoolList) - }) - , - - 'StringList' : _reflection.GeneratedProtocolMessageType('StringList', (_message.Message,), { - 'DESCRIPTOR' : _VALUE_STRINGLIST, - '__module__' : 'flwr.proto.task_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.Value.StringList) - }) - , - - 'BytesList' : _reflection.GeneratedProtocolMessageType('BytesList', (_message.Message,), { - 'DESCRIPTOR' : _VALUE_BYTESLIST, - '__module__' : 'flwr.proto.task_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.Value.BytesList) - }) - , - 'DESCRIPTOR' : _VALUE, - '__module__' : 'flwr.proto.task_pb2' - # 
@@protoc_insertion_point(class_scope:flwr.proto.Value) - }) -_sym_db.RegisterMessage(Value) -_sym_db.RegisterMessage(Value.DoubleList) -_sym_db.RegisterMessage(Value.Sint64List) -_sym_db.RegisterMessage(Value.BoolList) -_sym_db.RegisterMessage(Value.StringList) -_sym_db.RegisterMessage(Value.BytesList) - -SecureAggregation = _reflection.GeneratedProtocolMessageType('SecureAggregation', (_message.Message,), { - - 'NamedValuesEntry' : _reflection.GeneratedProtocolMessageType('NamedValuesEntry', (_message.Message,), { - 'DESCRIPTOR' : _SECUREAGGREGATION_NAMEDVALUESENTRY, - '__module__' : 'flwr.proto.task_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.SecureAggregation.NamedValuesEntry) - }) - , - 'DESCRIPTOR' : _SECUREAGGREGATION, - '__module__' : 'flwr.proto.task_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.SecureAggregation) - }) -_sym_db.RegisterMessage(SecureAggregation) -_sym_db.RegisterMessage(SecureAggregation.NamedValuesEntry) +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/task.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x1a\x66lwr/proto/transport.proto\"\xd4\x01\n\x04Task\x12\"\n\x08producer\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\"\n\x08\x63onsumer\x18\x02 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x12\n\ncreated_at\x18\x03 \x01(\t\x12\x14\n\x0c\x64\x65livered_at\x18\x04 \x01(\t\x12\x0b\n\x03ttl\x18\x05 \x01(\t\x12\x10\n\x08\x61ncestry\x18\x06 \x03(\t\x12\x11\n\ttask_type\x18\x07 \x01(\t\x12(\n\trecordset\x18\x08 \x01(\x0b\x32\x15.flwr.proto.RecordSet\"\\\n\x07TaskIns\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Task\"\\\n\x07TaskRes\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Taskb\x06proto3') +_globals = 
globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.task_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS == False: - DESCRIPTOR._options = None - _TASK.fields_by_name['legacy_server_message']._options = None - _TASK.fields_by_name['legacy_server_message']._serialized_options = b'\030\001' - _TASK.fields_by_name['legacy_client_message']._options = None - _TASK.fields_by_name['legacy_client_message']._serialized_options = b'\030\001' - _SECUREAGGREGATION_NAMEDVALUESENTRY._options = None - _SECUREAGGREGATION_NAMEDVALUESENTRY._serialized_options = b'8\001' - _TASK._serialized_start=89 - _TASK._serialized_end=407 - _TASKINS._serialized_start=409 - _TASKINS._serialized_end=501 - _TASKRES._serialized_start=503 - _TASKRES._serialized_end=595 - _VALUE._serialized_start=598 - _VALUE._serialized_end=1097 - _VALUE_DOUBLELIST._serialized_start=953 - _VALUE_DOUBLELIST._serialized_end=979 - _VALUE_SINT64LIST._serialized_start=981 - _VALUE_SINT64LIST._serialized_end=1007 - _VALUE_BOOLLIST._serialized_start=1009 - _VALUE_BOOLLIST._serialized_end=1033 - _VALUE_STRINGLIST._serialized_start=1035 - _VALUE_STRINGLIST._serialized_end=1061 - _VALUE_BYTESLIST._serialized_start=1063 - _VALUE_BYTESLIST._serialized_end=1088 - _SECUREAGGREGATION._serialized_start=1100 - _SECUREAGGREGATION._serialized_end=1260 - _SECUREAGGREGATION_NAMEDVALUESENTRY._serialized_start=1191 - _SECUREAGGREGATION_NAMEDVALUESENTRY._serialized_end=1260 + _globals['_TASK']._serialized_start=117 + _globals['_TASK']._serialized_end=329 + _globals['_TASKINS']._serialized_start=331 + _globals['_TASKINS']._serialized_end=423 + _globals['_TASKRES']._serialized_start=425 + _globals['_TASKRES']._serialized_end=517 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/task_pb2.pyi b/src/py/flwr/proto/task_pb2.pyi index f40a66ef98d1..4c8412fd691a 100644 --- a/src/py/flwr/proto/task_pb2.pyi +++ b/src/py/flwr/proto/task_pb2.pyi 
@@ -4,7 +4,7 @@ isort:skip_file """ import builtins import flwr.proto.node_pb2 -import flwr.proto.transport_pb2 +import flwr.proto.recordset_pb2 import google.protobuf.descriptor import google.protobuf.internal.containers import google.protobuf.message @@ -21,9 +21,8 @@ class Task(google.protobuf.message.Message): DELIVERED_AT_FIELD_NUMBER: builtins.int TTL_FIELD_NUMBER: builtins.int ANCESTRY_FIELD_NUMBER: builtins.int - SA_FIELD_NUMBER: builtins.int - LEGACY_SERVER_MESSAGE_FIELD_NUMBER: builtins.int - LEGACY_CLIENT_MESSAGE_FIELD_NUMBER: builtins.int + TASK_TYPE_FIELD_NUMBER: builtins.int + RECORDSET_FIELD_NUMBER: builtins.int @property def producer(self) -> flwr.proto.node_pb2.Node: ... @property @@ -33,12 +32,9 @@ class Task(google.protobuf.message.Message): ttl: typing.Text @property def ancestry(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[typing.Text]: ... + task_type: typing.Text @property - def sa(self) -> global___SecureAggregation: ... - @property - def legacy_server_message(self) -> flwr.proto.transport_pb2.ServerMessage: ... - @property - def legacy_client_message(self) -> flwr.proto.transport_pb2.ClientMessage: ... + def recordset(self) -> flwr.proto.recordset_pb2.RecordSet: ... def __init__(self, *, producer: typing.Optional[flwr.proto.node_pb2.Node] = ..., @@ -47,12 +43,11 @@ class Task(google.protobuf.message.Message): delivered_at: typing.Text = ..., ttl: typing.Text = ..., ancestry: typing.Optional[typing.Iterable[typing.Text]] = ..., - sa: typing.Optional[global___SecureAggregation] = ..., - legacy_server_message: typing.Optional[flwr.proto.transport_pb2.ServerMessage] = ..., - legacy_client_message: typing.Optional[flwr.proto.transport_pb2.ClientMessage] = ..., + task_type: typing.Text = ..., + recordset: typing.Optional[flwr.proto.recordset_pb2.RecordSet] = ..., ) -> None: ... 
- def HasField(self, field_name: typing_extensions.Literal["consumer",b"consumer","legacy_client_message",b"legacy_client_message","legacy_server_message",b"legacy_server_message","producer",b"producer","sa",b"sa"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["ancestry",b"ancestry","consumer",b"consumer","created_at",b"created_at","delivered_at",b"delivered_at","legacy_client_message",b"legacy_client_message","legacy_server_message",b"legacy_server_message","producer",b"producer","sa",b"sa","ttl",b"ttl"]) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["consumer",b"consumer","producer",b"producer","recordset",b"recordset"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["ancestry",b"ancestry","consumer",b"consumer","created_at",b"created_at","delivered_at",b"delivered_at","producer",b"producer","recordset",b"recordset","task_type",b"task_type","ttl",b"ttl"]) -> None: ... global___Task = Task class TaskIns(google.protobuf.message.Message): @@ -98,134 +93,3 @@ class TaskRes(google.protobuf.message.Message): def HasField(self, field_name: typing_extensions.Literal["task",b"task"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["group_id",b"group_id","run_id",b"run_id","task",b"task","task_id",b"task_id"]) -> None: ... global___TaskRes = TaskRes - -class Value(google.protobuf.message.Message): - DESCRIPTOR: google.protobuf.descriptor.Descriptor - class DoubleList(google.protobuf.message.Message): - DESCRIPTOR: google.protobuf.descriptor.Descriptor - VALS_FIELD_NUMBER: builtins.int - @property - def vals(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.float]: ... - def __init__(self, - *, - vals: typing.Optional[typing.Iterable[builtins.float]] = ..., - ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... 
- - class Sint64List(google.protobuf.message.Message): - DESCRIPTOR: google.protobuf.descriptor.Descriptor - VALS_FIELD_NUMBER: builtins.int - @property - def vals(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: ... - def __init__(self, - *, - vals: typing.Optional[typing.Iterable[builtins.int]] = ..., - ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... - - class BoolList(google.protobuf.message.Message): - DESCRIPTOR: google.protobuf.descriptor.Descriptor - VALS_FIELD_NUMBER: builtins.int - @property - def vals(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.bool]: ... - def __init__(self, - *, - vals: typing.Optional[typing.Iterable[builtins.bool]] = ..., - ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... - - class StringList(google.protobuf.message.Message): - DESCRIPTOR: google.protobuf.descriptor.Descriptor - VALS_FIELD_NUMBER: builtins.int - @property - def vals(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[typing.Text]: ... - def __init__(self, - *, - vals: typing.Optional[typing.Iterable[typing.Text]] = ..., - ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... - - class BytesList(google.protobuf.message.Message): - DESCRIPTOR: google.protobuf.descriptor.Descriptor - VALS_FIELD_NUMBER: builtins.int - @property - def vals(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.bytes]: ... - def __init__(self, - *, - vals: typing.Optional[typing.Iterable[builtins.bytes]] = ..., - ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... 
- - DOUBLE_FIELD_NUMBER: builtins.int - SINT64_FIELD_NUMBER: builtins.int - BOOL_FIELD_NUMBER: builtins.int - STRING_FIELD_NUMBER: builtins.int - BYTES_FIELD_NUMBER: builtins.int - DOUBLE_LIST_FIELD_NUMBER: builtins.int - SINT64_LIST_FIELD_NUMBER: builtins.int - BOOL_LIST_FIELD_NUMBER: builtins.int - STRING_LIST_FIELD_NUMBER: builtins.int - BYTES_LIST_FIELD_NUMBER: builtins.int - double: builtins.float - """Single element""" - - sint64: builtins.int - bool: builtins.bool - string: typing.Text - bytes: builtins.bytes - @property - def double_list(self) -> global___Value.DoubleList: - """List types""" - pass - @property - def sint64_list(self) -> global___Value.Sint64List: ... - @property - def bool_list(self) -> global___Value.BoolList: ... - @property - def string_list(self) -> global___Value.StringList: ... - @property - def bytes_list(self) -> global___Value.BytesList: ... - def __init__(self, - *, - double: builtins.float = ..., - sint64: builtins.int = ..., - bool: builtins.bool = ..., - string: typing.Text = ..., - bytes: builtins.bytes = ..., - double_list: typing.Optional[global___Value.DoubleList] = ..., - sint64_list: typing.Optional[global___Value.Sint64List] = ..., - bool_list: typing.Optional[global___Value.BoolList] = ..., - string_list: typing.Optional[global___Value.StringList] = ..., - bytes_list: typing.Optional[global___Value.BytesList] = ..., - ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["bool",b"bool","bool_list",b"bool_list","bytes",b"bytes","bytes_list",b"bytes_list","double",b"double","double_list",b"double_list","sint64",b"sint64","sint64_list",b"sint64_list","string",b"string","string_list",b"string_list","value",b"value"]) -> builtins.bool: ... 
- def ClearField(self, field_name: typing_extensions.Literal["bool",b"bool","bool_list",b"bool_list","bytes",b"bytes","bytes_list",b"bytes_list","double",b"double","double_list",b"double_list","sint64",b"sint64","sint64_list",b"sint64_list","string",b"string","string_list",b"string_list","value",b"value"]) -> None: ... - def WhichOneof(self, oneof_group: typing_extensions.Literal["value",b"value"]) -> typing.Optional[typing_extensions.Literal["double","sint64","bool","string","bytes","double_list","sint64_list","bool_list","string_list","bytes_list"]]: ... -global___Value = Value - -class SecureAggregation(google.protobuf.message.Message): - DESCRIPTOR: google.protobuf.descriptor.Descriptor - class NamedValuesEntry(google.protobuf.message.Message): - DESCRIPTOR: google.protobuf.descriptor.Descriptor - KEY_FIELD_NUMBER: builtins.int - VALUE_FIELD_NUMBER: builtins.int - key: typing.Text - @property - def value(self) -> global___Value: ... - def __init__(self, - *, - key: typing.Text = ..., - value: typing.Optional[global___Value] = ..., - ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... - - NAMED_VALUES_FIELD_NUMBER: builtins.int - @property - def named_values(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___Value]: ... - def __init__(self, - *, - named_values: typing.Optional[typing.Mapping[typing.Text, global___Value]] = ..., - ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["named_values",b"named_values"]) -> None: ... 
-global___SecureAggregation = SecureAggregation diff --git a/src/py/flwr/proto/transport_pb2.py b/src/py/flwr/proto/transport_pb2.py index 1e3785b0e312..d3aae72b63ab 100644 --- a/src/py/flwr/proto/transport_pb2.py +++ b/src/py/flwr/proto/transport_pb2.py @@ -1,13 +1,12 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: flwr/proto/transport.proto +# Protobuf Python Version: 4.25.0 """Generated protocol buffer code.""" -from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -17,281 +16,73 @@ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1a\x66lwr/proto/transport.proto\x12\nflwr.proto\"9\n\x06Status\x12\x1e\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x10.flwr.proto.Code\x12\x0f\n\x07message\x18\x02 \x01(\t\"2\n\nParameters\x12\x0f\n\x07tensors\x18\x01 \x03(\x0c\x12\x13\n\x0btensor_type\x18\x02 \x01(\t\"\xba\x08\n\rServerMessage\x12?\n\rreconnect_ins\x18\x01 \x01(\x0b\x32&.flwr.proto.ServerMessage.ReconnectInsH\x00\x12H\n\x12get_properties_ins\x18\x02 \x01(\x0b\x32*.flwr.proto.ServerMessage.GetPropertiesInsH\x00\x12H\n\x12get_parameters_ins\x18\x03 \x01(\x0b\x32*.flwr.proto.ServerMessage.GetParametersInsH\x00\x12\x33\n\x07\x66it_ins\x18\x04 \x01(\x0b\x32 .flwr.proto.ServerMessage.FitInsH\x00\x12=\n\x0c\x65valuate_ins\x18\x05 \x01(\x0b\x32%.flwr.proto.ServerMessage.EvaluateInsH\x00\x1a\x1f\n\x0cReconnectIns\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x1a\x9d\x01\n\x10GetPropertiesIns\x12\x46\n\x06\x63onfig\x18\x01 
\x03(\x0b\x32\x36.flwr.proto.ServerMessage.GetPropertiesIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\x9d\x01\n\x10GetParametersIns\x12\x46\n\x06\x63onfig\x18\x01 \x03(\x0b\x32\x36.flwr.proto.ServerMessage.GetParametersIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\xb5\x01\n\x06\x46itIns\x12*\n\nparameters\x18\x01 \x01(\x0b\x32\x16.flwr.proto.Parameters\x12<\n\x06\x63onfig\x18\x02 \x03(\x0b\x32,.flwr.proto.ServerMessage.FitIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\xbf\x01\n\x0b\x45valuateIns\x12*\n\nparameters\x18\x01 \x01(\x0b\x32\x16.flwr.proto.Parameters\x12\x41\n\x06\x63onfig\x18\x02 \x03(\x0b\x32\x31.flwr.proto.ServerMessage.EvaluateIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x42\x05\n\x03msg\"\xa0\t\n\rClientMessage\x12\x41\n\x0e\x64isconnect_res\x18\x01 \x01(\x0b\x32\'.flwr.proto.ClientMessage.DisconnectResH\x00\x12H\n\x12get_properties_res\x18\x02 \x01(\x0b\x32*.flwr.proto.ClientMessage.GetPropertiesResH\x00\x12H\n\x12get_parameters_res\x18\x03 \x01(\x0b\x32*.flwr.proto.ClientMessage.GetParametersResH\x00\x12\x33\n\x07\x66it_res\x18\x04 \x01(\x0b\x32 .flwr.proto.ClientMessage.FitResH\x00\x12=\n\x0c\x65valuate_res\x18\x05 \x01(\x0b\x32%.flwr.proto.ClientMessage.EvaluateResH\x00\x1a\x33\n\rDisconnectRes\x12\"\n\x06reason\x18\x01 \x01(\x0e\x32\x12.flwr.proto.Reason\x1a\xcd\x01\n\x10GetPropertiesRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12N\n\nproperties\x18\x02 \x03(\x0b\x32:.flwr.proto.ClientMessage.GetPropertiesRes.PropertiesEntry\x1a\x45\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 
\x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\x62\n\x10GetParametersRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12*\n\nparameters\x18\x02 \x01(\x0b\x32\x16.flwr.proto.Parameters\x1a\xf2\x01\n\x06\x46itRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12*\n\nparameters\x18\x02 \x01(\x0b\x32\x16.flwr.proto.Parameters\x12\x14\n\x0cnum_examples\x18\x03 \x01(\x03\x12>\n\x07metrics\x18\x04 \x03(\x0b\x32-.flwr.proto.ClientMessage.FitRes.MetricsEntry\x1a\x42\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\xde\x01\n\x0b\x45valuateRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12\x0c\n\x04loss\x18\x02 \x01(\x02\x12\x14\n\x0cnum_examples\x18\x03 \x01(\x03\x12\x43\n\x07metrics\x18\x04 \x03(\x0b\x32\x32.flwr.proto.ClientMessage.EvaluateRes.MetricsEntry\x1a\x42\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x42\x05\n\x03msg\"i\n\x06Scalar\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06sint64\x18\x08 \x01(\x12H\x00\x12\x0e\n\x04\x62ool\x18\r \x01(\x08H\x00\x12\x10\n\x06string\x18\x0e \x01(\tH\x00\x12\x0f\n\x05\x62ytes\x18\x0f \x01(\x0cH\x00\x42\x08\n\x06scalar*\x8d\x01\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\"\n\x1eGET_PROPERTIES_NOT_IMPLEMENTED\x10\x01\x12\"\n\x1eGET_PARAMETERS_NOT_IMPLEMENTED\x10\x02\x12\x17\n\x13\x46IT_NOT_IMPLEMENTED\x10\x03\x12\x1c\n\x18\x45VALUATE_NOT_IMPLEMENTED\x10\x04*[\n\x06Reason\x12\x0b\n\x07UNKNOWN\x10\x00\x12\r\n\tRECONNECT\x10\x01\x12\x16\n\x12POWER_DISCONNECTED\x10\x02\x12\x14\n\x10WIFI_UNAVAILABLE\x10\x03\x12\x07\n\x03\x41\x43K\x10\x04\x32S\n\rFlowerService\x12\x42\n\x04Join\x12\x19.flwr.proto.ClientMessage\x1a\x19.flwr.proto.ServerMessage\"\x00(\x01\x30\x01\x62\x06proto3') -_CODE = DESCRIPTOR.enum_types_by_name['Code'] -Code = enum_type_wrapper.EnumTypeWrapper(_CODE) -_REASON = 
DESCRIPTOR.enum_types_by_name['Reason'] -Reason = enum_type_wrapper.EnumTypeWrapper(_REASON) -OK = 0 -GET_PROPERTIES_NOT_IMPLEMENTED = 1 -GET_PARAMETERS_NOT_IMPLEMENTED = 2 -FIT_NOT_IMPLEMENTED = 3 -EVALUATE_NOT_IMPLEMENTED = 4 -UNKNOWN = 0 -RECONNECT = 1 -POWER_DISCONNECTED = 2 -WIFI_UNAVAILABLE = 3 -ACK = 4 - - -_STATUS = DESCRIPTOR.message_types_by_name['Status'] -_PARAMETERS = DESCRIPTOR.message_types_by_name['Parameters'] -_SERVERMESSAGE = DESCRIPTOR.message_types_by_name['ServerMessage'] -_SERVERMESSAGE_RECONNECTINS = _SERVERMESSAGE.nested_types_by_name['ReconnectIns'] -_SERVERMESSAGE_GETPROPERTIESINS = _SERVERMESSAGE.nested_types_by_name['GetPropertiesIns'] -_SERVERMESSAGE_GETPROPERTIESINS_CONFIGENTRY = _SERVERMESSAGE_GETPROPERTIESINS.nested_types_by_name['ConfigEntry'] -_SERVERMESSAGE_GETPARAMETERSINS = _SERVERMESSAGE.nested_types_by_name['GetParametersIns'] -_SERVERMESSAGE_GETPARAMETERSINS_CONFIGENTRY = _SERVERMESSAGE_GETPARAMETERSINS.nested_types_by_name['ConfigEntry'] -_SERVERMESSAGE_FITINS = _SERVERMESSAGE.nested_types_by_name['FitIns'] -_SERVERMESSAGE_FITINS_CONFIGENTRY = _SERVERMESSAGE_FITINS.nested_types_by_name['ConfigEntry'] -_SERVERMESSAGE_EVALUATEINS = _SERVERMESSAGE.nested_types_by_name['EvaluateIns'] -_SERVERMESSAGE_EVALUATEINS_CONFIGENTRY = _SERVERMESSAGE_EVALUATEINS.nested_types_by_name['ConfigEntry'] -_CLIENTMESSAGE = DESCRIPTOR.message_types_by_name['ClientMessage'] -_CLIENTMESSAGE_DISCONNECTRES = _CLIENTMESSAGE.nested_types_by_name['DisconnectRes'] -_CLIENTMESSAGE_GETPROPERTIESRES = _CLIENTMESSAGE.nested_types_by_name['GetPropertiesRes'] -_CLIENTMESSAGE_GETPROPERTIESRES_PROPERTIESENTRY = _CLIENTMESSAGE_GETPROPERTIESRES.nested_types_by_name['PropertiesEntry'] -_CLIENTMESSAGE_GETPARAMETERSRES = _CLIENTMESSAGE.nested_types_by_name['GetParametersRes'] -_CLIENTMESSAGE_FITRES = _CLIENTMESSAGE.nested_types_by_name['FitRes'] -_CLIENTMESSAGE_FITRES_METRICSENTRY = _CLIENTMESSAGE_FITRES.nested_types_by_name['MetricsEntry'] -_CLIENTMESSAGE_EVALUATERES 
= _CLIENTMESSAGE.nested_types_by_name['EvaluateRes'] -_CLIENTMESSAGE_EVALUATERES_METRICSENTRY = _CLIENTMESSAGE_EVALUATERES.nested_types_by_name['MetricsEntry'] -_SCALAR = DESCRIPTOR.message_types_by_name['Scalar'] -Status = _reflection.GeneratedProtocolMessageType('Status', (_message.Message,), { - 'DESCRIPTOR' : _STATUS, - '__module__' : 'flwr.proto.transport_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.Status) - }) -_sym_db.RegisterMessage(Status) - -Parameters = _reflection.GeneratedProtocolMessageType('Parameters', (_message.Message,), { - 'DESCRIPTOR' : _PARAMETERS, - '__module__' : 'flwr.proto.transport_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.Parameters) - }) -_sym_db.RegisterMessage(Parameters) - -ServerMessage = _reflection.GeneratedProtocolMessageType('ServerMessage', (_message.Message,), { - - 'ReconnectIns' : _reflection.GeneratedProtocolMessageType('ReconnectIns', (_message.Message,), { - 'DESCRIPTOR' : _SERVERMESSAGE_RECONNECTINS, - '__module__' : 'flwr.proto.transport_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.ServerMessage.ReconnectIns) - }) - , - - 'GetPropertiesIns' : _reflection.GeneratedProtocolMessageType('GetPropertiesIns', (_message.Message,), { - - 'ConfigEntry' : _reflection.GeneratedProtocolMessageType('ConfigEntry', (_message.Message,), { - 'DESCRIPTOR' : _SERVERMESSAGE_GETPROPERTIESINS_CONFIGENTRY, - '__module__' : 'flwr.proto.transport_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.ServerMessage.GetPropertiesIns.ConfigEntry) - }) - , - 'DESCRIPTOR' : _SERVERMESSAGE_GETPROPERTIESINS, - '__module__' : 'flwr.proto.transport_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.ServerMessage.GetPropertiesIns) - }) - , - - 'GetParametersIns' : _reflection.GeneratedProtocolMessageType('GetParametersIns', (_message.Message,), { - - 'ConfigEntry' : _reflection.GeneratedProtocolMessageType('ConfigEntry', (_message.Message,), { - 'DESCRIPTOR' : _SERVERMESSAGE_GETPARAMETERSINS_CONFIGENTRY, - 
'__module__' : 'flwr.proto.transport_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.ServerMessage.GetParametersIns.ConfigEntry) - }) - , - 'DESCRIPTOR' : _SERVERMESSAGE_GETPARAMETERSINS, - '__module__' : 'flwr.proto.transport_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.ServerMessage.GetParametersIns) - }) - , - - 'FitIns' : _reflection.GeneratedProtocolMessageType('FitIns', (_message.Message,), { - - 'ConfigEntry' : _reflection.GeneratedProtocolMessageType('ConfigEntry', (_message.Message,), { - 'DESCRIPTOR' : _SERVERMESSAGE_FITINS_CONFIGENTRY, - '__module__' : 'flwr.proto.transport_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.ServerMessage.FitIns.ConfigEntry) - }) - , - 'DESCRIPTOR' : _SERVERMESSAGE_FITINS, - '__module__' : 'flwr.proto.transport_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.ServerMessage.FitIns) - }) - , - - 'EvaluateIns' : _reflection.GeneratedProtocolMessageType('EvaluateIns', (_message.Message,), { - - 'ConfigEntry' : _reflection.GeneratedProtocolMessageType('ConfigEntry', (_message.Message,), { - 'DESCRIPTOR' : _SERVERMESSAGE_EVALUATEINS_CONFIGENTRY, - '__module__' : 'flwr.proto.transport_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.ServerMessage.EvaluateIns.ConfigEntry) - }) - , - 'DESCRIPTOR' : _SERVERMESSAGE_EVALUATEINS, - '__module__' : 'flwr.proto.transport_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.ServerMessage.EvaluateIns) - }) - , - 'DESCRIPTOR' : _SERVERMESSAGE, - '__module__' : 'flwr.proto.transport_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.ServerMessage) - }) -_sym_db.RegisterMessage(ServerMessage) -_sym_db.RegisterMessage(ServerMessage.ReconnectIns) -_sym_db.RegisterMessage(ServerMessage.GetPropertiesIns) -_sym_db.RegisterMessage(ServerMessage.GetPropertiesIns.ConfigEntry) -_sym_db.RegisterMessage(ServerMessage.GetParametersIns) -_sym_db.RegisterMessage(ServerMessage.GetParametersIns.ConfigEntry) -_sym_db.RegisterMessage(ServerMessage.FitIns) 
-_sym_db.RegisterMessage(ServerMessage.FitIns.ConfigEntry) -_sym_db.RegisterMessage(ServerMessage.EvaluateIns) -_sym_db.RegisterMessage(ServerMessage.EvaluateIns.ConfigEntry) - -ClientMessage = _reflection.GeneratedProtocolMessageType('ClientMessage', (_message.Message,), { - - 'DisconnectRes' : _reflection.GeneratedProtocolMessageType('DisconnectRes', (_message.Message,), { - 'DESCRIPTOR' : _CLIENTMESSAGE_DISCONNECTRES, - '__module__' : 'flwr.proto.transport_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.ClientMessage.DisconnectRes) - }) - , - - 'GetPropertiesRes' : _reflection.GeneratedProtocolMessageType('GetPropertiesRes', (_message.Message,), { - - 'PropertiesEntry' : _reflection.GeneratedProtocolMessageType('PropertiesEntry', (_message.Message,), { - 'DESCRIPTOR' : _CLIENTMESSAGE_GETPROPERTIESRES_PROPERTIESENTRY, - '__module__' : 'flwr.proto.transport_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.ClientMessage.GetPropertiesRes.PropertiesEntry) - }) - , - 'DESCRIPTOR' : _CLIENTMESSAGE_GETPROPERTIESRES, - '__module__' : 'flwr.proto.transport_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.ClientMessage.GetPropertiesRes) - }) - , - - 'GetParametersRes' : _reflection.GeneratedProtocolMessageType('GetParametersRes', (_message.Message,), { - 'DESCRIPTOR' : _CLIENTMESSAGE_GETPARAMETERSRES, - '__module__' : 'flwr.proto.transport_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.ClientMessage.GetParametersRes) - }) - , - - 'FitRes' : _reflection.GeneratedProtocolMessageType('FitRes', (_message.Message,), { - - 'MetricsEntry' : _reflection.GeneratedProtocolMessageType('MetricsEntry', (_message.Message,), { - 'DESCRIPTOR' : _CLIENTMESSAGE_FITRES_METRICSENTRY, - '__module__' : 'flwr.proto.transport_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.ClientMessage.FitRes.MetricsEntry) - }) - , - 'DESCRIPTOR' : _CLIENTMESSAGE_FITRES, - '__module__' : 'flwr.proto.transport_pb2' - # 
@@protoc_insertion_point(class_scope:flwr.proto.ClientMessage.FitRes) - }) - , - - 'EvaluateRes' : _reflection.GeneratedProtocolMessageType('EvaluateRes', (_message.Message,), { - - 'MetricsEntry' : _reflection.GeneratedProtocolMessageType('MetricsEntry', (_message.Message,), { - 'DESCRIPTOR' : _CLIENTMESSAGE_EVALUATERES_METRICSENTRY, - '__module__' : 'flwr.proto.transport_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.ClientMessage.EvaluateRes.MetricsEntry) - }) - , - 'DESCRIPTOR' : _CLIENTMESSAGE_EVALUATERES, - '__module__' : 'flwr.proto.transport_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.ClientMessage.EvaluateRes) - }) - , - 'DESCRIPTOR' : _CLIENTMESSAGE, - '__module__' : 'flwr.proto.transport_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.ClientMessage) - }) -_sym_db.RegisterMessage(ClientMessage) -_sym_db.RegisterMessage(ClientMessage.DisconnectRes) -_sym_db.RegisterMessage(ClientMessage.GetPropertiesRes) -_sym_db.RegisterMessage(ClientMessage.GetPropertiesRes.PropertiesEntry) -_sym_db.RegisterMessage(ClientMessage.GetParametersRes) -_sym_db.RegisterMessage(ClientMessage.FitRes) -_sym_db.RegisterMessage(ClientMessage.FitRes.MetricsEntry) -_sym_db.RegisterMessage(ClientMessage.EvaluateRes) -_sym_db.RegisterMessage(ClientMessage.EvaluateRes.MetricsEntry) - -Scalar = _reflection.GeneratedProtocolMessageType('Scalar', (_message.Message,), { - 'DESCRIPTOR' : _SCALAR, - '__module__' : 'flwr.proto.transport_pb2' - # @@protoc_insertion_point(class_scope:flwr.proto.Scalar) - }) -_sym_db.RegisterMessage(Scalar) - -_FLOWERSERVICE = DESCRIPTOR.services_by_name['FlowerService'] +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.transport_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS == False: - DESCRIPTOR._options = None - _SERVERMESSAGE_GETPROPERTIESINS_CONFIGENTRY._options = None - 
_SERVERMESSAGE_GETPROPERTIESINS_CONFIGENTRY._serialized_options = b'8\001' - _SERVERMESSAGE_GETPARAMETERSINS_CONFIGENTRY._options = None - _SERVERMESSAGE_GETPARAMETERSINS_CONFIGENTRY._serialized_options = b'8\001' - _SERVERMESSAGE_FITINS_CONFIGENTRY._options = None - _SERVERMESSAGE_FITINS_CONFIGENTRY._serialized_options = b'8\001' - _SERVERMESSAGE_EVALUATEINS_CONFIGENTRY._options = None - _SERVERMESSAGE_EVALUATEINS_CONFIGENTRY._serialized_options = b'8\001' - _CLIENTMESSAGE_GETPROPERTIESRES_PROPERTIESENTRY._options = None - _CLIENTMESSAGE_GETPROPERTIESRES_PROPERTIESENTRY._serialized_options = b'8\001' - _CLIENTMESSAGE_FITRES_METRICSENTRY._options = None - _CLIENTMESSAGE_FITRES_METRICSENTRY._serialized_options = b'8\001' - _CLIENTMESSAGE_EVALUATERES_METRICSENTRY._options = None - _CLIENTMESSAGE_EVALUATERES_METRICSENTRY._serialized_options = b'8\001' - _CODE._serialized_start=2533 - _CODE._serialized_end=2674 - _REASON._serialized_start=2676 - _REASON._serialized_end=2767 - _STATUS._serialized_start=42 - _STATUS._serialized_end=99 - _PARAMETERS._serialized_start=101 - _PARAMETERS._serialized_end=151 - _SERVERMESSAGE._serialized_start=154 - _SERVERMESSAGE._serialized_end=1236 - _SERVERMESSAGE_RECONNECTINS._serialized_start=500 - _SERVERMESSAGE_RECONNECTINS._serialized_end=531 - _SERVERMESSAGE_GETPROPERTIESINS._serialized_start=534 - _SERVERMESSAGE_GETPROPERTIESINS._serialized_end=691 - _SERVERMESSAGE_GETPROPERTIESINS_CONFIGENTRY._serialized_start=626 - _SERVERMESSAGE_GETPROPERTIESINS_CONFIGENTRY._serialized_end=691 - _SERVERMESSAGE_GETPARAMETERSINS._serialized_start=694 - _SERVERMESSAGE_GETPARAMETERSINS._serialized_end=851 - _SERVERMESSAGE_GETPARAMETERSINS_CONFIGENTRY._serialized_start=626 - _SERVERMESSAGE_GETPARAMETERSINS_CONFIGENTRY._serialized_end=691 - _SERVERMESSAGE_FITINS._serialized_start=854 - _SERVERMESSAGE_FITINS._serialized_end=1035 - _SERVERMESSAGE_FITINS_CONFIGENTRY._serialized_start=626 - _SERVERMESSAGE_FITINS_CONFIGENTRY._serialized_end=691 - 
_SERVERMESSAGE_EVALUATEINS._serialized_start=1038 - _SERVERMESSAGE_EVALUATEINS._serialized_end=1229 - _SERVERMESSAGE_EVALUATEINS_CONFIGENTRY._serialized_start=626 - _SERVERMESSAGE_EVALUATEINS_CONFIGENTRY._serialized_end=691 - _CLIENTMESSAGE._serialized_start=1239 - _CLIENTMESSAGE._serialized_end=2423 - _CLIENTMESSAGE_DISCONNECTRES._serialized_start=1587 - _CLIENTMESSAGE_DISCONNECTRES._serialized_end=1638 - _CLIENTMESSAGE_GETPROPERTIESRES._serialized_start=1641 - _CLIENTMESSAGE_GETPROPERTIESRES._serialized_end=1846 - _CLIENTMESSAGE_GETPROPERTIESRES_PROPERTIESENTRY._serialized_start=1777 - _CLIENTMESSAGE_GETPROPERTIESRES_PROPERTIESENTRY._serialized_end=1846 - _CLIENTMESSAGE_GETPARAMETERSRES._serialized_start=1848 - _CLIENTMESSAGE_GETPARAMETERSRES._serialized_end=1946 - _CLIENTMESSAGE_FITRES._serialized_start=1949 - _CLIENTMESSAGE_FITRES._serialized_end=2191 - _CLIENTMESSAGE_FITRES_METRICSENTRY._serialized_start=2125 - _CLIENTMESSAGE_FITRES_METRICSENTRY._serialized_end=2191 - _CLIENTMESSAGE_EVALUATERES._serialized_start=2194 - _CLIENTMESSAGE_EVALUATERES._serialized_end=2416 - _CLIENTMESSAGE_EVALUATERES_METRICSENTRY._serialized_start=2125 - _CLIENTMESSAGE_EVALUATERES_METRICSENTRY._serialized_end=2191 - _SCALAR._serialized_start=2425 - _SCALAR._serialized_end=2530 - _FLOWERSERVICE._serialized_start=2769 - _FLOWERSERVICE._serialized_end=2852 + _globals['_SERVERMESSAGE_GETPROPERTIESINS_CONFIGENTRY']._options = None + _globals['_SERVERMESSAGE_GETPROPERTIESINS_CONFIGENTRY']._serialized_options = b'8\001' + _globals['_SERVERMESSAGE_GETPARAMETERSINS_CONFIGENTRY']._options = None + _globals['_SERVERMESSAGE_GETPARAMETERSINS_CONFIGENTRY']._serialized_options = b'8\001' + _globals['_SERVERMESSAGE_FITINS_CONFIGENTRY']._options = None + _globals['_SERVERMESSAGE_FITINS_CONFIGENTRY']._serialized_options = b'8\001' + _globals['_SERVERMESSAGE_EVALUATEINS_CONFIGENTRY']._options = None + _globals['_SERVERMESSAGE_EVALUATEINS_CONFIGENTRY']._serialized_options = b'8\001' + 
_globals['_CLIENTMESSAGE_GETPROPERTIESRES_PROPERTIESENTRY']._options = None + _globals['_CLIENTMESSAGE_GETPROPERTIESRES_PROPERTIESENTRY']._serialized_options = b'8\001' + _globals['_CLIENTMESSAGE_FITRES_METRICSENTRY']._options = None + _globals['_CLIENTMESSAGE_FITRES_METRICSENTRY']._serialized_options = b'8\001' + _globals['_CLIENTMESSAGE_EVALUATERES_METRICSENTRY']._options = None + _globals['_CLIENTMESSAGE_EVALUATERES_METRICSENTRY']._serialized_options = b'8\001' + _globals['_CODE']._serialized_start=2533 + _globals['_CODE']._serialized_end=2674 + _globals['_REASON']._serialized_start=2676 + _globals['_REASON']._serialized_end=2767 + _globals['_STATUS']._serialized_start=42 + _globals['_STATUS']._serialized_end=99 + _globals['_PARAMETERS']._serialized_start=101 + _globals['_PARAMETERS']._serialized_end=151 + _globals['_SERVERMESSAGE']._serialized_start=154 + _globals['_SERVERMESSAGE']._serialized_end=1236 + _globals['_SERVERMESSAGE_RECONNECTINS']._serialized_start=500 + _globals['_SERVERMESSAGE_RECONNECTINS']._serialized_end=531 + _globals['_SERVERMESSAGE_GETPROPERTIESINS']._serialized_start=534 + _globals['_SERVERMESSAGE_GETPROPERTIESINS']._serialized_end=691 + _globals['_SERVERMESSAGE_GETPROPERTIESINS_CONFIGENTRY']._serialized_start=626 + _globals['_SERVERMESSAGE_GETPROPERTIESINS_CONFIGENTRY']._serialized_end=691 + _globals['_SERVERMESSAGE_GETPARAMETERSINS']._serialized_start=694 + _globals['_SERVERMESSAGE_GETPARAMETERSINS']._serialized_end=851 + _globals['_SERVERMESSAGE_GETPARAMETERSINS_CONFIGENTRY']._serialized_start=626 + _globals['_SERVERMESSAGE_GETPARAMETERSINS_CONFIGENTRY']._serialized_end=691 + _globals['_SERVERMESSAGE_FITINS']._serialized_start=854 + _globals['_SERVERMESSAGE_FITINS']._serialized_end=1035 + _globals['_SERVERMESSAGE_FITINS_CONFIGENTRY']._serialized_start=626 + _globals['_SERVERMESSAGE_FITINS_CONFIGENTRY']._serialized_end=691 + _globals['_SERVERMESSAGE_EVALUATEINS']._serialized_start=1038 + 
_globals['_SERVERMESSAGE_EVALUATEINS']._serialized_end=1229 + _globals['_SERVERMESSAGE_EVALUATEINS_CONFIGENTRY']._serialized_start=626 + _globals['_SERVERMESSAGE_EVALUATEINS_CONFIGENTRY']._serialized_end=691 + _globals['_CLIENTMESSAGE']._serialized_start=1239 + _globals['_CLIENTMESSAGE']._serialized_end=2423 + _globals['_CLIENTMESSAGE_DISCONNECTRES']._serialized_start=1587 + _globals['_CLIENTMESSAGE_DISCONNECTRES']._serialized_end=1638 + _globals['_CLIENTMESSAGE_GETPROPERTIESRES']._serialized_start=1641 + _globals['_CLIENTMESSAGE_GETPROPERTIESRES']._serialized_end=1846 + _globals['_CLIENTMESSAGE_GETPROPERTIESRES_PROPERTIESENTRY']._serialized_start=1777 + _globals['_CLIENTMESSAGE_GETPROPERTIESRES_PROPERTIESENTRY']._serialized_end=1846 + _globals['_CLIENTMESSAGE_GETPARAMETERSRES']._serialized_start=1848 + _globals['_CLIENTMESSAGE_GETPARAMETERSRES']._serialized_end=1946 + _globals['_CLIENTMESSAGE_FITRES']._serialized_start=1949 + _globals['_CLIENTMESSAGE_FITRES']._serialized_end=2191 + _globals['_CLIENTMESSAGE_FITRES_METRICSENTRY']._serialized_start=2125 + _globals['_CLIENTMESSAGE_FITRES_METRICSENTRY']._serialized_end=2191 + _globals['_CLIENTMESSAGE_EVALUATERES']._serialized_start=2194 + _globals['_CLIENTMESSAGE_EVALUATERES']._serialized_end=2416 + _globals['_CLIENTMESSAGE_EVALUATERES_METRICSENTRY']._serialized_start=2125 + _globals['_CLIENTMESSAGE_EVALUATERES_METRICSENTRY']._serialized_end=2191 + _globals['_SCALAR']._serialized_start=2425 + _globals['_SCALAR']._serialized_end=2530 + _globals['_FLOWERSERVICE']._serialized_start=2769 + _globals['_FLOWERSERVICE']._serialized_end=2852 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/server/__init__.py b/src/py/flwr/server/__init__.py index 74abe8dd463c..09372e258861 100644 --- a/src/py/flwr/server/__init__.py +++ b/src/py/flwr/server/__init__.py @@ -15,26 +15,33 @@ """Flower server.""" -from . import strategy -from .app import ServerConfig as ServerConfig +from . 
import driver, strategy from .app import run_driver_api as run_driver_api from .app import run_fleet_api as run_fleet_api -from .app import run_server as run_server +from .app import run_superlink as run_superlink from .app import start_server as start_server from .client_manager import ClientManager as ClientManager from .client_manager import SimpleClientManager as SimpleClientManager +from .compat import start_driver as start_driver from .history import History as History +from .run_serverapp import run_server_app as run_server_app from .server import Server as Server +from .server_config import ServerConfig as ServerConfig +from .serverapp import ServerApp as ServerApp __all__ = [ "ClientManager", + "driver", "History", "run_driver_api", "run_fleet_api", - "run_server", + "run_server_app", + "run_superlink", "Server", + "ServerApp", "ServerConfig", "SimpleClientManager", + "start_driver", "start_server", "strategy", ] diff --git a/src/py/flwr/server/app.py b/src/py/flwr/server/app.py index 63c24c37a685..dbbf63b0fe5e 100644 --- a/src/py/flwr/server/app.py +++ b/src/py/flwr/server/app.py @@ -19,7 +19,6 @@ import importlib.util import sys import threading -from dataclasses import dataclass from logging import ERROR, INFO, WARN from os.path import isfile from pathlib import Path @@ -33,27 +32,29 @@ from flwr.common.address import parse_address from flwr.common.constant import ( MISSING_EXTRA_REST, - TRANSPORT_TYPE_GRPC_BIDI, TRANSPORT_TYPE_GRPC_RERE, TRANSPORT_TYPE_REST, ) from flwr.common.logger import log -from flwr.proto.driver_pb2_grpc import add_DriverServicer_to_server -from flwr.proto.fleet_pb2_grpc import add_FleetServicer_to_server -from flwr.proto.transport_pb2_grpc import add_FlowerServiceServicer_to_server -from flwr.server.client_manager import ClientManager, SimpleClientManager -from flwr.server.driver.driver_servicer import DriverServicer -from flwr.server.fleet.grpc_bidi.driver_client_manager import DriverClientManager -from 
flwr.server.fleet.grpc_bidi.flower_service_servicer import FlowerServiceServicer -from flwr.server.fleet.grpc_bidi.grpc_server import ( +from flwr.proto.driver_pb2_grpc import ( # pylint: disable=E0611 + add_DriverServicer_to_server, +) +from flwr.proto.fleet_pb2_grpc import ( # pylint: disable=E0611 + add_FleetServicer_to_server, +) + +from .client_manager import ClientManager, SimpleClientManager +from .history import History +from .server import Server +from .server_config import ServerConfig +from .strategy import FedAvg, Strategy +from .superlink.driver.driver_servicer import DriverServicer +from .superlink.fleet.grpc_bidi.grpc_server import ( generic_create_grpc_server, start_grpc_server, ) -from flwr.server.fleet.grpc_rere.fleet_servicer import FleetServicer -from flwr.server.history import History -from flwr.server.server import Server -from flwr.server.state import StateFactory -from flwr.server.strategy import FedAvg, Strategy +from .superlink.fleet.grpc_rere.fleet_servicer import FleetServicer +from .superlink.state import StateFactory ADDRESS_DRIVER_API = "0.0.0.0:9091" ADDRESS_FLEET_API_GRPC_RERE = "0.0.0.0:9092" @@ -63,18 +64,6 @@ DATABASE = ":flwr-in-memory-state:" -@dataclass -class ServerConfig: - """Flower server config. - - All attributes have default values which allows users to configure just the ones - they care about. 
- """ - - num_rounds: int = 1 - round_timeout: Optional[float] = None - - def start_server( # pylint: disable=too-many-arguments,too-many-locals *, server_address: str = ADDRESS_FLEET_API_GRPC_BIDI, @@ -239,7 +228,7 @@ def run_driver_api() -> None: """Run Flower server (Driver API).""" log(INFO, "Starting Flower server (Driver API)") event(EventType.RUN_DRIVER_API_ENTER) - args = _parse_args_driver().parse_args() + args = _parse_args_run_driver_api().parse_args() # Parse IP address parsed_address = parse_address(args.driver_api_address) @@ -276,7 +265,7 @@ def run_fleet_api() -> None: """Run Flower server (Fleet API).""" log(INFO, "Starting Flower server (Fleet API)") event(EventType.RUN_FLEET_API_ENTER) - args = _parse_args_fleet().parse_args() + args = _parse_args_run_fleet_api().parse_args() # Obtain certificates certificates = _try_obtain_certificates(args) @@ -313,19 +302,6 @@ def run_fleet_api() -> None: ) fleet_thread.start() bckg_threads.append(fleet_thread) - elif args.fleet_api_type == TRANSPORT_TYPE_GRPC_BIDI: - address_arg = args.grpc_fleet_api_address - parsed_address = parse_address(address_arg) - if not parsed_address: - sys.exit(f"Fleet IP address ({address_arg}) cannot be parsed.") - host, port, is_v6 = parsed_address - address = f"[{host}]:{port}" if is_v6 else f"{host}:{port}" - fleet_server = _run_fleet_api_grpc_bidi( - address=address, - state_factory=state_factory, - certificates=certificates, - ) - grpc_servers.append(fleet_server) elif args.fleet_api_type == TRANSPORT_TYPE_GRPC_RERE: address_arg = args.grpc_rere_fleet_api_address parsed_address = parse_address(address_arg) @@ -357,11 +333,11 @@ def run_fleet_api() -> None: # pylint: disable=too-many-branches, too-many-locals, too-many-statements -def run_server() -> None: +def run_superlink() -> None: """Run Flower server (Driver API and Fleet API).""" log(INFO, "Starting Flower server") - event(EventType.RUN_SERVER_ENTER) - args = _parse_args_server().parse_args() + 
event(EventType.RUN_SUPERLINK_ENTER) + args = _parse_args_run_superlink().parse_args() # Parse IP address parsed_address = parse_address(args.driver_api_address) @@ -412,19 +388,6 @@ def run_server() -> None: ) fleet_thread.start() bckg_threads.append(fleet_thread) - elif args.fleet_api_type == TRANSPORT_TYPE_GRPC_BIDI: - address_arg = args.grpc_bidi_fleet_api_address - parsed_address = parse_address(address_arg) - if not parsed_address: - sys.exit(f"Fleet IP address ({address_arg}) cannot be parsed.") - host, port, is_v6 = parsed_address - address = f"[{host}]:{port}" if is_v6 else f"{host}:{port}" - fleet_server = _run_fleet_api_grpc_bidi( - address=address, - state_factory=state_factory, - certificates=certificates, - ) - grpc_servers.append(fleet_server) elif args.fleet_api_type == TRANSPORT_TYPE_GRPC_RERE: address_arg = args.grpc_rere_fleet_api_address parsed_address = parse_address(address_arg) @@ -445,7 +408,7 @@ def run_server() -> None: _register_exit_handlers( grpc_servers=grpc_servers, bckg_threads=bckg_threads, - event_type=EventType.RUN_SERVER_LEAVE, + event_type=EventType.RUN_SUPERLINK_LEAVE, ) # Block @@ -550,35 +513,6 @@ def _run_driver_api_grpc( return driver_grpc_server -def _run_fleet_api_grpc_bidi( - address: str, - state_factory: StateFactory, - certificates: Optional[Tuple[bytes, bytes, bytes]], -) -> grpc.Server: - """Run Fleet API (gRPC, bidirectional streaming).""" - # DriverClientManager - driver_client_manager = DriverClientManager( - state_factory=state_factory, - ) - - # Create (legacy) Fleet API gRPC server - fleet_servicer = FlowerServiceServicer( - client_manager=driver_client_manager, - ) - fleet_add_servicer_to_server_fn = add_FlowerServiceServicer_to_server - fleet_grpc_server = generic_create_grpc_server( - servicer_and_add_fn=(fleet_servicer, fleet_add_servicer_to_server_fn), - server_address=address, - max_message_length=GRPC_MAX_MESSAGE_LENGTH, - certificates=certificates, - ) - - log(INFO, "Flower ECE: Starting Fleet API 
(gRPC-bidi) on %s", address) - fleet_grpc_server.start() - - return fleet_grpc_server - - def _run_fleet_api_grpc_rere( address: str, state_factory: StateFactory, @@ -587,7 +521,7 @@ def _run_fleet_api_grpc_rere( """Run Fleet API (gRPC, request-response).""" # Create Fleet API gRPC server fleet_servicer = FleetServicer( - state=state_factory.state(), + state_factory=state_factory, ) fleet_add_servicer_to_server_fn = add_FleetServicer_to_server fleet_grpc_server = generic_create_grpc_server( @@ -616,7 +550,7 @@ def _run_fleet_api_rest( try: import uvicorn - from flwr.server.fleet.rest_rere.rest_api import app as fast_api_app + from flwr.server.superlink.fleet.rest_rere.rest_api import app as fast_api_app except ModuleNotFoundError: sys.exit(MISSING_EXTRA_REST) if workers != 1: @@ -639,7 +573,7 @@ def _run_fleet_api_rest( raise ValueError(validation_exceptions) uvicorn.run( - app="flwr.server.fleet.rest_rere.rest_api:app", + app="flwr.server.superlink.fleet.rest_rere.rest_api:app", port=port, host=host, reload=False, @@ -676,7 +610,7 @@ def _validate_ssl_files( return validation_exceptions -def _parse_args_driver() -> argparse.ArgumentParser: +def _parse_args_run_driver_api() -> argparse.ArgumentParser: """Parse command line arguments for Driver API.""" parser = argparse.ArgumentParser( description="Start a Flower Driver API server. " @@ -693,7 +627,7 @@ def _parse_args_driver() -> argparse.ArgumentParser: return parser -def _parse_args_fleet() -> argparse.ArgumentParser: +def _parse_args_run_fleet_api() -> argparse.ArgumentParser: """Parse command line arguments for Fleet API.""" parser = argparse.ArgumentParser( description="Start a Flower Fleet API server." 
@@ -710,7 +644,7 @@ def _parse_args_fleet() -> argparse.ArgumentParser: return parser -def _parse_args_server() -> argparse.ArgumentParser: +def _parse_args_run_superlink() -> argparse.ArgumentParser: """Parse command line arguments for both Driver API and Fleet API.""" parser = argparse.ArgumentParser( description="This will start a Flower server " @@ -779,13 +713,6 @@ def _add_args_fleet_api(parser: argparse.ArgumentParser) -> None: const=TRANSPORT_TYPE_REST, help="Start a Fleet API server (REST, experimental)", ) - ex_group.add_argument( - "--grpc-bidi", - action="store_const", - dest="fleet_api_type", - const=TRANSPORT_TYPE_GRPC_BIDI, - help="Start a Fleet API server (gRPC-bidi)", - ) # Fleet API gRPC-rere options grpc_rere_group = parser.add_argument_group( @@ -822,13 +749,3 @@ def _add_args_fleet_api(parser: argparse.ArgumentParser) -> None: type=int, default=1, ) - - # Fleet API gRPC-bidi options - grpc_bidi_group = parser.add_argument_group( - "Fleet API (gRPC-bidi) server options", "" - ) - grpc_bidi_group.add_argument( - "--grpc-bidi-fleet-api-address", - help="Fleet API (gRPC-bidi) server address (IPv4, IPv6, or a domain name)", - default=ADDRESS_FLEET_API_GRPC_RERE, - ) diff --git a/src/py/flwr/server/client_manager_test.py b/src/py/flwr/server/client_manager_test.py index 8145b9b2ab7f..5820881b6aad 100644 --- a/src/py/flwr/server/client_manager_test.py +++ b/src/py/flwr/server/client_manager_test.py @@ -18,7 +18,7 @@ from unittest.mock import MagicMock from flwr.server.client_manager import SimpleClientManager -from flwr.server.fleet.grpc_bidi.grpc_client_proxy import GrpcClientProxy +from flwr.server.superlink.fleet.grpc_bidi.grpc_client_proxy import GrpcClientProxy def test_simple_client_manager_register() -> None: diff --git a/src/py/flwr/client/secure_aggregation/__init__.py b/src/py/flwr/server/compat/__init__.py similarity index 72% rename from src/py/flwr/client/secure_aggregation/__init__.py rename to src/py/flwr/server/compat/__init__.py index 
37c816a390de..3a0c2b4e83a0 100644 --- a/src/py/flwr/client/secure_aggregation/__init__.py +++ b/src/py/flwr/server/compat/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,13 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Secure Aggregation handlers.""" +"""Flower ServerApp compatibility package.""" -from .handler import SecureAggregationHandler -from .secaggplus_handler import SecAggPlusHandler +from .app import start_driver as start_driver __all__ = [ - "SecAggPlusHandler", - "SecureAggregationHandler", + "start_driver", ] diff --git a/src/py/flwr/driver/app.py b/src/py/flwr/server/compat/app.py similarity index 89% rename from src/py/flwr/driver/app.py rename to src/py/flwr/server/compat/app.py index 987b4a31981b..06debb858c38 100644 --- a/src/py/flwr/driver/app.py +++ b/src/py/flwr/server/compat/app.py @@ -25,15 +25,16 @@ from flwr.common import EventType, event from flwr.common.address import parse_address from flwr.common.logger import log -from flwr.proto import driver_pb2 -from flwr.server.app import ServerConfig, init_defaults, run_fl +from flwr.proto import driver_pb2 # pylint: disable=E0611 +from flwr.server.app import init_defaults, run_fl from flwr.server.client_manager import ClientManager from flwr.server.history import History from flwr.server.server import Server +from flwr.server.server_config import ServerConfig from flwr.server.strategy import Strategy +from ..driver.grpc_driver import GrpcDriver from .driver_client_proxy import DriverClientProxy -from .grpc_driver import GrpcDriver DEFAULT_SERVER_ADDRESS_DRIVER = "[::]:9091" @@ -72,11 +73,10 @@ def start_driver( # 
pylint: disable=too-many-arguments, too-many-locals An implementation of the abstract base class `flwr.server.strategy.Strategy`. If no strategy is provided, then `start_server` will use `flwr.server.strategy.FedAvg`. - client_manager : Optional[flwr.driver.DriverClientManager] (default: None) - An implementation of the class - `flwr.driver.driver_client_manager.DriverClientManager`. If no + client_manager : Optional[flwr.server.ClientManager] (default: None) + An implementation of the class `flwr.server.ClientManager`. If no implementation is provided, then `start_driver` will use - `flwr.driver.driver_client_manager.DriverClientManager`. + `flwr.server.SimpleClientManager`. root_certificates : Optional[Union[bytes, str]] (default: None) The PEM-encoded root certificates as a byte string or a path string. If provided, a secure connection using the certificates will be @@ -111,8 +111,11 @@ def start_driver( # pylint: disable=too-many-arguments, too-many-locals # Create the Driver if isinstance(root_certificates, str): root_certificates = Path(root_certificates).read_bytes() - driver = GrpcDriver(driver_service_address=address, certificates=root_certificates) - driver.connect() + grpc_driver = GrpcDriver( + driver_service_address=address, root_certificates=root_certificates + ) + + grpc_driver.connect() lock = threading.Lock() # Initialize the Driver API server and config @@ -132,7 +135,7 @@ def start_driver( # pylint: disable=too-many-arguments, too-many-locals thread = threading.Thread( target=update_client_manager, args=( - driver, + grpc_driver, initialized_server.client_manager(), lock, ), @@ -147,7 +150,7 @@ def start_driver( # pylint: disable=too-many-arguments, too-many-locals # Stop the Driver API server and the thread with lock: - driver.disconnect() + grpc_driver.disconnect() thread.join() event(EventType.START_SERVER_LEAVE) @@ -171,7 +174,9 @@ def update_client_manager( `client_manager.unregister()`. 
""" # Request for run_id - run_id = driver.create_run(driver_pb2.CreateRunRequest()).run_id + run_id = driver.create_run( + driver_pb2.CreateRunRequest() # pylint: disable=E1101 + ).run_id # Loop until the driver is disconnected registered_nodes: Dict[int, DriverClientProxy] = {} @@ -181,7 +186,7 @@ def update_client_manager( if driver.stub is None: break get_nodes_res = driver.get_nodes( - req=driver_pb2.GetNodesRequest(run_id=run_id) + req=driver_pb2.GetNodesRequest(run_id=run_id) # pylint: disable=E1101 ) all_node_ids = {node.node_id for node in get_nodes_res.nodes} dead_nodes = set(registered_nodes).difference(all_node_ids) diff --git a/src/py/flwr/driver/app_test.py b/src/py/flwr/server/compat/app_test.py similarity index 93% rename from src/py/flwr/driver/app_test.py rename to src/py/flwr/server/compat/app_test.py index 82747e5afb2c..5f8f04ff2a06 100644 --- a/src/py/flwr/driver/app_test.py +++ b/src/py/flwr/server/compat/app_test.py @@ -20,11 +20,15 @@ import unittest from unittest.mock import MagicMock -from flwr.driver.app import update_client_manager -from flwr.proto.driver_pb2 import CreateRunResponse, GetNodesResponse -from flwr.proto.node_pb2 import Node +from flwr.proto.driver_pb2 import ( # pylint: disable=E0611 + CreateRunResponse, + GetNodesResponse, +) +from flwr.proto.node_pb2 import Node # pylint: disable=E0611 from flwr.server.client_manager import SimpleClientManager +from .app import update_client_manager + class TestClientManagerWithDriver(unittest.TestCase): """Tests for ClientManager. 
diff --git a/src/py/flwr/driver/driver_client_proxy.py b/src/py/flwr/server/compat/driver_client_proxy.py similarity index 56% rename from src/py/flwr/driver/driver_client_proxy.py rename to src/py/flwr/server/compat/driver_client_proxy.py index 6c15acb9ebde..1dc992106f60 100644 --- a/src/py/flwr/driver/driver_client_proxy.py +++ b/src/py/flwr/server/compat/driver_client_proxy.py @@ -16,14 +16,22 @@ import time -from typing import List, Optional, cast +from typing import List, Optional from flwr import common +from flwr.common import recordset_compat as compat from flwr.common import serde -from flwr.proto import driver_pb2, node_pb2, task_pb2, transport_pb2 +from flwr.common.constant import ( + MESSAGE_TYPE_EVALUATE, + MESSAGE_TYPE_FIT, + MESSAGE_TYPE_GET_PARAMETERS, + MESSAGE_TYPE_GET_PROPERTIES, +) +from flwr.common.recordset import RecordSet +from flwr.proto import driver_pb2, node_pb2, task_pb2 # pylint: disable=E0611 from flwr.server.client_proxy import ClientProxy -from .grpc_driver import GrpcDriver +from ..driver.grpc_driver import GrpcDriver SLEEP_TIME = 1 @@ -42,55 +50,51 @@ def get_properties( self, ins: common.GetPropertiesIns, timeout: Optional[float] ) -> common.GetPropertiesRes: """Return client's properties.""" - server_message_proto: transport_pb2.ServerMessage = ( - serde.server_message_to_proto( - server_message=common.ServerMessage(get_properties_ins=ins) - ) - ) - return cast( - common.GetPropertiesRes, - self._send_receive_msg(server_message_proto, timeout).get_properties_res, + # Ins to RecordSet + out_recordset = compat.getpropertiesins_to_recordset(ins) + # Fetch response + in_recordset = self._send_receive_recordset( + out_recordset, MESSAGE_TYPE_GET_PROPERTIES, timeout ) + # RecordSet to Res + return compat.recordset_to_getpropertiesres(in_recordset) def get_parameters( self, ins: common.GetParametersIns, timeout: Optional[float] ) -> common.GetParametersRes: """Return the current local model parameters.""" - server_message_proto: 
transport_pb2.ServerMessage = ( - serde.server_message_to_proto( - server_message=common.ServerMessage(get_parameters_ins=ins) - ) - ) - return cast( - common.GetParametersRes, - self._send_receive_msg(server_message_proto, timeout).get_parameters_res, + # Ins to RecordSet + out_recordset = compat.getparametersins_to_recordset(ins) + # Fetch response + in_recordset = self._send_receive_recordset( + out_recordset, MESSAGE_TYPE_GET_PARAMETERS, timeout ) + # RecordSet to Res + return compat.recordset_to_getparametersres(in_recordset, False) def fit(self, ins: common.FitIns, timeout: Optional[float]) -> common.FitRes: """Train model parameters on the locally held dataset.""" - server_message_proto: transport_pb2.ServerMessage = ( - serde.server_message_to_proto( - server_message=common.ServerMessage(fit_ins=ins) - ) - ) - return cast( - common.FitRes, - self._send_receive_msg(server_message_proto, timeout).fit_res, + # Ins to RecordSet + out_recordset = compat.fitins_to_recordset(ins, keep_input=True) + # Fetch response + in_recordset = self._send_receive_recordset( + out_recordset, MESSAGE_TYPE_FIT, timeout ) + # RecordSet to Res + return compat.recordset_to_fitres(in_recordset, keep_input=False) def evaluate( self, ins: common.EvaluateIns, timeout: Optional[float] ) -> common.EvaluateRes: """Evaluate model parameters on the locally held dataset.""" - server_message_proto: transport_pb2.ServerMessage = ( - serde.server_message_to_proto( - server_message=common.ServerMessage(evaluate_ins=ins) - ) - ) - return cast( - common.EvaluateRes, - self._send_receive_msg(server_message_proto, timeout).evaluate_res, + # Ins to RecordSet + out_recordset = compat.evaluateins_to_recordset(ins, keep_input=True) + # Fetch response + in_recordset = self._send_receive_recordset( + out_recordset, MESSAGE_TYPE_EVALUATE, timeout ) + # RecordSet to Res + return compat.recordset_to_evaluateres(in_recordset) def reconnect( self, ins: common.ReconnectIns, timeout: Optional[float] @@ -98,26 
+102,32 @@ def reconnect( """Disconnect and (optionally) reconnect later.""" return common.DisconnectRes(reason="") # Nothing to do here (yet) - def _send_receive_msg( - self, server_message: transport_pb2.ServerMessage, timeout: Optional[float] - ) -> transport_pb2.ClientMessage: - task_ins = task_pb2.TaskIns( + def _send_receive_recordset( + self, + recordset: RecordSet, + task_type: str, + timeout: Optional[float], + ) -> RecordSet: + task_ins = task_pb2.TaskIns( # pylint: disable=E1101 task_id="", group_id="", run_id=self.run_id, - task=task_pb2.Task( - producer=node_pb2.Node( + task=task_pb2.Task( # pylint: disable=E1101 + producer=node_pb2.Node( # pylint: disable=E1101 node_id=0, anonymous=True, ), - consumer=node_pb2.Node( + consumer=node_pb2.Node( # pylint: disable=E1101 node_id=self.node_id, anonymous=self.anonymous, ), - legacy_server_message=server_message, + task_type=task_type, + recordset=serde.recordset_to_proto(recordset), ), ) - push_task_ins_req = driver_pb2.PushTaskInsRequest(task_ins_list=[task_ins]) + push_task_ins_req = driver_pb2.PushTaskInsRequest( # pylint: disable=E1101 + task_ins_list=[task_ins] + ) # Send TaskIns to Driver API push_task_ins_res = self.driver.push_task_ins(req=push_task_ins_req) @@ -133,22 +143,20 @@ def _send_receive_msg( start_time = time.time() while True: - pull_task_res_req = driver_pb2.PullTaskResRequest( - node=node_pb2.Node(node_id=0, anonymous=True), + pull_task_res_req = driver_pb2.PullTaskResRequest( # pylint: disable=E1101 + node=node_pb2.Node(node_id=0, anonymous=True), # pylint: disable=E1101 task_ids=[task_id], ) # Ask Driver API for TaskRes pull_task_res_res = self.driver.pull_task_res(req=pull_task_res_req) - task_res_list: List[task_pb2.TaskRes] = list( + task_res_list: List[task_pb2.TaskRes] = list( # pylint: disable=E1101 pull_task_res_res.task_res_list ) if len(task_res_list) == 1: task_res = task_res_list[0] - return serde.client_message_from_proto( # type: ignore - 
task_res.task.legacy_client_message - ) + return serde.recordset_from_proto(task_res.task.recordset) if timeout is not None and time.time() > start_time + timeout: raise RuntimeError("Timeout reached") diff --git a/src/py/flwr/server/compat/driver_client_proxy_test.py b/src/py/flwr/server/compat/driver_client_proxy_test.py new file mode 100644 index 000000000000..de6566622b74 --- /dev/null +++ b/src/py/flwr/server/compat/driver_client_proxy_test.py @@ -0,0 +1,248 @@ +# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""DriverClientProxy tests.""" + + +import unittest +from typing import Union, cast +from unittest.mock import MagicMock + +import numpy as np + +import flwr +from flwr.common import recordset_compat as compat +from flwr.common import serde +from flwr.common.constant import ( + MESSAGE_TYPE_EVALUATE, + MESSAGE_TYPE_FIT, + MESSAGE_TYPE_GET_PARAMETERS, + MESSAGE_TYPE_GET_PROPERTIES, +) +from flwr.common.typing import ( + Code, + Config, + EvaluateIns, + EvaluateRes, + FitRes, + GetParametersIns, + GetParametersRes, + GetPropertiesRes, + Parameters, + Properties, + Status, +) +from flwr.proto import driver_pb2, node_pb2, task_pb2 # pylint: disable=E0611 + +from .driver_client_proxy import DriverClientProxy + +MESSAGE_PARAMETERS = Parameters(tensors=[b"abc"], tensor_type="np") + +CLIENT_PROPERTIES = cast(Properties, {"tensor_type": "numpy.ndarray"}) +CLIENT_STATUS = Status(code=Code.OK, message="OK") + + +def _make_task( + res: Union[GetParametersRes, GetPropertiesRes, FitRes, EvaluateRes] +) -> task_pb2.Task: # pylint: disable=E1101 + if isinstance(res, GetParametersRes): + message_type = MESSAGE_TYPE_GET_PARAMETERS + recordset = compat.getparametersres_to_recordset(res, True) + elif isinstance(res, GetPropertiesRes): + message_type = MESSAGE_TYPE_GET_PROPERTIES + recordset = compat.getpropertiesres_to_recordset(res) + elif isinstance(res, FitRes): + message_type = MESSAGE_TYPE_FIT + recordset = compat.fitres_to_recordset(res, True) + elif isinstance(res, EvaluateRes): + message_type = MESSAGE_TYPE_EVALUATE + recordset = compat.evaluateres_to_recordset(res) + else: + raise ValueError(f"Unsupported type: {type(res)}") + return task_pb2.Task( # pylint: disable=E1101 + task_type=message_type, + recordset=serde.recordset_to_proto(recordset), + ) + + +class DriverClientProxyTestCase(unittest.TestCase): + """Tests for DriverClientProxy.""" + + def setUp(self) -> None: + """Set up mocks for 
tests.""" + self.driver = MagicMock() + self.driver.get_nodes.return_value = ( + driver_pb2.GetNodesResponse( # pylint: disable=E1101 + nodes=[ + node_pb2.Node(node_id=1, anonymous=False) # pylint: disable=E1101 + ] + ) + ) + + def test_get_properties(self) -> None: + """Test positive case.""" + # Prepare + self.driver.push_task_ins.return_value = ( + driver_pb2.PushTaskInsResponse( # pylint: disable=E1101 + task_ids=["19341fd7-62e1-4eb4-beb4-9876d3acda32"] + ) + ) + self.driver.pull_task_res.return_value = ( + driver_pb2.PullTaskResResponse( # pylint: disable=E1101 + task_res_list=[ + task_pb2.TaskRes( # pylint: disable=E1101 + task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", + group_id="", + run_id=0, + task=_make_task( + GetPropertiesRes( + status=CLIENT_STATUS, properties=CLIENT_PROPERTIES + ) + ), + ) + ] + ) + ) + client = DriverClientProxy( + node_id=1, driver=self.driver, anonymous=True, run_id=0 + ) + request_properties: Config = {"tensor_type": "str"} + ins: flwr.common.GetPropertiesIns = flwr.common.GetPropertiesIns( + config=request_properties + ) + + # Execute + value: flwr.common.GetPropertiesRes = client.get_properties(ins, timeout=None) + + # Assert + assert value.properties["tensor_type"] == "numpy.ndarray" + + def test_get_parameters(self) -> None: + """Test positive case.""" + # Prepare + self.driver.push_task_ins.return_value = ( + driver_pb2.PushTaskInsResponse( # pylint: disable=E1101 + task_ids=["19341fd7-62e1-4eb4-beb4-9876d3acda32"] + ) + ) + self.driver.pull_task_res.return_value = ( + driver_pb2.PullTaskResResponse( # pylint: disable=E1101 + task_res_list=[ + task_pb2.TaskRes( # pylint: disable=E1101 + task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", + group_id="", + run_id=0, + task=_make_task( + GetParametersRes( + status=CLIENT_STATUS, + parameters=MESSAGE_PARAMETERS, + ) + ), + ) + ] + ) + ) + client = DriverClientProxy( + node_id=1, driver=self.driver, anonymous=True, run_id=0 + ) + get_parameters_ins = GetParametersIns(config={}) + + 
# Execute + value: flwr.common.GetParametersRes = client.get_parameters( + ins=get_parameters_ins, timeout=None + ) + + # Assert + assert value.parameters.tensors[0] == b"abc" + + def test_fit(self) -> None: + """Test positive case.""" + # Prepare + self.driver.push_task_ins.return_value = ( + driver_pb2.PushTaskInsResponse( # pylint: disable=E1101 + task_ids=["19341fd7-62e1-4eb4-beb4-9876d3acda32"] + ) + ) + self.driver.pull_task_res.return_value = ( + driver_pb2.PullTaskResResponse( # pylint: disable=E1101 + task_res_list=[ + task_pb2.TaskRes( # pylint: disable=E1101 + task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", + group_id="", + run_id=0, + task=_make_task( + FitRes( + status=CLIENT_STATUS, + parameters=MESSAGE_PARAMETERS, + num_examples=10, + metrics={}, + ) + ), + ) + ] + ) + ) + client = DriverClientProxy( + node_id=1, driver=self.driver, anonymous=True, run_id=0 + ) + parameters = flwr.common.ndarrays_to_parameters([np.ones((2, 2))]) + ins: flwr.common.FitIns = flwr.common.FitIns(parameters, {}) + + # Execute + fit_res = client.fit(ins=ins, timeout=None) + + # Assert + assert fit_res.parameters.tensor_type == "np" + assert fit_res.parameters.tensors[0] == b"abc" + assert fit_res.num_examples == 10 + + def test_evaluate(self) -> None: + """Test positive case.""" + # Prepare + self.driver.push_task_ins.return_value = ( + driver_pb2.PushTaskInsResponse( # pylint: disable=E1101 + task_ids=["19341fd7-62e1-4eb4-beb4-9876d3acda32"] + ) + ) + self.driver.pull_task_res.return_value = ( + driver_pb2.PullTaskResResponse( # pylint: disable=E1101 + task_res_list=[ + task_pb2.TaskRes( # pylint: disable=E1101 + task_id="554bd3c8-8474-4b93-a7db-c7bec1bf0012", + group_id="", + run_id=0, + task=_make_task( + EvaluateRes( + status=CLIENT_STATUS, + loss=0.0, + num_examples=0, + metrics={}, + ) + ), + ) + ] + ) + ) + client = DriverClientProxy( + node_id=1, driver=self.driver, anonymous=True, run_id=0 + ) + parameters = Parameters(tensors=[], tensor_type="np") + evaluate_ins 
= EvaluateIns(parameters, {}) + + # Execute + evaluate_res = client.evaluate(evaluate_ins, timeout=None) + + # Assert + assert 0.0 == evaluate_res.loss + assert 0 == evaluate_res.num_examples diff --git a/src/py/flwr/server/criterion_test.py b/src/py/flwr/server/criterion_test.py index a7e5b62b5977..f678825f064e 100644 --- a/src/py/flwr/server/criterion_test.py +++ b/src/py/flwr/server/criterion_test.py @@ -20,7 +20,7 @@ from flwr.server.client_manager import SimpleClientManager from flwr.server.client_proxy import ClientProxy from flwr.server.criterion import Criterion -from flwr.server.fleet.grpc_bidi.grpc_client_proxy import GrpcClientProxy +from flwr.server.superlink.fleet.grpc_bidi.grpc_client_proxy import GrpcClientProxy def test_criterion_applied() -> None: diff --git a/src/py/flwr/server/driver/__init__.py b/src/py/flwr/server/driver/__init__.py index 2bfe63e6065f..b61f6eebf6a8 100644 --- a/src/py/flwr/server/driver/__init__.py +++ b/src/py/flwr/server/driver/__init__.py @@ -12,4 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""Flower driver service.""" +"""Flower driver SDK.""" + + +from .driver import Driver +from .grpc_driver import GrpcDriver + +__all__ = [ + "Driver", + "GrpcDriver", +] diff --git a/src/py/flwr/driver/driver.py b/src/py/flwr/server/driver/driver.py similarity index 87% rename from src/py/flwr/driver/driver.py rename to src/py/flwr/server/driver/driver.py index 9f96cc46ce1e..0a7cb36f8847 100644 --- a/src/py/flwr/driver/driver.py +++ b/src/py/flwr/server/driver/driver.py @@ -17,15 +17,15 @@ from typing import Iterable, List, Optional, Tuple -from flwr.driver.grpc_driver import DEFAULT_SERVER_ADDRESS_DRIVER, GrpcDriver -from flwr.proto.driver_pb2 import ( +from flwr.proto.driver_pb2 import ( # pylint: disable=E0611 CreateRunRequest, GetNodesRequest, PullTaskResRequest, PushTaskInsRequest, ) -from flwr.proto.node_pb2 import Node -from flwr.proto.task_pb2 import TaskIns, TaskRes +from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 +from flwr.server.driver.grpc_driver import DEFAULT_SERVER_ADDRESS_DRIVER, GrpcDriver class Driver: @@ -49,10 +49,10 @@ class Driver: def __init__( self, driver_service_address: str = DEFAULT_SERVER_ADDRESS_DRIVER, - certificates: Optional[bytes] = None, + root_certificates: Optional[bytes] = None, ) -> None: self.addr = driver_service_address - self.certificates = certificates + self.root_certificates = root_certificates self.grpc_driver: Optional[GrpcDriver] = None self.run_id: Optional[int] = None self.node = Node(node_id=0, anonymous=True) @@ -62,7 +62,8 @@ def _get_grpc_driver_and_run_id(self) -> Tuple[GrpcDriver, int]: if self.grpc_driver is None or self.run_id is None: # Connect and create run self.grpc_driver = GrpcDriver( - driver_service_address=self.addr, certificates=self.certificates + driver_service_address=self.addr, + root_certificates=self.root_certificates, ) 
self.grpc_driver.connect() res = self.grpc_driver.create_run(CreateRunRequest()) diff --git a/src/py/flwr/driver/driver_test.py b/src/py/flwr/server/driver/driver_test.py similarity index 95% rename from src/py/flwr/driver/driver_test.py rename to src/py/flwr/server/driver/driver_test.py index 8f75bbf78362..0ee7fbfec37e 100644 --- a/src/py/flwr/driver/driver_test.py +++ b/src/py/flwr/server/driver/driver_test.py @@ -18,13 +18,13 @@ import unittest from unittest.mock import Mock, patch -from flwr.driver.driver import Driver -from flwr.proto.driver_pb2 import ( +from flwr.proto.driver_pb2 import ( # pylint: disable=E0611 GetNodesRequest, PullTaskResRequest, PushTaskInsRequest, ) -from flwr.proto.task_pb2 import Task, TaskIns, TaskRes +from flwr.proto.task_pb2 import Task, TaskIns, TaskRes # pylint: disable=E0611 +from flwr.server.driver.driver import Driver class TestDriver(unittest.TestCase): @@ -37,7 +37,7 @@ def setUp(self) -> None: self.mock_grpc_driver = Mock() self.mock_grpc_driver.create_run.return_value = mock_response self.patcher = patch( - "flwr.driver.driver.GrpcDriver", return_value=self.mock_grpc_driver + "flwr.server.driver.driver.GrpcDriver", return_value=self.mock_grpc_driver ) self.patcher.start() self.driver = Driver() diff --git a/src/py/flwr/driver/grpc_driver.py b/src/py/flwr/server/driver/grpc_driver.py similarity index 92% rename from src/py/flwr/driver/grpc_driver.py rename to src/py/flwr/server/driver/grpc_driver.py index 627b95cdb1b4..c3f66f7343db 100644 --- a/src/py/flwr/driver/grpc_driver.py +++ b/src/py/flwr/server/driver/grpc_driver.py @@ -23,7 +23,7 @@ from flwr.common import EventType, event from flwr.common.grpc import create_channel from flwr.common.logger import log -from flwr.proto.driver_pb2 import ( +from flwr.proto.driver_pb2 import ( # pylint: disable=E0611 CreateRunRequest, CreateRunResponse, GetNodesRequest, @@ -33,7 +33,7 @@ PushTaskInsRequest, PushTaskInsResponse, ) -from flwr.proto.driver_pb2_grpc import DriverStub +from 
flwr.proto.driver_pb2_grpc import DriverStub # pylint: disable=E0611 DEFAULT_SERVER_ADDRESS_DRIVER = "[::]:9091" @@ -51,10 +51,10 @@ class GrpcDriver: def __init__( self, driver_service_address: str = DEFAULT_SERVER_ADDRESS_DRIVER, - certificates: Optional[bytes] = None, + root_certificates: Optional[bytes] = None, ) -> None: self.driver_service_address = driver_service_address - self.certificates = certificates + self.root_certificates = root_certificates self.channel: Optional[grpc.Channel] = None self.stub: Optional[DriverStub] = None @@ -66,8 +66,8 @@ def connect(self) -> None: return self.channel = create_channel( server_address=self.driver_service_address, - insecure=(self.certificates is None), - root_certificates=self.certificates, + insecure=(self.root_certificates is None), + root_certificates=self.root_certificates, ) self.stub = DriverStub(self.channel) log(INFO, "[Driver] Connected to %s", self.driver_service_address) diff --git a/src/py/flwr/server/fleet/grpc_bidi/driver_client_manager.py b/src/py/flwr/server/fleet/grpc_bidi/driver_client_manager.py deleted file mode 100644 index dc94bf3912d7..000000000000 --- a/src/py/flwr/server/fleet/grpc_bidi/driver_client_manager.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Flower DriverClientManager.""" - - -import threading -from typing import Dict, List, Optional, Set, Tuple - -from flwr.server.client_manager import ClientManager -from flwr.server.client_proxy import ClientProxy -from flwr.server.criterion import Criterion -from flwr.server.state import State, StateFactory - -from .ins_scheduler import InsScheduler - - -class DriverClientManager(ClientManager): - """Provides a pool of available clients.""" - - def __init__(self, state_factory: StateFactory) -> None: - self._cv = threading.Condition() - self.nodes: Dict[str, Tuple[int, InsScheduler]] = {} - self.state_factory = state_factory - - def __len__(self) -> int: - """Return the number of available clients. - - Returns - ------- - num_available : int - The number of currently available clients. - """ - return len(self.nodes) - - def num_available(self) -> int: - """Return the number of available clients. - - Returns - ------- - num_available : int - The number of currently available clients. - """ - return len(self) - - def register(self, client: ClientProxy) -> bool: - """Register Flower ClientProxy instance. - - Parameters - ---------- - client : flwr.server.client_proxy.ClientProxy - - Returns - ------- - success : bool - Indicating if registration was successful. False if ClientProxy is - already registered or can not be registered for any reason. 
- """ - if client.cid in self.nodes: - return False - - # Create node in State - state: State = self.state_factory.state() - client.node_id = state.create_node() - - # Create and start the instruction scheduler - ins_scheduler = InsScheduler( - client_proxy=client, - state_factory=self.state_factory, - ) - ins_scheduler.start() - - # Store cid, node_id, and InsScheduler - self.nodes[client.cid] = (client.node_id, ins_scheduler) - - with self._cv: - self._cv.notify_all() - - return True - - def unregister(self, client: ClientProxy) -> None: - """Unregister Flower ClientProxy instance. - - This method is idempotent. - - Parameters - ---------- - client : flwr.server.client_proxy.ClientProxy - """ - if client.cid in self.nodes: - node_id, ins_scheduler = self.nodes[client.cid] - del self.nodes[client.cid] - ins_scheduler.stop() - - # Delete node_id in State - state: State = self.state_factory.state() - state.delete_node(node_id=node_id) - - with self._cv: - self._cv.notify_all() - - def all_ids(self) -> Set[int]: - """Return all available node ids. - - Returns - ------- - ids : Set[int] - The IDs of all currently available nodes. 
- """ - return {node_id for _, (node_id, _) in self.nodes.items()} - - # --- Unimplemented methods ----------------------------------------------- - - def all(self) -> Dict[str, ClientProxy]: - """Not implemented.""" - raise NotImplementedError() - - def wait_for(self, num_clients: int, timeout: int = 86400) -> bool: - """Not implemented.""" - raise NotImplementedError() - - def sample( - self, - num_clients: int, - min_num_clients: Optional[int] = None, - criterion: Optional[Criterion] = None, - ) -> List[ClientProxy]: - """Not implemented.""" - raise NotImplementedError() diff --git a/src/py/flwr/server/fleet/grpc_bidi/ins_scheduler.py b/src/py/flwr/server/fleet/grpc_bidi/ins_scheduler.py deleted file mode 100644 index 0fa6f82a89b5..000000000000 --- a/src/py/flwr/server/fleet/grpc_bidi/ins_scheduler.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Instruction scheduler for the legacy gRPC transport stack.""" - - -import threading -import time -from logging import DEBUG, ERROR -from typing import Dict, List, Optional - -from flwr.client.message_handler.task_handler import configure_task_res -from flwr.common import EvaluateRes, FitRes, GetParametersRes, GetPropertiesRes, serde -from flwr.common.logger import log -from flwr.proto.node_pb2 import Node -from flwr.proto.task_pb2 import Task, TaskIns, TaskRes -from flwr.proto.transport_pb2 import ClientMessage, ServerMessage -from flwr.server.client_proxy import ClientProxy -from flwr.server.state import State, StateFactory - - -class InsScheduler: - """Schedule ClientProxy calls on a background thread.""" - - def __init__(self, client_proxy: ClientProxy, state_factory: StateFactory): - self.client_proxy = client_proxy - self.state_factory = state_factory - self.worker_thread: Optional[threading.Thread] = None - self.shared_memory_state = {"stop": False} - - def start(self) -> None: - """Start the worker thread.""" - self.worker_thread = threading.Thread( - target=_worker, - args=( - self.client_proxy, - self.shared_memory_state, - self.state_factory, - ), - ) - self.worker_thread.start() - - def stop(self) -> None: - """Stop the worker thread.""" - if self.worker_thread is None: - log(ERROR, "InsScheduler.stop called, but worker_thread is None") - return - self.shared_memory_state["stop"] = True - self.worker_thread.join() - self.worker_thread = None - self.shared_memory_state["stop"] = False - - -def _worker( - client_proxy: ClientProxy, - shared_memory_state: Dict[str, bool], - state_factory: StateFactory, -) -> None: - """Sequentially call ClientProxy methods to process outstanding tasks.""" - log(DEBUG, "Worker for node %i started", client_proxy.node_id) - - state: State = state_factory.state() - while not shared_memory_state["stop"]: - log(DEBUG, "Worker for node %i 
checking state", client_proxy.node_id) - - # Step 1: pull *Ins (next task) out of `state` - task_ins_list: List[TaskIns] = state.get_task_ins( - node_id=client_proxy.node_id, - limit=1, - ) - if not task_ins_list: - log(DEBUG, "Worker for node %i: no task found", client_proxy.node_id) - time.sleep(3) - continue - - task_ins = task_ins_list[0] - log( - DEBUG, - "Worker for node %i: FOUND task %s", - client_proxy.node_id, - task_ins.task_id, - ) - - # Step 2: call client_proxy.{fit,evaluate,...} - server_message = task_ins.task.legacy_server_message - client_message_proto = _call_client_proxy( - client_proxy=client_proxy, - server_message=server_message, - timeout=None, - ) - - # Step 3: wrap FitRes in a ClientMessage in a Task in a TaskRes - task_res = configure_task_res( - TaskRes(task=Task(legacy_client_message=client_message_proto)), - task_ins, - Node(node_id=client_proxy.node_id, anonymous=False), - ) - - # Step 4: write *Res (result) back to `state` - state.store_task_res(task_res=task_res) - - # Exit worker thread - log(DEBUG, "Worker for node %i stopped", client_proxy.node_id) - - -def _call_client_proxy( - client_proxy: ClientProxy, server_message: ServerMessage, timeout: Optional[float] -) -> ClientMessage: - """.""" - # pylint: disable=too-many-locals - - field = server_message.WhichOneof("msg") - - if field == "get_properties_ins": - get_properties_ins = serde.get_properties_ins_from_proto( - msg=server_message.get_properties_ins - ) - get_properties_res: GetPropertiesRes = client_proxy.get_properties( - ins=get_properties_ins, - timeout=timeout, - ) - get_properties_res_proto = serde.get_properties_res_to_proto( - res=get_properties_res - ) - return ClientMessage(get_properties_res=get_properties_res_proto) - - if field == "get_parameters_ins": - get_parameters_ins = serde.get_parameters_ins_from_proto( - msg=server_message.get_parameters_ins - ) - get_parameters_res: GetParametersRes = client_proxy.get_parameters( - ins=get_parameters_ins, - 
timeout=timeout, - ) - get_parameters_res_proto = serde.get_parameters_res_to_proto( - res=get_parameters_res - ) - return ClientMessage(get_parameters_res=get_parameters_res_proto) - - if field == "fit_ins": - fit_ins = serde.fit_ins_from_proto(msg=server_message.fit_ins) - fit_res: FitRes = client_proxy.fit( - ins=fit_ins, - timeout=timeout, - ) - fit_res_proto = serde.fit_res_to_proto(res=fit_res) - return ClientMessage(fit_res=fit_res_proto) - - if field == "evaluate_ins": - evaluate_ins = serde.evaluate_ins_from_proto(msg=server_message.evaluate_ins) - evaluate_res: EvaluateRes = client_proxy.evaluate( - ins=evaluate_ins, - timeout=timeout, - ) - evaluate_res_proto = serde.evaluate_res_to_proto(res=evaluate_res) - return ClientMessage(evaluate_res=evaluate_res_proto) - - raise ValueError( - "Unsupported instruction in ServerMessage, cannot deserialize from ProtoBuf" - ) diff --git a/src/py/flwr/server/run_serverapp.py b/src/py/flwr/server/run_serverapp.py new file mode 100644 index 000000000000..35fffcf2d7ba --- /dev/null +++ b/src/py/flwr/server/run_serverapp.py @@ -0,0 +1,131 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Run ServerApp.""" + + +import argparse +import sys +from logging import DEBUG, WARN +from pathlib import Path + +from flwr.common import EventType, event +from flwr.common.logger import log + +from .serverapp import ServerApp, load_server_app + + +def run_server_app() -> None: + """Run Flower server app.""" + event(EventType.RUN_SERVER_APP_ENTER) + + args = _parse_args_run_server_app().parse_args() + + # Obtain certificates + if args.insecure: + if args.root_certificates is not None: + sys.exit( + "Conflicting options: The '--insecure' flag disables HTTPS, " + "but '--root-certificates' was also specified. Please remove " + "the '--root-certificates' option when running in insecure mode, " + "or omit '--insecure' to use HTTPS." + ) + log( + WARN, + "Option `--insecure` was set. " + "Starting insecure HTTP client connected to %s.", + args.server, + ) + root_certificates = None + else: + # Load the certificates if provided, or load the system certificates + cert_path = args.root_certificates + if cert_path is None: + root_certificates = None + else: + root_certificates = Path(cert_path).read_bytes() + log( + DEBUG, + "Starting secure HTTPS client connected to %s " + "with the following certificates: %s.", + args.server, + cert_path, + ) + + log( + DEBUG, + "Flower will load ServerApp `%s`", + getattr(args, "server-app"), + ) + + log( + DEBUG, + "root_certificates: `%s`", + root_certificates, + ) + + log(WARN, "Not implemented: run_server_app") + + server_app_dir = args.dir + if server_app_dir is not None: + sys.path.insert(0, server_app_dir) + + def _load() -> ServerApp: + server_app: ServerApp = load_server_app(getattr(args, "server-app")) + return server_app + + server_app = _load() + + log(DEBUG, "server_app: `%s`", server_app) + + event(EventType.RUN_SERVER_APP_LEAVE) + + +def _parse_args_run_server_app() -> argparse.ArgumentParser: + """Parse flower-server-app command line 
arguments.""" + parser = argparse.ArgumentParser( + description="Start a Flower server app", + ) + + parser.add_argument( + "server-app", + help="For example: `server:app` or `project.package.module:wrapper.app`", + ) + parser.add_argument( + "--insecure", + action="store_true", + help="Run the server app without HTTPS. By default, the app runs with " + "HTTPS enabled. Use this flag only if you understand the risks.", + ) + parser.add_argument( + "--root-certificates", + metavar="ROOT_CERT", + type=str, + help="Specifies the path to the PEM-encoded root certificate file for " + "establishing secure HTTPS connections.", + ) + parser.add_argument( + "--server", + default="0.0.0.0:9092", + help="Server address", + ) + parser.add_argument( + "--dir", + default="", + help="Add specified directory to the PYTHONPATH and load Flower " + "app from there." + " Default: current working directory.", + ) + + return parser diff --git a/src/py/flwr/server/server_config.py b/src/py/flwr/server/server_config.py new file mode 100644 index 000000000000..823f832da6f8 --- /dev/null +++ b/src/py/flwr/server/server_config.py @@ -0,0 +1,31 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flower ServerConfig.""" + + +from dataclasses import dataclass +from typing import Optional + + +@dataclass +class ServerConfig: + """Flower server config. 
+ + All attributes have default values which allows users to configure just the ones + they care about. + """ + + num_rounds: int = 1 + round_timeout: Optional[float] = None diff --git a/src/py/flwr/server/serverapp.py b/src/py/flwr/server/serverapp.py new file mode 100644 index 000000000000..1ffa087719dc --- /dev/null +++ b/src/py/flwr/server/serverapp.py @@ -0,0 +1,95 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flower ServerApp.""" + + +import importlib +from typing import Optional, cast + +from flwr.common.context import Context +from flwr.server.driver.driver import Driver +from flwr.server.strategy import Strategy + +from .client_manager import ClientManager +from .server import Server +from .server_config import ServerConfig + + +class ServerApp: + """Flower ServerApp.""" + + def __init__( + self, + server: Optional[Server] = None, + config: Optional[ServerConfig] = None, + strategy: Optional[Strategy] = None, + client_manager: Optional[ClientManager] = None, + ) -> None: + self.server = server + self.config = config + self.strategy = strategy + self.client_manager = client_manager + + def __call__(self, driver: Driver, context: Context) -> None: + """Execute `ServerApp`.""" + + +class LoadServerAppError(Exception): + """Error when trying to load `ServerApp`.""" + + +def load_server_app(module_attribute_str: str) -> 
ServerApp: + """Load the `ServerApp` object specified in a module attribute string. + + The module/attribute string should have the form :. Valid + examples include `server:app` and `project.package.module:wrapper.app`. It + must refer to a module on the PYTHONPATH, the module needs to have the specified + attribute, and the attribute must be of type `ServerApp`. + """ + module_str, _, attributes_str = module_attribute_str.partition(":") + if not module_str: + raise LoadServerAppError( + f"Missing module in {module_attribute_str}", + ) from None + if not attributes_str: + raise LoadServerAppError( + f"Missing attribute in {module_attribute_str}", + ) from None + + # Load module + try: + module = importlib.import_module(module_str) + except ModuleNotFoundError: + raise LoadServerAppError( + f"Unable to load module {module_str}", + ) from None + + # Recursively load attribute + attribute = module + try: + for attribute_str in attributes_str.split("."): + attribute = getattr(attribute, attribute_str) + except AttributeError: + raise LoadServerAppError( + f"Unable to load attribute {attributes_str} from module {module_str}", + ) from None + + # Check type + if not isinstance(attribute, ServerApp): + raise LoadServerAppError( + f"Attribute {attributes_str} is not of type {ServerApp}", + ) from None + + return cast(ServerApp, attribute) diff --git a/src/py/flwr/server/strategy/dpfedavg_adaptive.py b/src/py/flwr/server/strategy/dpfedavg_adaptive.py index 8b3278cc9ba0..a908679ed668 100644 --- a/src/py/flwr/server/strategy/dpfedavg_adaptive.py +++ b/src/py/flwr/server/strategy/dpfedavg_adaptive.py @@ -24,6 +24,7 @@ import numpy as np from flwr.common import FitIns, FitRes, Parameters, Scalar +from flwr.common.logger import warn_deprecated_feature from flwr.server.client_manager import ClientManager from flwr.server.client_proxy import ClientProxy from flwr.server.strategy.dpfedavg_fixed import DPFedAvgFixed @@ -31,7 +32,12 @@ class DPFedAvgAdaptive(DPFedAvgFixed): - 
"""Wrapper for configuring a Strategy for DP with Adaptive Clipping.""" + """Wrapper for configuring a Strategy for DP with Adaptive Clipping. + + Warning + ------- + This class is deprecated and will be removed in a future release. + """ # pylint: disable=too-many-arguments,too-many-instance-attributes def __init__( @@ -45,6 +51,7 @@ def __init__( clip_norm_target_quantile: float = 0.5, clip_count_stddev: Optional[float] = None, ) -> None: + warn_deprecated_feature("`DPFedAvgAdaptive` wrapper") super().__init__( strategy=strategy, num_sampled_clients=num_sampled_clients, diff --git a/src/py/flwr/server/strategy/dpfedavg_fixed.py b/src/py/flwr/server/strategy/dpfedavg_fixed.py index f2f1c206f3de..b182ac26cef8 100644 --- a/src/py/flwr/server/strategy/dpfedavg_fixed.py +++ b/src/py/flwr/server/strategy/dpfedavg_fixed.py @@ -17,11 +17,11 @@ Paper: arxiv.org/pdf/1710.06963.pdf """ - from typing import Dict, List, Optional, Tuple, Union from flwr.common import EvaluateIns, EvaluateRes, FitIns, FitRes, Parameters, Scalar from flwr.common.dp import add_gaussian_noise +from flwr.common.logger import warn_deprecated_feature from flwr.common.parameter import ndarrays_to_parameters, parameters_to_ndarrays from flwr.server.client_manager import ClientManager from flwr.server.client_proxy import ClientProxy @@ -29,7 +29,12 @@ class DPFedAvgFixed(Strategy): - """Wrapper for configuring a Strategy for DP with Fixed Clipping.""" + """Wrapper for configuring a Strategy for DP with Fixed Clipping. + + Warning + ------- + This class is deprecated and will be removed in a future release. + """ # pylint: disable=too-many-arguments,too-many-instance-attributes def __init__( @@ -40,6 +45,7 @@ def __init__( noise_multiplier: float = 1, server_side_noising: bool = True, ) -> None: + warn_deprecated_feature("`DPFedAvgFixed` wrapper") super().__init__() self.strategy = strategy # Doing fixed-size subsampling as in https://arxiv.org/abs/1905.03871. 
diff --git a/src/py/flwr/server/strategy/fedadagrad_test.py b/src/py/flwr/server/strategy/fedadagrad_test.py index b3380a5be2f9..0c966442ecaf 100644 --- a/src/py/flwr/server/strategy/fedadagrad_test.py +++ b/src/py/flwr/server/strategy/fedadagrad_test.py @@ -30,7 +30,7 @@ parameters_to_ndarrays, ) from flwr.server.client_proxy import ClientProxy -from flwr.server.fleet.grpc_bidi.grpc_client_proxy import GrpcClientProxy +from flwr.server.superlink.fleet.grpc_bidi.grpc_client_proxy import GrpcClientProxy from .fedadagrad import FedAdagrad diff --git a/src/py/flwr/server/strategy/fedmedian_test.py b/src/py/flwr/server/strategy/fedmedian_test.py index 180503df6c80..57cf08d8c01d 100644 --- a/src/py/flwr/server/strategy/fedmedian_test.py +++ b/src/py/flwr/server/strategy/fedmedian_test.py @@ -30,7 +30,7 @@ parameters_to_ndarrays, ) from flwr.server.client_proxy import ClientProxy -from flwr.server.fleet.grpc_bidi.grpc_client_proxy import GrpcClientProxy +from flwr.server.superlink.fleet.grpc_bidi.grpc_client_proxy import GrpcClientProxy from .fedmedian import FedMedian diff --git a/src/py/flwr/server/strategy/fedxgb_bagging.py b/src/py/flwr/server/strategy/fedxgb_bagging.py index cafb466c2e8b..a8e8adddafbb 100644 --- a/src/py/flwr/server/strategy/fedxgb_bagging.py +++ b/src/py/flwr/server/strategy/fedxgb_bagging.py @@ -44,6 +44,11 @@ def __init__( self.global_model: Optional[bytes] = None super().__init__(**kwargs) + def __repr__(self) -> str: + """Compute a string representation of the strategy.""" + rep = f"FedXgbBagging(accept_failures={self.accept_failures})" + return rep + def aggregate_fit( self, server_round: int, diff --git a/src/py/flwr/server/strategy/fedxgb_cyclic.py b/src/py/flwr/server/strategy/fedxgb_cyclic.py index e2707b02d19d..2605daab29f4 100644 --- a/src/py/flwr/server/strategy/fedxgb_cyclic.py +++ b/src/py/flwr/server/strategy/fedxgb_cyclic.py @@ -37,6 +37,11 @@ def __init__( self.global_model: Optional[bytes] = None super().__init__(**kwargs) + def 
__repr__(self) -> str: + """Compute a string representation of the strategy.""" + rep = f"FedXgbCyclic(accept_failures={self.accept_failures})" + return rep + def aggregate_fit( self, server_round: int, diff --git a/src/py/flwr/server/strategy/fedxgb_nn_avg.py b/src/py/flwr/server/strategy/fedxgb_nn_avg.py index f300633d0d9f..8dedc925f350 100644 --- a/src/py/flwr/server/strategy/fedxgb_nn_avg.py +++ b/src/py/flwr/server/strategy/fedxgb_nn_avg.py @@ -25,7 +25,7 @@ from typing import Any, Dict, List, Optional, Tuple, Union from flwr.common import FitRes, Scalar, ndarrays_to_parameters, parameters_to_ndarrays -from flwr.common.logger import log +from flwr.common.logger import log, warn_deprecated_feature from flwr.server.client_proxy import ClientProxy from .aggregate import aggregate @@ -33,7 +33,13 @@ class FedXgbNnAvg(FedAvg): - """Configurable FedXgbNnAvg strategy implementation.""" + """Configurable FedXgbNnAvg strategy implementation. + + Warning + ------- + This strategy is deprecated, but a copy of it is available in Flower Baselines: + https://github.com/adap/flower/tree/main/baselines/hfedxgboost. + """ def __init__(self, *args: Any, **kwargs: Any) -> None: """Federated XGBoost [Ma et al., 2023] strategy. @@ -41,6 +47,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: Implementation based on https://arxiv.org/abs/2304.07537. 
""" super().__init__(*args, **kwargs) + warn_deprecated_feature("`FedXgbNnAvg` strategy") def __repr__(self) -> str: """Compute a string representation of the strategy.""" diff --git a/src/py/flwr/server/strategy/krum_test.py b/src/py/flwr/server/strategy/krum_test.py index 81e59230739a..653dc9a8475d 100644 --- a/src/py/flwr/server/strategy/krum_test.py +++ b/src/py/flwr/server/strategy/krum_test.py @@ -30,7 +30,7 @@ parameters_to_ndarrays, ) from flwr.server.client_proxy import ClientProxy -from flwr.server.fleet.grpc_bidi.grpc_client_proxy import GrpcClientProxy +from flwr.server.superlink.fleet.grpc_bidi.grpc_client_proxy import GrpcClientProxy from .krum import Krum diff --git a/src/py/flwr/server/strategy/multikrum_test.py b/src/py/flwr/server/strategy/multikrum_test.py index 1469db104252..f874dc2f9800 100644 --- a/src/py/flwr/server/strategy/multikrum_test.py +++ b/src/py/flwr/server/strategy/multikrum_test.py @@ -30,7 +30,7 @@ parameters_to_ndarrays, ) from flwr.server.client_proxy import ClientProxy -from flwr.server.fleet.grpc_bidi.grpc_client_proxy import GrpcClientProxy +from flwr.server.superlink.fleet.grpc_bidi.grpc_client_proxy import GrpcClientProxy from .krum import Krum diff --git a/src/py/flwr/server/superlink/__init__.py b/src/py/flwr/server/superlink/__init__.py new file mode 100644 index 000000000000..94102100de26 --- /dev/null +++ b/src/py/flwr/server/superlink/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flower SuperLink.""" diff --git a/src/py/flwr/driver/__init__.py b/src/py/flwr/server/superlink/driver/__init__.py similarity index 78% rename from src/py/flwr/driver/__init__.py rename to src/py/flwr/server/superlink/driver/__init__.py index 1c3b09cc334b..2bfe63e6065f 100644 --- a/src/py/flwr/driver/__init__.py +++ b/src/py/flwr/server/superlink/driver/__init__.py @@ -12,15 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Flower driver SDK.""" - - -from .app import start_driver -from .driver import Driver -from .grpc_driver import GrpcDriver - -__all__ = [ - "Driver", - "GrpcDriver", - "start_driver", -] +"""Flower driver service.""" diff --git a/src/py/flwr/server/driver/driver_servicer.py b/src/py/flwr/server/superlink/driver/driver_servicer.py similarity index 93% rename from src/py/flwr/server/driver/driver_servicer.py rename to src/py/flwr/server/superlink/driver/driver_servicer.py index 546ebd884ca9..ca2cd69b2f20 100644 --- a/src/py/flwr/server/driver/driver_servicer.py +++ b/src/py/flwr/server/superlink/driver/driver_servicer.py @@ -22,8 +22,8 @@ import grpc from flwr.common.logger import log -from flwr.proto import driver_pb2_grpc -from flwr.proto.driver_pb2 import ( +from flwr.proto import driver_pb2_grpc # pylint: disable=E0611 +from flwr.proto.driver_pb2 import ( # pylint: disable=E0611 CreateRunRequest, CreateRunResponse, GetNodesRequest, @@ -33,9 +33,9 @@ PushTaskInsRequest, PushTaskInsResponse, ) -from flwr.proto.node_pb2 import Node -from flwr.proto.task_pb2 import TaskRes -from flwr.server.state import State, StateFactory +from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.task_pb2 import 
TaskRes # pylint: disable=E0611 +from flwr.server.superlink.state import State, StateFactory from flwr.server.utils.validator import validate_task_ins_or_res diff --git a/src/py/flwr/server/driver/driver_servicer_test.py b/src/py/flwr/server/superlink/driver/driver_servicer_test.py similarity index 95% rename from src/py/flwr/server/driver/driver_servicer_test.py rename to src/py/flwr/server/superlink/driver/driver_servicer_test.py index c432c026a632..99f7cc007a89 100644 --- a/src/py/flwr/server/driver/driver_servicer_test.py +++ b/src/py/flwr/server/superlink/driver/driver_servicer_test.py @@ -15,7 +15,7 @@ """DriverServicer tests.""" -from flwr.server.driver.driver_servicer import _raise_if +from flwr.server.superlink.driver.driver_servicer import _raise_if # pylint: disable=broad-except diff --git a/src/py/flwr/server/fleet/__init__.py b/src/py/flwr/server/superlink/fleet/__init__.py similarity index 100% rename from src/py/flwr/server/fleet/__init__.py rename to src/py/flwr/server/superlink/fleet/__init__.py diff --git a/src/py/flwr/server/fleet/grpc_bidi/__init__.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/__init__.py similarity index 100% rename from src/py/flwr/server/fleet/grpc_bidi/__init__.py rename to src/py/flwr/server/superlink/fleet/grpc_bidi/__init__.py diff --git a/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/flower_service_servicer.py similarity index 88% rename from src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py rename to src/py/flwr/server/superlink/fleet/grpc_bidi/flower_service_servicer.py index 1f7a8e9259fc..6f94ea844e38 100644 --- a/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/flower_service_servicer.py @@ -18,17 +18,24 @@ - https://github.com/grpc/grpc/blob/master/doc/statuscodes.md """ - +import uuid from typing import Callable, Iterator import grpc from iterators import TimeoutIterator 
-from flwr.proto import transport_pb2_grpc -from flwr.proto.transport_pb2 import ClientMessage, ServerMessage +from flwr.proto import transport_pb2_grpc # pylint: disable=E0611 +from flwr.proto.transport_pb2 import ( # pylint: disable=E0611 + ClientMessage, + ServerMessage, +) from flwr.server.client_manager import ClientManager -from flwr.server.fleet.grpc_bidi.grpc_bridge import GrpcBridge, InsWrapper, ResWrapper -from flwr.server.fleet.grpc_bidi.grpc_client_proxy import GrpcClientProxy +from flwr.server.superlink.fleet.grpc_bidi.grpc_bridge import ( + GrpcBridge, + InsWrapper, + ResWrapper, +) +from flwr.server.superlink.fleet.grpc_bidi.grpc_client_proxy import GrpcClientProxy def default_bridge_factory() -> GrpcBridge: @@ -88,9 +95,12 @@ def Join( # pylint: disable=invalid-name wrapping the actual message - The `Join` method is (pretty much) unaware of the protocol """ - peer: str = context.peer() + # When running Flower behind a proxy, the peer can be the same for + # different clients, so instead of `cid: str = context.peer()` we + # use a `UUID4` that is unique. 
+ cid: str = uuid.uuid4().hex bridge = self.grpc_bridge_factory() - client_proxy = self.client_proxy_factory(peer, bridge) + client_proxy = self.client_proxy_factory(cid, bridge) is_success = register_client_proxy(self.client_manager, client_proxy, context) if is_success: diff --git a/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer_test.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/flower_service_servicer_test.py similarity index 89% rename from src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer_test.py rename to src/py/flwr/server/superlink/fleet/grpc_bidi/flower_service_servicer_test.py index 64140ed274c9..bd93554a6a32 100644 --- a/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer_test.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/flower_service_servicer_test.py @@ -16,18 +16,23 @@ import unittest +import uuid from unittest.mock import MagicMock, call -from flwr.proto.transport_pb2 import ClientMessage, ServerMessage -from flwr.server.fleet.grpc_bidi.flower_service_servicer import ( +from flwr.proto.transport_pb2 import ( # pylint: disable=E0611 + ClientMessage, + ServerMessage, +) +from flwr.server.superlink.fleet.grpc_bidi.flower_service_servicer import ( FlowerServiceServicer, register_client_proxy, ) -from flwr.server.fleet.grpc_bidi.grpc_bridge import InsWrapper, ResWrapper +from flwr.server.superlink.fleet.grpc_bidi.grpc_bridge import InsWrapper, ResWrapper CLIENT_MESSAGE = ClientMessage() SERVER_MESSAGE = ServerMessage() -CLIENT_CID = "some_client_cid" + +CID: str = uuid.uuid4().hex class FlowerServiceServicerTestCase(unittest.TestCase): @@ -39,7 +44,6 @@ def setUp(self) -> None: """Create mocks for tests.""" # Mock for the gRPC context argument self.context_mock = MagicMock() - self.context_mock.peer.return_value = CLIENT_CID # Define client_messages to be processed by FlowerServiceServicer instance self.client_messages = [CLIENT_MESSAGE for _ in range(5)] @@ -67,7 +71,7 @@ def setUp(self) -> None: # Create a 
GrpcClientProxy mock which we will use to test if correct # methods where called and client_messages are getting passed to it self.grpc_client_proxy_mock = MagicMock() - self.grpc_client_proxy_mock.cid = CLIENT_CID + self.grpc_client_proxy_mock.cid = CID self.client_proxy_factory_mock = MagicMock() self.client_proxy_factory_mock.return_value = self.grpc_client_proxy_mock @@ -124,11 +128,7 @@ def test_join(self) -> None: num_server_messages += 1 assert len(self.client_messages) == num_server_messages - assert self.grpc_client_proxy_mock.cid == CLIENT_CID - - self.client_proxy_factory_mock.assert_called_once_with( - CLIENT_CID, self.grpc_bridge_mock - ) + assert self.grpc_client_proxy_mock.cid == CID # Check if the client was registered with the client_manager self.client_manager_mock.register.assert_called_once_with( diff --git a/src/py/flwr/server/fleet/grpc_bidi/grpc_bridge.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge.py similarity index 98% rename from src/py/flwr/server/fleet/grpc_bidi/grpc_bridge.py rename to src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge.py index 4e68499f018d..d5b4a915c609 100644 --- a/src/py/flwr/server/fleet/grpc_bidi/grpc_bridge.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge.py @@ -20,7 +20,10 @@ from threading import Condition from typing import Iterator, Optional -from flwr.proto.transport_pb2 import ClientMessage, ServerMessage +from flwr.proto.transport_pb2 import ( # pylint: disable=E0611 + ClientMessage, + ServerMessage, +) @dataclass diff --git a/src/py/flwr/server/fleet/grpc_bidi/grpc_bridge_test.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge_test.py similarity index 96% rename from src/py/flwr/server/fleet/grpc_bidi/grpc_bridge_test.py rename to src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge_test.py index bcfbe6e6fac8..f7c236acd7a1 100644 --- a/src/py/flwr/server/fleet/grpc_bidi/grpc_bridge_test.py +++ 
b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge_test.py @@ -19,8 +19,11 @@ from threading import Thread from typing import List, Union -from flwr.proto.transport_pb2 import ClientMessage, ServerMessage -from flwr.server.fleet.grpc_bidi.grpc_bridge import ( +from flwr.proto.transport_pb2 import ( # pylint: disable=E0611 + ClientMessage, + ServerMessage, +) +from flwr.server.superlink.fleet.grpc_bidi.grpc_bridge import ( GrpcBridge, GrpcBridgeClosed, InsWrapper, diff --git a/src/py/flwr/server/fleet/grpc_bidi/grpc_client_proxy.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_client_proxy.py similarity index 95% rename from src/py/flwr/server/fleet/grpc_bidi/grpc_client_proxy.py rename to src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_client_proxy.py index b9bc7330db31..026e8dfe51ef 100644 --- a/src/py/flwr/server/fleet/grpc_bidi/grpc_client_proxy.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_client_proxy.py @@ -19,9 +19,16 @@ from flwr import common from flwr.common import serde -from flwr.proto.transport_pb2 import ClientMessage, ServerMessage +from flwr.proto.transport_pb2 import ( # pylint: disable=E0611 + ClientMessage, + ServerMessage, +) from flwr.server.client_proxy import ClientProxy -from flwr.server.fleet.grpc_bidi.grpc_bridge import GrpcBridge, InsWrapper, ResWrapper +from flwr.server.superlink.fleet.grpc_bidi.grpc_bridge import ( + GrpcBridge, + InsWrapper, + ResWrapper, +) class GrpcClientProxy(ClientProxy): diff --git a/src/py/flwr/server/fleet/grpc_bidi/grpc_client_proxy_test.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_client_proxy_test.py similarity index 94% rename from src/py/flwr/server/fleet/grpc_bidi/grpc_client_proxy_test.py rename to src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_client_proxy_test.py index 329f29b3f616..360570eb663d 100644 --- a/src/py/flwr/server/fleet/grpc_bidi/grpc_client_proxy_test.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_client_proxy_test.py @@ -22,9 +22,13 
@@ import flwr from flwr.common.typing import Config, GetParametersIns -from flwr.proto.transport_pb2 import ClientMessage, Parameters, Scalar -from flwr.server.fleet.grpc_bidi.grpc_bridge import ResWrapper -from flwr.server.fleet.grpc_bidi.grpc_client_proxy import GrpcClientProxy +from flwr.proto.transport_pb2 import ( # pylint: disable=E0611 + ClientMessage, + Parameters, + Scalar, +) +from flwr.server.superlink.fleet.grpc_bidi.grpc_bridge import ResWrapper +from flwr.server.superlink.fleet.grpc_bidi.grpc_client_proxy import GrpcClientProxy MESSAGE_PARAMETERS = Parameters(tensors=[], tensor_type="np") MESSAGE_FIT_RES = ClientMessage( diff --git a/src/py/flwr/server/fleet/grpc_bidi/grpc_server.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py similarity index 96% rename from src/py/flwr/server/fleet/grpc_bidi/grpc_server.py rename to src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py index fc81e8eb8f4c..82f049844bd6 100644 --- a/src/py/flwr/server/fleet/grpc_bidi/grpc_server.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py @@ -24,11 +24,15 @@ from flwr.common import GRPC_MAX_MESSAGE_LENGTH from flwr.common.logger import log -from flwr.proto.transport_pb2_grpc import add_FlowerServiceServicer_to_server +from flwr.proto.transport_pb2_grpc import ( # pylint: disable=E0611 + add_FlowerServiceServicer_to_server, +) from flwr.server.client_manager import ClientManager -from flwr.server.driver.driver_servicer import DriverServicer -from flwr.server.fleet.grpc_bidi.flower_service_servicer import FlowerServiceServicer -from flwr.server.fleet.grpc_rere.fleet_servicer import FleetServicer +from flwr.server.superlink.driver.driver_servicer import DriverServicer +from flwr.server.superlink.fleet.grpc_bidi.flower_service_servicer import ( + FlowerServiceServicer, +) +from flwr.server.superlink.fleet.grpc_rere.fleet_servicer import FleetServicer INVALID_CERTIFICATES_ERR_MSG = """ When setting any of root_certificate, certificate, or 
private_key, diff --git a/src/py/flwr/server/fleet/grpc_bidi/grpc_server_test.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server_test.py similarity index 96% rename from src/py/flwr/server/fleet/grpc_bidi/grpc_server_test.py rename to src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server_test.py index 4cd093d6ab0f..8afa37515950 100644 --- a/src/py/flwr/server/fleet/grpc_bidi/grpc_server_test.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server_test.py @@ -23,12 +23,12 @@ from typing import Tuple, cast from flwr.server.client_manager import SimpleClientManager -from flwr.server.fleet.grpc_bidi.grpc_server import ( +from flwr.server.superlink.fleet.grpc_bidi.grpc_server import ( start_grpc_server, valid_certificates, ) -root_dir = dirname(abspath(join(__file__, "../../../../../.."))) +root_dir = dirname(abspath(join(__file__, "../../../../../../.."))) def load_certificates() -> Tuple[str, str, str]: diff --git a/src/py/flwr/server/fleet/grpc_rere/__init__.py b/src/py/flwr/server/superlink/fleet/grpc_rere/__init__.py similarity index 100% rename from src/py/flwr/server/fleet/grpc_rere/__init__.py rename to src/py/flwr/server/superlink/fleet/grpc_rere/__init__.py diff --git a/src/py/flwr/server/fleet/grpc_rere/fleet_servicer.py b/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py similarity index 80% rename from src/py/flwr/server/fleet/grpc_rere/fleet_servicer.py rename to src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py index 022470cffe8a..278474477379 100644 --- a/src/py/flwr/server/fleet/grpc_rere/fleet_servicer.py +++ b/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py @@ -20,8 +20,8 @@ import grpc from flwr.common.logger import log -from flwr.proto import fleet_pb2_grpc -from flwr.proto.fleet_pb2 import ( +from flwr.proto import fleet_pb2_grpc # pylint: disable=E0611 +from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 CreateNodeRequest, CreateNodeResponse, DeleteNodeRequest, @@ -31,15 
+31,15 @@ PushTaskResRequest, PushTaskResResponse, ) -from flwr.server.fleet.message_handler import message_handler -from flwr.server.state import State +from flwr.server.superlink.fleet.message_handler import message_handler +from flwr.server.superlink.state import StateFactory class FleetServicer(fleet_pb2_grpc.FleetServicer): """Fleet API servicer.""" - def __init__(self, state: State) -> None: - self.state = state + def __init__(self, state_factory: StateFactory) -> None: + self.state_factory = state_factory def CreateNode( self, request: CreateNodeRequest, context: grpc.ServicerContext @@ -48,7 +48,7 @@ def CreateNode( log(INFO, "FleetServicer.CreateNode") return message_handler.create_node( request=request, - state=self.state, + state=self.state_factory.state(), ) def DeleteNode( @@ -58,7 +58,7 @@ def DeleteNode( log(INFO, "FleetServicer.DeleteNode") return message_handler.delete_node( request=request, - state=self.state, + state=self.state_factory.state(), ) def PullTaskIns( @@ -68,7 +68,7 @@ def PullTaskIns( log(INFO, "FleetServicer.PullTaskIns") return message_handler.pull_task_ins( request=request, - state=self.state, + state=self.state_factory.state(), ) def PushTaskRes( @@ -78,5 +78,5 @@ def PushTaskRes( log(INFO, "FleetServicer.PushTaskRes") return message_handler.push_task_res( request=request, - state=self.state, + state=self.state_factory.state(), ) diff --git a/src/py/flwr/server/fleet/message_handler/__init__.py b/src/py/flwr/server/superlink/fleet/message_handler/__init__.py similarity index 100% rename from src/py/flwr/server/fleet/message_handler/__init__.py rename to src/py/flwr/server/superlink/fleet/message_handler/__init__.py diff --git a/src/py/flwr/server/fleet/message_handler/message_handler.py b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py similarity index 91% rename from src/py/flwr/server/fleet/message_handler/message_handler.py rename to src/py/flwr/server/superlink/fleet/message_handler/message_handler.py 
index 71876386f059..5fe815180823 100644 --- a/src/py/flwr/server/fleet/message_handler/message_handler.py +++ b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py @@ -18,7 +18,7 @@ from typing import List, Optional from uuid import UUID -from flwr.proto.fleet_pb2 import ( +from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 CreateNodeRequest, CreateNodeResponse, DeleteNodeRequest, @@ -29,9 +29,9 @@ PushTaskResResponse, Reconnect, ) -from flwr.proto.node_pb2 import Node -from flwr.proto.task_pb2 import TaskIns, TaskRes -from flwr.server.state import State +from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 +from flwr.server.superlink.state import State def create_node( diff --git a/src/py/flwr/server/fleet/message_handler/message_handler_test.py b/src/py/flwr/server/superlink/fleet/message_handler/message_handler_test.py similarity index 94% rename from src/py/flwr/server/fleet/message_handler/message_handler_test.py rename to src/py/flwr/server/superlink/fleet/message_handler/message_handler_test.py index bb2205e26b18..c135f6fb7b61 100644 --- a/src/py/flwr/server/fleet/message_handler/message_handler_test.py +++ b/src/py/flwr/server/superlink/fleet/message_handler/message_handler_test.py @@ -17,14 +17,14 @@ from unittest.mock import MagicMock -from flwr.proto.fleet_pb2 import ( +from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 CreateNodeRequest, DeleteNodeRequest, PullTaskInsRequest, PushTaskResRequest, ) -from flwr.proto.node_pb2 import Node -from flwr.proto.task_pb2 import Task, TaskRes +from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.task_pb2 import Task, TaskRes # pylint: disable=E0611 from .message_handler import create_node, delete_node, pull_task_ins, push_task_res diff --git a/src/py/flwr/server/fleet/rest_rere/__init__.py b/src/py/flwr/server/superlink/fleet/rest_rere/__init__.py similarity index 100% rename 
from src/py/flwr/server/fleet/rest_rere/__init__.py rename to src/py/flwr/server/superlink/fleet/rest_rere/__init__.py diff --git a/src/py/flwr/server/fleet/rest_rere/rest_api.py b/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py similarity index 96% rename from src/py/flwr/server/fleet/rest_rere/rest_api.py rename to src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py index cd1e47f24f00..b022b34c68c8 100644 --- a/src/py/flwr/server/fleet/rest_rere/rest_api.py +++ b/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py @@ -18,14 +18,14 @@ import sys from flwr.common.constant import MISSING_EXTRA_REST -from flwr.proto.fleet_pb2 import ( +from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 CreateNodeRequest, DeleteNodeRequest, PullTaskInsRequest, PushTaskResRequest, ) -from flwr.server.fleet.message_handler import message_handler -from flwr.server.state import State +from flwr.server.superlink.fleet.message_handler import message_handler +from flwr.server.superlink.state import State try: from starlette.applications import Starlette diff --git a/src/py/flwr/server/state/__init__.py b/src/py/flwr/server/superlink/state/__init__.py similarity index 100% rename from src/py/flwr/server/state/__init__.py rename to src/py/flwr/server/superlink/state/__init__.py diff --git a/src/py/flwr/server/state/in_memory_state.py b/src/py/flwr/server/superlink/state/in_memory_state.py similarity index 98% rename from src/py/flwr/server/state/in_memory_state.py rename to src/py/flwr/server/superlink/state/in_memory_state.py index f8352fcfb091..ecb39f18300a 100644 --- a/src/py/flwr/server/state/in_memory_state.py +++ b/src/py/flwr/server/superlink/state/in_memory_state.py @@ -22,8 +22,8 @@ from uuid import UUID, uuid4 from flwr.common import log, now -from flwr.proto.task_pb2 import TaskIns, TaskRes -from flwr.server.state.state import State +from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 +from flwr.server.superlink.state.state import 
State from flwr.server.utils import validate_task_ins_or_res diff --git a/src/py/flwr/server/state/sqlite_state.py b/src/py/flwr/server/superlink/state/sqlite_state.py similarity index 95% rename from src/py/flwr/server/state/sqlite_state.py rename to src/py/flwr/server/superlink/state/sqlite_state.py index 26f326819971..224c16cdf013 100644 --- a/src/py/flwr/server/state/sqlite_state.py +++ b/src/py/flwr/server/superlink/state/sqlite_state.py @@ -24,9 +24,9 @@ from uuid import UUID, uuid4 from flwr.common import log, now -from flwr.proto.node_pb2 import Node -from flwr.proto.task_pb2 import Task, TaskIns, TaskRes -from flwr.proto.transport_pb2 import ClientMessage, ServerMessage +from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.recordset_pb2 import RecordSet # pylint: disable=E0611 +from flwr.proto.task_pb2 import Task, TaskIns, TaskRes # pylint: disable=E0611 from flwr.server.utils.validator import validate_task_ins_or_res from .state import State @@ -47,7 +47,7 @@ CREATE TABLE IF NOT EXISTS task_ins( task_id TEXT UNIQUE, group_id TEXT, - run_id INTEGER, + run_id INTEGER, producer_anonymous BOOLEAN, producer_node_id INTEGER, consumer_anonymous BOOLEAN, @@ -56,8 +56,8 @@ delivered_at TEXT, ttl TEXT, ancestry TEXT, - legacy_server_message BLOB, - legacy_client_message BLOB, + task_type TEXT, + recordset BLOB, FOREIGN KEY(run_id) REFERENCES run(run_id) ); """ @@ -67,7 +67,7 @@ CREATE TABLE IF NOT EXISTS task_res( task_id TEXT UNIQUE, group_id TEXT, - run_id INTEGER, + run_id INTEGER, producer_anonymous BOOLEAN, producer_node_id INTEGER, consumer_anonymous BOOLEAN, @@ -76,8 +76,8 @@ delivered_at TEXT, ttl TEXT, ancestry TEXT, - legacy_server_message BLOB, - legacy_client_message BLOB, + task_type TEXT, + recordset BLOB, FOREIGN KEY(run_id) REFERENCES run(run_id) ); """ @@ -546,10 +546,8 @@ def task_ins_to_dict(task_msg: TaskIns) -> Dict[str, Any]: "delivered_at": task_msg.task.delivered_at, "ttl": task_msg.task.ttl, "ancestry": 
",".join(task_msg.task.ancestry), - "legacy_server_message": ( - task_msg.task.legacy_server_message.SerializeToString() - ), - "legacy_client_message": None, + "task_type": task_msg.task.task_type, + "recordset": task_msg.task.recordset.SerializeToString(), } return result @@ -568,18 +566,16 @@ def task_res_to_dict(task_msg: TaskRes) -> Dict[str, Any]: "delivered_at": task_msg.task.delivered_at, "ttl": task_msg.task.ttl, "ancestry": ",".join(task_msg.task.ancestry), - "legacy_server_message": None, - "legacy_client_message": ( - task_msg.task.legacy_client_message.SerializeToString() - ), + "task_type": task_msg.task.task_type, + "recordset": task_msg.task.recordset.SerializeToString(), } return result def dict_to_task_ins(task_dict: Dict[str, Any]) -> TaskIns: """Turn task_dict into protobuf message.""" - server_message = ServerMessage() - server_message.ParseFromString(task_dict["legacy_server_message"]) + recordset = RecordSet() + recordset.ParseFromString(task_dict["recordset"]) result = TaskIns( task_id=task_dict["task_id"], @@ -598,7 +594,8 @@ def dict_to_task_ins(task_dict: Dict[str, Any]) -> TaskIns: delivered_at=task_dict["delivered_at"], ttl=task_dict["ttl"], ancestry=task_dict["ancestry"].split(","), - legacy_server_message=server_message, + task_type=task_dict["task_type"], + recordset=recordset, ), ) return result @@ -606,8 +603,8 @@ def dict_to_task_ins(task_dict: Dict[str, Any]) -> TaskIns: def dict_to_task_res(task_dict: Dict[str, Any]) -> TaskRes: """Turn task_dict into protobuf message.""" - client_message = ClientMessage() - client_message.ParseFromString(task_dict["legacy_client_message"]) + recordset = RecordSet() + recordset.ParseFromString(task_dict["recordset"]) result = TaskRes( task_id=task_dict["task_id"], @@ -626,7 +623,8 @@ def dict_to_task_res(task_dict: Dict[str, Any]) -> TaskRes: delivered_at=task_dict["delivered_at"], ttl=task_dict["ttl"], ancestry=task_dict["ancestry"].split(","), - legacy_client_message=client_message, + 
task_type=task_dict["task_type"], + recordset=recordset, ), ) return result diff --git a/src/py/flwr/server/state/sqlite_state_test.py b/src/py/flwr/server/superlink/state/sqlite_state_test.py similarity index 89% rename from src/py/flwr/server/state/sqlite_state_test.py rename to src/py/flwr/server/superlink/state/sqlite_state_test.py index a3f899386011..9eef71e396e3 100644 --- a/src/py/flwr/server/state/sqlite_state_test.py +++ b/src/py/flwr/server/superlink/state/sqlite_state_test.py @@ -17,8 +17,8 @@ import unittest -from flwr.server.state.sqlite_state import task_ins_to_dict -from flwr.server.state.state_test import create_task_ins +from flwr.server.superlink.state.sqlite_state import task_ins_to_dict +from flwr.server.superlink.state.state_test import create_task_ins class SqliteStateTest(unittest.TestCase): @@ -40,8 +40,8 @@ def test_ins_res_to_dict(self) -> None: "delivered_at", "ttl", "ancestry", - "legacy_server_message", - "legacy_client_message", + "task_type", + "recordset", ] # Execute diff --git a/src/py/flwr/server/state/state.py b/src/py/flwr/server/superlink/state/state.py similarity index 98% rename from src/py/flwr/server/state/state.py rename to src/py/flwr/server/superlink/state/state.py index 7ab3b6bc0848..9337ae6d8624 100644 --- a/src/py/flwr/server/state/state.py +++ b/src/py/flwr/server/superlink/state/state.py @@ -19,7 +19,7 @@ from typing import List, Optional, Set from uuid import UUID -from flwr.proto.task_pb2 import TaskIns, TaskRes +from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 class State(abc.ABC): diff --git a/src/py/flwr/server/state/state_factory.py b/src/py/flwr/server/superlink/state/state_factory.py similarity index 100% rename from src/py/flwr/server/state/state_factory.py rename to src/py/flwr/server/superlink/state/state_factory.py diff --git a/src/py/flwr/server/state/state_test.py b/src/py/flwr/server/superlink/state/state_test.py similarity index 96% rename from 
src/py/flwr/server/state/state_test.py rename to src/py/flwr/server/superlink/state/state_test.py index 204b4ba97b5f..d0470a7ce7f7 100644 --- a/src/py/flwr/server/state/state_test.py +++ b/src/py/flwr/server/superlink/state/state_test.py @@ -22,10 +22,10 @@ from typing import List from uuid import uuid4 -from flwr.proto.node_pb2 import Node -from flwr.proto.task_pb2 import Task, TaskIns, TaskRes -from flwr.proto.transport_pb2 import ClientMessage, ServerMessage -from flwr.server.state import InMemoryState, SqliteState, State +from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.recordset_pb2 import RecordSet # pylint: disable=E0611 +from flwr.proto.task_pb2 import Task, TaskIns, TaskRes # pylint: disable=E0611 +from flwr.server.superlink.state import InMemoryState, SqliteState, State class StateTest(unittest.TestCase): @@ -418,9 +418,8 @@ def create_task_ins( delivered_at=delivered_at, producer=Node(node_id=0, anonymous=True), consumer=consumer, - legacy_server_message=ServerMessage( - reconnect_ins=ServerMessage.ReconnectIns() - ), + task_type="mock", + recordset=RecordSet(parameters={}, metrics={}, configs={}), ), ) return task @@ -441,9 +440,8 @@ def create_task_res( producer=Node(node_id=producer_node_id, anonymous=anonymous), consumer=Node(node_id=0, anonymous=True), ancestry=ancestry, - legacy_client_message=ClientMessage( - disconnect_res=ClientMessage.DisconnectRes() - ), + task_type="mock", + recordset=RecordSet(parameters={}, metrics={}, configs={}), ), ) return task_res diff --git a/src/py/flwr/server/typing.py b/src/py/flwr/server/typing.py new file mode 100644 index 000000000000..728121c2eddf --- /dev/null +++ b/src/py/flwr/server/typing.py @@ -0,0 +1,23 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Custom types for Flower servers.""" + + +from typing import Callable + +from flwr.common.context import Context +from flwr.server.driver import Driver + +ServerAppCallable = Callable[[Driver, Context], None] diff --git a/src/py/flwr/server/utils/validator.py b/src/py/flwr/server/utils/validator.py index fd89a01e4a4e..f9b271beafdc 100644 --- a/src/py/flwr/server/utils/validator.py +++ b/src/py/flwr/server/utils/validator.py @@ -17,7 +17,7 @@ from typing import List, Union -from flwr.proto.task_pb2 import TaskIns, TaskRes +from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 # pylint: disable-next=too-many-branches,too-many-statements @@ -64,21 +64,10 @@ def validate_task_ins_or_res(tasks_ins_res: Union[TaskIns, TaskRes]) -> List[str validation_errors.append("non-anonymous consumer MUST provide a `node_id`") # Content check - has_fields = { - "sa": tasks_ins_res.task.HasField("sa"), - "legacy_server_message": tasks_ins_res.task.HasField( - "legacy_server_message" - ), - } - if not (has_fields["sa"] or has_fields["legacy_server_message"]): - err_msg = ", ".join([f"`{field}`" for field in has_fields]) - validation_errors.append( - f"`task` in `TaskIns` must set at least one of fields {{{err_msg}}}" - ) - if has_fields[ - "legacy_server_message" - ] and not tasks_ins_res.task.legacy_server_message.HasField("msg"): - validation_errors.append("`legacy_server_message` does not set field `msg`") + if tasks_ins_res.task.task_type == "": + 
validation_errors.append("`task_type` MUST be set") + if not tasks_ins_res.task.HasField("recordset"): + validation_errors.append("`recordset` MUST be set") # Ancestors if len(tasks_ins_res.task.ancestry) != 0: @@ -115,21 +104,10 @@ def validate_task_ins_or_res(tasks_ins_res: Union[TaskIns, TaskRes]) -> List[str validation_errors.append("non-anonymous consumer MUST provide a `node_id`") # Content check - has_fields = { - "sa": tasks_ins_res.task.HasField("sa"), - "legacy_client_message": tasks_ins_res.task.HasField( - "legacy_client_message" - ), - } - if not (has_fields["sa"] or has_fields["legacy_client_message"]): - err_msg = ", ".join([f"`{field}`" for field in has_fields]) - validation_errors.append( - f"`task` in `TaskRes` must set at least one of fields {{{err_msg}}}" - ) - if has_fields[ - "legacy_client_message" - ] and not tasks_ins_res.task.legacy_client_message.HasField("msg"): - validation_errors.append("`legacy_client_message` does not set field `msg`") + if tasks_ins_res.task.task_type == "": + validation_errors.append("`task_type` MUST be set") + if not tasks_ins_res.task.HasField("recordset"): + validation_errors.append("`recordset` MUST be set") # Ancestors if len(tasks_ins_res.task.ancestry) == 0: diff --git a/src/py/flwr/server/utils/validator_test.py b/src/py/flwr/server/utils/validator_test.py index 6627cc9a7887..8e0849508020 100644 --- a/src/py/flwr/server/utils/validator_test.py +++ b/src/py/flwr/server/utils/validator_test.py @@ -18,9 +18,9 @@ import unittest from typing import List, Tuple -from flwr.proto.node_pb2 import Node -from flwr.proto.task_pb2 import SecureAggregation, Task, TaskIns, TaskRes -from flwr.proto.transport_pb2 import ClientMessage, ServerMessage +from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.recordset_pb2 import RecordSet # pylint: disable=E0611 +from flwr.proto.task_pb2 import Task, TaskIns, TaskRes # pylint: disable=E0611 from .validator import validate_task_ins_or_res @@ -37,16 +37,12 
@@ def test_task_ins(self) -> None: # Execute & Assert for consumer_node_id, anonymous in valid_ins: - msg = create_task_ins( - consumer_node_id, anonymous, has_legacy_server_message=True - ) + msg = create_task_ins(consumer_node_id, anonymous) val_errors = validate_task_ins_or_res(msg) self.assertFalse(val_errors) for consumer_node_id, anonymous in invalid_ins: - msg = create_task_ins( - consumer_node_id, anonymous, has_legacy_server_message=True - ) + msg = create_task_ins(consumer_node_id, anonymous) val_errors = validate_task_ins_or_res(msg) self.assertTrue(val_errors) @@ -70,61 +66,19 @@ def test_is_valid_task_res(self) -> None: # Execute & Assert for producer_node_id, anonymous, ancestry in valid_res: - msg = create_task_res( - producer_node_id, anonymous, ancestry, has_legacy_client_message=True - ) + msg = create_task_res(producer_node_id, anonymous, ancestry) val_errors = validate_task_ins_or_res(msg) self.assertFalse(val_errors) for producer_node_id, anonymous, ancestry in invalid_res: - msg = create_task_res( - producer_node_id, anonymous, ancestry, has_legacy_client_message=True - ) + msg = create_task_res(producer_node_id, anonymous, ancestry) val_errors = validate_task_ins_or_res(msg) self.assertTrue(val_errors, (producer_node_id, anonymous, ancestry)) - def test_task_ins_secure_aggregation(self) -> None: - """Test is_valid task_ins for Secure Aggregation.""" - # Prepare - # (has_legacy_server_message, has_sa) - valid_ins = [(True, True), (False, True)] - invalid_ins = [(False, False)] - - # Execute & Assert - for has_legacy_server_message, has_sa in valid_ins: - msg = create_task_ins(1, False, has_legacy_server_message, has_sa) - val_errors = validate_task_ins_or_res(msg) - self.assertFalse(val_errors) - - for has_legacy_server_message, has_sa in invalid_ins: - msg = create_task_ins(1, False, has_legacy_server_message, has_sa) - val_errors = validate_task_ins_or_res(msg) - self.assertTrue(val_errors) - - def test_task_res_secure_aggregation(self) -> 
None: - """Test is_valid task_res for Secure Aggregation.""" - # Prepare - # (has_legacy_server_message, has_sa) - valid_res = [(True, True), (False, True)] - invalid_res = [(False, False)] - - # Execute & Assert - for has_legacy_client_message, has_sa in valid_res: - msg = create_task_res(0, True, ["1"], has_legacy_client_message, has_sa) - val_errors = validate_task_ins_or_res(msg) - self.assertFalse(val_errors) - - for has_legacy_client_message, has_sa in invalid_res: - msg = create_task_res(0, True, ["1"], has_legacy_client_message, has_sa) - val_errors = validate_task_ins_or_res(msg) - self.assertTrue(val_errors) - def create_task_ins( consumer_node_id: int, anonymous: bool, - has_legacy_server_message: bool = False, - has_sa: bool = False, delivered_at: str = "", ) -> TaskIns: """Create a TaskIns for testing.""" @@ -140,12 +94,8 @@ def create_task_ins( delivered_at=delivered_at, producer=Node(node_id=0, anonymous=True), consumer=consumer, - legacy_server_message=ServerMessage( - reconnect_ins=ServerMessage.ReconnectIns() - ) - if has_legacy_server_message - else None, - sa=SecureAggregation(named_values={}) if has_sa else None, + task_type="mock", + recordset=RecordSet(parameters={}, metrics={}, configs={}), ), ) return task @@ -155,8 +105,6 @@ def create_task_res( producer_node_id: int, anonymous: bool, ancestry: List[str], - has_legacy_client_message: bool = False, - has_sa: bool = False, ) -> TaskRes: """Create a TaskRes for testing.""" task_res = TaskRes( @@ -167,12 +115,8 @@ def create_task_res( producer=Node(node_id=producer_node_id, anonymous=anonymous), consumer=Node(node_id=0, anonymous=True), ancestry=ancestry, - legacy_client_message=ClientMessage( - disconnect_res=ClientMessage.DisconnectRes() - ) - if has_legacy_client_message - else None, - sa=SecureAggregation(named_values={}) if has_sa else None, + task_type="mock", + recordset=RecordSet(parameters={}, metrics={}, configs={}), ), ) return task_res diff --git a/src/py/flwr/simulation/app.py 
b/src/py/flwr/simulation/app.py index 6a18a258ac60..b159042588c9 100644 --- a/src/py/flwr/simulation/app.py +++ b/src/py/flwr/simulation/app.py @@ -29,9 +29,10 @@ from flwr.common import EventType, event from flwr.common.logger import log from flwr.server import Server -from flwr.server.app import ServerConfig, init_defaults, run_fl +from flwr.server.app import init_defaults, run_fl from flwr.server.client_manager import ClientManager from flwr.server.history import History +from flwr.server.server_config import ServerConfig from flwr.server.strategy import Strategy from flwr.simulation.ray_transport.ray_actor import ( DefaultActor, diff --git a/src/py/flwr/simulation/ray_transport/ray_actor.py b/src/py/flwr/simulation/ray_transport/ray_actor.py index 38af3f08daa2..853566a4cbeb 100644 --- a/src/py/flwr/simulation/ray_transport/ray_actor.py +++ b/src/py/flwr/simulation/ray_transport/ray_actor.py @@ -27,7 +27,7 @@ from flwr import common from flwr.client import Client, ClientFn -from flwr.client.run_state import RunState +from flwr.common.context import Context from flwr.common.logger import log from flwr.simulation.ray_transport.utils import check_clientfn_returns_client @@ -61,8 +61,8 @@ def run( client_fn: ClientFn, job_fn: JobFn, cid: str, - state: RunState, - ) -> Tuple[str, ClientRes, RunState]: + context: Context, + ) -> Tuple[str, ClientRes, Context]: """Run a client run.""" # Execute tasks and return result # return also cid which is needed to ensure results @@ -70,12 +70,12 @@ def run( try: # Instantiate client (check 'Client' type is returned) client = check_clientfn_returns_client(client_fn(cid)) - # Inject state - client.set_state(state) + # Inject context + client.set_context(context) # Run client job job_results = job_fn(client) - # Retrieve state (potentially updated) - updated_state = client.get_state() + # Retrieve context (potentially updated) + updated_context = client.get_context() except Exception as ex: client_trace = traceback.format_exc() 
message = ( @@ -89,7 +89,7 @@ def run( ) raise ClientException(str(message)) from ex - return cid, job_results, updated_state + return cid, job_results, updated_context @ray.remote @@ -237,16 +237,16 @@ def add_actors_to_pool(self, num_actors: int) -> None: self._idle_actors.extend(new_actors) self.num_actors += num_actors - def submit(self, fn: Any, value: Tuple[ClientFn, JobFn, str, RunState]) -> None: + def submit(self, fn: Any, value: Tuple[ClientFn, JobFn, str, Context]) -> None: """Take idle actor and assign it a client run. Submit a job to an actor by first removing it from the list of idle actors, then check if this actor was flagged to be removed from the pool """ - client_fn, job_fn, cid, state = value + client_fn, job_fn, cid, context = value actor = self._idle_actors.pop() if self._check_and_remove_actor_from_pool(actor): - future = fn(actor, client_fn, job_fn, cid, state) + future = fn(actor, client_fn, job_fn, cid, context) future_key = tuple(future) if isinstance(future, List) else future self._future_to_actor[future_key] = (self._next_task_index, actor, cid) self._next_task_index += 1 @@ -255,7 +255,7 @@ def submit(self, fn: Any, value: Tuple[ClientFn, JobFn, str, RunState]) -> None: self._cid_to_future[cid]["future"] = future_key def submit_client_job( - self, actor_fn: Any, job: Tuple[ClientFn, JobFn, str, RunState] + self, actor_fn: Any, job: Tuple[ClientFn, JobFn, str, Context] ) -> None: """Submit a job while tracking client ids.""" _, _, cid, _ = job @@ -295,17 +295,17 @@ def _is_future_ready(self, cid: str) -> bool: return self._cid_to_future[cid]["ready"] # type: ignore - def _fetch_future_result(self, cid: str) -> Tuple[ClientRes, RunState]: - """Fetch result and updated state for a VirtualClient from Object Store. + def _fetch_future_result(self, cid: str) -> Tuple[ClientRes, Context]: + """Fetch result and updated context for a VirtualClient from Object Store. 
The job submitted by the ClientProxy interfacing with client with cid=cid is ready. Here we fetch it from the object store and return. """ try: future: ObjectRef[Any] = self._cid_to_future[cid]["future"] # type: ignore - res_cid, res, updated_state = ray.get( + res_cid, res, updated_context = ray.get( future - ) # type: (str, ClientRes, RunState) + ) # type: (str, ClientRes, Context) except ray.exceptions.RayActorError as ex: log(ERROR, ex) if hasattr(ex, "actor_id"): @@ -322,7 +322,7 @@ def _fetch_future_result(self, cid: str) -> Tuple[ClientRes, RunState]: # Reset mapping self._reset_cid_to_future_dict(cid) - return res, updated_state + return res, updated_context def _flag_actor_for_removal(self, actor_id_hex: str) -> None: """Flag actor that should be removed from pool.""" @@ -409,7 +409,7 @@ def process_unordered_future(self, timeout: Optional[float] = None) -> None: def get_client_result( self, cid: str, timeout: Optional[float] - ) -> Tuple[ClientRes, RunState]: + ) -> Tuple[ClientRes, Context]: """Get result from VirtualClient with specific cid.""" # Loop until all jobs submitted to the pool are completed. 
Break early # if the result for the ClientProxy calling this method is ready @@ -421,5 +421,5 @@ def get_client_result( break # Fetch result belonging to the VirtualClient calling this method - # Return both result from tasks and (potentially) updated run state + # Return both result from tasks and (potentially) updated run context return self._fetch_future_result(cid) diff --git a/src/py/flwr/simulation/ray_transport/ray_client_proxy.py b/src/py/flwr/simulation/ray_transport/ray_client_proxy.py index 5c05850dfd2f..894012dc6d70 100644 --- a/src/py/flwr/simulation/ray_transport/ray_client_proxy.py +++ b/src/py/flwr/simulation/ray_transport/ray_client_proxy.py @@ -138,20 +138,20 @@ def _submit_job(self, job_fn: JobFn, timeout: Optional[float]) -> ClientRes: run_id = 0 # Register state - self.proxy_state.register_runstate(run_id=run_id) + self.proxy_state.register_context(run_id=run_id) # Retrieve state - state = self.proxy_state.retrieve_runstate(run_id=run_id) + state = self.proxy_state.retrieve_context(run_id=run_id) try: self.actor_pool.submit_client_job( lambda a, c_fn, j_fn, cid, state: a.run.remote(c_fn, j_fn, cid, state), (self.client_fn, job_fn, self.cid, state), ) - res, updated_state = self.actor_pool.get_client_result(self.cid, timeout) + res, updated_context = self.actor_pool.get_client_result(self.cid, timeout) # Update state - self.proxy_state.update_runstate(run_id=run_id, run_state=updated_state) + self.proxy_state.update_context(run_id=run_id, context=updated_context) except Exception as ex: if self.actor_pool.num_actors == 0: diff --git a/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py b/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py index 9df71635b949..b380d37d01c8 100644 --- a/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py +++ b/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py @@ -22,8 +22,10 @@ import ray from flwr.client import Client, NumPyClient -from flwr.client.run_state import 
RunState from flwr.common import Code, GetPropertiesRes, Status +from flwr.common.configsrecord import ConfigsRecord +from flwr.common.context import Context +from flwr.common.recordset import RecordSet from flwr.simulation.ray_transport.ray_actor import ( ClientRes, DefaultActor, @@ -53,8 +55,10 @@ def job_fn(cid: str) -> JobFn: # pragma: no cover def cid_times_pi(client: Client) -> ClientRes: # pylint: disable=unused-argument result = int(cid) * pi - # store something in state - client.numpy_client.state.state["result"] = str(result) # type: ignore + # store something in context + client.numpy_client.context.state.set_configs( # type: ignore + "result", record=ConfigsRecord({"result": str(result)}) + ) # now let's convert it to a GetPropertiesRes response return GetPropertiesRes( @@ -125,9 +129,9 @@ def test_cid_consistency_all_submit_first_run_consistency() -> None: shuffle(proxies) for prox in proxies: # Register state - prox.proxy_state.register_runstate(run_id=run_id) + prox.proxy_state.register_context(run_id=run_id) # Retrieve state - state = prox.proxy_state.retrieve_runstate(run_id=run_id) + state = prox.proxy_state.retrieve_context(run_id=run_id) job = job_fn(prox.cid) prox.actor_pool.submit_client_job( @@ -138,13 +142,16 @@ def test_cid_consistency_all_submit_first_run_consistency() -> None: # fetch results one at a time shuffle(proxies) for prox in proxies: - res, updated_state = prox.actor_pool.get_client_result(prox.cid, timeout=None) - prox.proxy_state.update_runstate(run_id, run_state=updated_state) + res, updated_context = prox.actor_pool.get_client_result(prox.cid, timeout=None) + prox.proxy_state.update_context(run_id, context=updated_context) res = cast(GetPropertiesRes, res) + assert int(prox.cid) * pi == res.properties["result"] assert ( str(int(prox.cid) * pi) - == prox.proxy_state.retrieve_runstate(run_id).state["result"] + == prox.proxy_state.retrieve_context(run_id).state.get_configs("result")[ + "result" + ] ) ray.shutdown() @@ -162,7 
+169,7 @@ def test_cid_consistency_without_proxies() -> None: job = job_fn(cid) pool.submit_client_job( lambda a, c_fn, j_fn, cid_, state: a.run.remote(c_fn, j_fn, cid_, state), - (get_dummy_client, job, cid, RunState(state={})), + (get_dummy_client, job, cid, Context(state=RecordSet())), ) # fetch results one at a time diff --git a/src/py/flwr/simulation/ray_transport/utils.py b/src/py/flwr/simulation/ray_transport/utils.py index 41aa8049eaf0..dd9fb6b2aa85 100644 --- a/src/py/flwr/simulation/ray_transport/utils.py +++ b/src/py/flwr/simulation/ray_transport/utils.py @@ -15,6 +15,7 @@ """Utilities for Actors in the Virtual Client Engine.""" import traceback +import warnings from logging import ERROR from flwr.client import Client @@ -26,7 +27,7 @@ TF = None # Display Deprecation warning once -# warnings.filterwarnings("once", category=DeprecationWarning) +warnings.filterwarnings("once", category=DeprecationWarning) def enable_tf_gpu_growth() -> None: @@ -69,15 +70,15 @@ def check_clientfn_returns_client(client: Client) -> Client: the client internally to `Client` by calling `.to_client()`. """ if not isinstance(client, Client): - # mssg = ( - # " Ensure your client is of type `Client`. Please convert it" - # " using the `.to_client()` method before returning it" - # " in the `client_fn` you pass to `start_simulation`." - # " We have applied this conversion on your behalf." - # " Not returning a `Client` might trigger an error in future" - # " versions of Flower." - # ) + mssg = ( + " Ensure your client is of type `flwr.client.Client`. Please convert it" + " using the `.to_client()` method before returning it" + " in the `client_fn` you pass to `start_simulation`." + " We have applied this conversion on your behalf." + " Not returning a `Client` might trigger an error in future" + " versions of Flower." 
+ ) - # warnings.warn(mssg, DeprecationWarning, stacklevel=2) + warnings.warn(mssg, DeprecationWarning, stacklevel=2) client = client.to_client() return client diff --git a/src/py/flwr_tool/init_py_check.py b/src/py/flwr_tool/init_py_check.py index 8cdc2e0ab5be..67425139f991 100755 --- a/src/py/flwr_tool/init_py_check.py +++ b/src/py/flwr_tool/init_py_check.py @@ -36,7 +36,7 @@ def check_missing_init_files(absolute_path: str) -> None: if __name__ == "__main__": if len(sys.argv) == 0: - raise Exception( + raise Exception( # pylint: disable=W0719 "Please provide at least one directory path relative to your current working directory." ) for i, _ in enumerate(sys.argv): diff --git a/src/py/flwr_tool/protoc.py b/src/py/flwr_tool/protoc.py index 5d3ce942c1e0..b0b078c2eae4 100644 --- a/src/py/flwr_tool/protoc.py +++ b/src/py/flwr_tool/protoc.py @@ -51,7 +51,7 @@ def compile_all() -> None: exit_code = protoc.main(command) if exit_code != 0: - raise Exception(f"Error: {command} failed") + raise Exception(f"Error: {command} failed") # pylint: disable=W0719 if __name__ == "__main__": diff --git a/src/py/flwr_tool/protoc_test.py b/src/py/flwr_tool/protoc_test.py index 57ca3ff423c2..607d808c8497 100644 --- a/src/py/flwr_tool/protoc_test.py +++ b/src/py/flwr_tool/protoc_test.py @@ -28,4 +28,4 @@ def test_directories() -> None: def test_proto_file_count() -> None: """Test if the correct number of proto files were captured by the glob.""" - assert len(PROTO_FILES) == 5 + assert len(PROTO_FILES) == 6 diff --git a/src/py/flwr_tool/update_changelog.py b/src/py/flwr_tool/update_changelog.py index bbd5c7f3dc7b..a158cca21765 100644 --- a/src/py/flwr_tool/update_changelog.py +++ b/src/py/flwr_tool/update_changelog.py @@ -62,7 +62,7 @@ def _extract_changelog_entry(pr_info): f"{CHANGELOG_SECTION_HEADER}(.+?)(?=##|$)", pr_info.body, re.DOTALL ) if not entry_match: - return None, "general" + return None, None entry_text = entry_match.group(1).strip()