From 39348d532ed0a99dc2417df9d5ed2f6a3730201d Mon Sep 17 00:00:00 2001 From: Chong Shen Ng Date: Wed, 22 May 2024 12:23:14 +0100 Subject: [PATCH 01/23] docs(framework:skip) Update link for `run-simulation-from-cli` (#3382) --------- Co-authored-by: Daniel J. Beutel --- doc/source/conf.py | 1 + doc/source/how-to-upgrade-to-flower-next.rst | 7 ++++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 1c53a827dcf5..feb173c0efa8 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -248,6 +248,7 @@ def find_test_modules(package_path): "quickstart-mxnet": "index.html", "tutorial-quickstart-mxnet": "index.html", "example-mxnet-walk-through": "index.html", + "ref-api/flwr.simulation.run_simulation_from_cli.html": "index.html", } # -- Options for HTML output ------------------------------------------------- diff --git a/doc/source/how-to-upgrade-to-flower-next.rst b/doc/source/how-to-upgrade-to-flower-next.rst index 8c8f3c3f8fd7..44fad51546b6 100644 --- a/doc/source/how-to-upgrade-to-flower-next.rst +++ b/doc/source/how-to-upgrade-to-flower-next.rst @@ -22,21 +22,22 @@ Let's dive in! .. |startclient_link| replace:: ``start_client()`` .. |startserver_link| replace:: ``start_server()`` .. |startsim_link| replace:: ``start_simulation()`` -.. |runsimcli_link| replace:: ``flower-simulation`` .. |runsim_link| replace:: ``run_simulation()`` .. |flowernext_superlink_link| replace:: ``flower-superlink`` .. |flowernext_clientapp_link| replace:: ``flower-client-app`` .. |flowernext_serverapp_link| replace:: ``flower-server-app`` +.. |flower_simulation_link| replace:: ``flower-simulation`` .. _clientapp_link: ref-api/flwr.client.ClientApp.html .. _serverapp_link: ref-api/flwr.server.ServerApp.html .. _startclient_link: ref-api/flwr.client.start_client.html .. _startserver_link: ref-api/flwr.server.start_server.html .. _startsim_link: ref-api/flwr.simulation.start_simulation.html -.. _runsimcli_link: ref-api/flwr.simulation.run_simulation_from_cli.html .. _runsim_link: ref-api/flwr.simulation.run_simulation.html .. _flowernext_superlink_link: ref-api-cli.html#flower-superlink .. _flowernext_clientapp_link: ref-api-cli.html#flower-client-app .. _flowernext_serverapp_link: ref-api-cli.html#flower-server-app +.. _flower_simulation_link: ref-api-cli.html#flower-simulation + Install update -------------- @@ -228,7 +229,7 @@ Simulation in CLI ... ) -- Run |runsimcli_link|_ in CLI and point to the ``server_app`` / ``client_app`` object in the +- Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / ``client_app`` object in the code instead of executing the Python script. 
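As a sketch, such an invocation could look like the following (the ``sim.py`` module name, the ``server_app`` / ``client_app`` object names, and the exact flag spellings are assumptions, not part of this patch; the ``flower-simulation`` reference linked above is authoritative)::

    # Hypothetical invocation: load both apps from sim.py and simulate two SuperNodes
    flower-simulation \
        --server-app=sim:server_app \
        --client-app=sim:client_app \
        --num-supernodes=2
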
Here's an example (assuming the ``server_app`` and ``client_app`` objects are in a ``sim.py`` module): From b5b662f4b27698874518fa95723ff166cbf3a217 Mon Sep 17 00:00:00 2001 From: Charles Beauville Date: Wed, 22 May 2024 15:33:01 +0200 Subject: [PATCH 02/23] refactor(framework:skip) Move simulation defaults to toml files in CLI (#3455) --- src/py/flwr/cli/build.py | 6 ++--- src/py/flwr/cli/config_utils.py | 24 +------------------ .../new/templates/app/pyproject.hf.toml.tpl | 6 +++++ .../new/templates/app/pyproject.mlx.toml.tpl | 6 +++++ .../templates/app/pyproject.numpy.toml.tpl | 6 +++++ .../templates/app/pyproject.pytorch.toml.tpl | 6 +++++ .../templates/app/pyproject.sklearn.toml.tpl | 6 +++++ .../app/pyproject.tensorflow.toml.tpl | 6 +++++ src/py/flwr/cli/run/run.py | 24 +++++++++++++++---- 9 files changed, 59 insertions(+), 31 deletions(-) diff --git a/src/py/flwr/cli/build.py b/src/py/flwr/cli/build.py index 37753e5b57b1..ca7ab8686c5c 100644 --- a/src/py/flwr/cli/build.py +++ b/src/py/flwr/cli/build.py @@ -24,7 +24,7 @@ import typer from typing_extensions import Annotated -from .config_utils import load_and_validate_with_defaults +from .config_utils import load_and_validate from .utils import is_valid_project_name @@ -67,9 +67,7 @@ def build( ) raise typer.Exit(code=1) - conf, errors, warnings = load_and_validate_with_defaults( - directory / "pyproject.toml" - ) + conf, errors, warnings = load_and_validate(directory / "pyproject.toml") if conf is None: typer.secho( "Project configuration could not be loaded.\npyproject.toml is invalid:\n" diff --git a/src/py/flwr/cli/config_utils.py b/src/py/flwr/cli/config_utils.py index bca35a51dde5..d943d87e3812 100644 --- a/src/py/flwr/cli/config_utils.py +++ b/src/py/flwr/cli/config_utils.py @@ -22,7 +22,7 @@ from flwr.common import object_ref -def load_and_validate_with_defaults( +def load_and_validate( path: Optional[Path] = None, ) -> Tuple[Optional[Dict[str, Any]], List[str], List[str]]: """Load and validate pyproject.toml as dict. 
@@ -47,14 +47,6 @@ def load_and_validate_with_defaults( if not is_valid: return (None, errors, warnings) - # Apply defaults - defaults = { - "flower": { - "engine": {"name": "simulation", "simulation": {"supernode": {"num": 2}}} - } - } - config = apply_defaults(config, defaults) - return (config, errors, warnings) @@ -129,17 +121,3 @@ def validate(config: Dict[str, Any]) -> Tuple[bool, List[str], List[str]]: return False, [reason], [] return True, [], [] - - -def apply_defaults( - config: Dict[str, Any], - defaults: Dict[str, Any], -) -> Dict[str, Any]: - """Apply defaults to config.""" - for key in defaults: - if key in config: - if isinstance(config[key], dict) and isinstance(defaults[key], dict): - apply_defaults(config[key], defaults[key]) - else: - config[key] = defaults[key] - return config diff --git a/src/py/flwr/cli/new/templates/app/pyproject.hf.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.hf.toml.tpl index 3bd980b2340e..6a235b7b15cf 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.hf.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.hf.toml.tpl @@ -29,3 +29,9 @@ publisher = "$username" [flower.components] serverapp = "$import_name.server:app" clientapp = "$import_name.client:app" + +[flower.engine] +name = "simulation" + +[flower.engine.simulation.supernode] +num = 2 diff --git a/src/py/flwr/cli/new/templates/app/pyproject.mlx.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.mlx.toml.tpl index 0f56dc7eacc5..321dfaab41cc 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.mlx.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.mlx.toml.tpl @@ -26,3 +26,9 @@ publisher = "$username" [flower.components] serverapp = "$import_name.server:app" clientapp = "$import_name.client:app" + +[flower.engine] +name = "simulation" + +[flower.engine.simulation.supernode] +num = 2 diff --git a/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl index bbf8463054f4..6b1c40d12561 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl @@ -24,3 +24,9 @@ publisher = "$username" [flower.components] serverapp = "$import_name.server:app" clientapp = "$import_name.client:app" + +[flower.engine] +name = "simulation" + +[flower.engine.simulation.supernode] +num = 2 diff --git a/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl index a41ce1a6a4c6..df404d178495 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl @@ -26,3 +26,9 @@ publisher = "$username" [flower.components] serverapp = "$import_name.server:app" clientapp = "$import_name.client:app" + +[flower.engine] +name = "simulation" + +[flower.engine.simulation.supernode] +num = 2 diff --git a/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl index 25645f0cde1a..7ee655967c4a 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl @@ -25,3 +25,9 @@ publisher = "$username" [flower.components] serverapp = "$import_name.server:app" clientapp = "$import_name.client:app" + +[flower.engine] +name = "simulation" + +[flower.engine.simulation.supernode] +num = 2 diff --git a/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl 
b/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl index 3968e3aa327b..f453bce668fa 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl @@ -25,3 +25,9 @@ publisher = "$username" [flower.components] serverapp = "$import_name.server:app" clientapp = "$import_name.client:app" + +[flower.engine] +name = "simulation" + +[flower.engine.simulation.supernode] +num = 2 diff --git a/src/py/flwr/cli/run/run.py b/src/py/flwr/cli/run/run.py index 9c50c8cb1980..dbaf7feb3500 100644 --- a/src/py/flwr/cli/run/run.py +++ b/src/py/flwr/cli/run/run.py @@ -15,18 +15,32 @@ """Flower command line interface `run` command.""" import sys +from enum import Enum +from typing import Optional import typer +from typing_extensions import Annotated from flwr.cli import config_utils from flwr.simulation.run_simulation import _run_simulation -def run() -> None: +class Engine(str, Enum): + """Enum defining the engine to run on.""" + + SIMULATION = "simulation" + + +def run( + engine: Annotated[ + Optional[Engine], + typer.Option(case_sensitive=False, help="The ML framework to use"), + ] = None, +) -> None: """Run Flower project.""" typer.secho("Loading project configuration... ", fg=typer.colors.BLUE) - config, errors, warnings = config_utils.load_and_validate_with_defaults() + config, errors, warnings = config_utils.load_and_validate() if config is None: typer.secho( @@ -49,9 +63,11 @@ def run() -> None: server_app_ref = config["flower"]["components"]["serverapp"] client_app_ref = config["flower"]["components"]["clientapp"] - engine = config["flower"]["engine"]["name"] - if engine == "simulation": + if engine is None: + engine = config["flower"]["engine"]["name"] + + if engine == Engine.SIMULATION: num_supernodes = config["flower"]["engine"]["simulation"]["supernode"]["num"] typer.secho("Starting run... 
", fg=typer.colors.BLUE) From bd4d7e0bc71c49343c46e832b8da9a6f21f70b83 Mon Sep 17 00:00:00 2001 From: Javier Date: Wed, 22 May 2024 18:27:08 +0100 Subject: [PATCH 03/23] ci(framework:skip) Enable auto-formatting for E2E (#3473) --- dev/format.sh | 5 +++++ dev/test.sh | 6 +++--- e2e/bare-client-auth/client.py | 10 ++++++---- e2e/bare-client-auth/server.py | 3 ++- e2e/bare-https/client.py | 14 +++++++++----- e2e/bare-https/server.py | 2 +- e2e/bare/client.py | 2 +- e2e/bare/simulation.py | 24 +++++++++++++++++------- e2e/docker/client.py | 5 +++-- e2e/docker/server.py | 2 +- e2e/fastai/client.py | 4 ++-- e2e/fastai/simulation.py | 9 ++++++--- e2e/jax/client.py | 15 ++++++++------- e2e/jax/jax_training.py | 4 ++-- e2e/jax/simulation.py | 9 ++++++--- e2e/opacus/client.py | 24 +++++++++++++++--------- e2e/opacus/simulation.py | 9 ++++++--- e2e/pandas/client.py | 3 ++- e2e/pandas/server.py | 4 ++-- e2e/pandas/simulation.py | 4 ++-- e2e/pytorch-lightning/client.py | 9 +++++++-- e2e/pytorch-lightning/mnist.py | 12 +++++++----- e2e/pytorch-lightning/simulation.py | 9 ++++++--- e2e/pytorch/client.py | 28 +++++++++++++++++++++------- e2e/pytorch/simulation.py | 25 +++++++++++++++++-------- e2e/scikit-learn/client.py | 15 ++++++++++----- e2e/scikit-learn/simulation.py | 9 ++++++--- e2e/scikit-learn/utils.py | 14 +++++++------- e2e/server.py | 3 +-- e2e/strategies/client.py | 21 ++++++++++++++------- e2e/tabnet/client.py | 11 ++++++++--- e2e/tabnet/simulation.py | 9 ++++++--- e2e/tensorflow/client.py | 9 +++++++-- e2e/tensorflow/simulation.py | 9 ++++++--- 34 files changed, 222 insertions(+), 119 deletions(-) diff --git a/dev/format.sh b/dev/format.sh index 6b9cdaf5f44c..05248b5eed3d 100755 --- a/dev/format.sh +++ b/dev/format.sh @@ -16,6 +16,11 @@ find src/proto/flwr/proto -name *.proto | grep "\.proto" | xargs clang-format -i python -m black -q examples python -m docformatter -i -r examples +# E2E +python -m isort e2e +python -m black -q e2e +python -m docformatter -i -r e2e + # Notebooks python -m black --ipynb -q doc/source/*.ipynb KEYS="metadata.celltoolbar metadata.language_info metadata.toc metadata.notify_time metadata.varInspector metadata.accelerator metadata.vscode cell.metadata.id cell.metadata.heading_collapsed cell.metadata.hidden cell.metadata.code_folding cell.metadata.tags cell.metadata.init_cell cell.metadata.vscode cell.metadata.pycharm" diff --git a/dev/test.sh b/dev/test.sh index 1ed1b96edea1..7cabf35abf41 100755 --- a/dev/test.sh +++ b/dev/test.sh @@ -11,11 +11,11 @@ clang-format --Werror --dry-run src/proto/flwr/proto/* echo "- clang-format: done" echo "- isort: start" -python -m isort --check-only --skip src/py/flwr/proto src/py/flwr +python -m isort --check-only --skip src/py/flwr/proto src/py/flwr e2e echo "- isort: done" echo "- black: start" -python -m black --exclude "src\/py\/flwr\/proto" --check src/py/flwr examples +python -m black --exclude "src\/py\/flwr\/proto" --check src/py/flwr examples e2e echo "- black: done" echo "- init_py_check: start" @@ -23,7 +23,7 @@ python -m flwr_tool.init_py_check src/py/flwr src/py/flwr_tool echo "- init_py_check: done" echo "- docformatter: start" -python -m docformatter -c -r src/py/flwr -e src/py/flwr/proto +python -m docformatter -c -r src/py/flwr e2e -e src/py/flwr/proto echo "- docformatter: done" echo "- ruff: start" diff --git a/e2e/bare-client-auth/client.py b/e2e/bare-client-auth/client.py index a56ba5eca552..e82f17088bd9 100644 --- a/e2e/bare-client-auth/client.py +++ b/e2e/bare-client-auth/client.py @@ -1,11 +1,11 @@ -import 
flwr as fl import numpy as np -from pathlib import Path +import flwr as fl model_params = np.array([1]) objective = 5 + # Define Flower client class FlowerClient(fl.client.NumPyClient): def get_parameters(self, config): @@ -13,18 +13,20 @@ def get_parameters(self, config): def fit(self, parameters, config): model_params = parameters - model_params = [param * (objective/np.mean(param)) for param in model_params] + model_params = [param * (objective / np.mean(param)) for param in model_params] return model_params, 1, {} def evaluate(self, parameters, config): model_params = parameters - loss = min(np.abs(1 - np.mean(model_params)/objective), 1) + loss = min(np.abs(1 - np.mean(model_params) / objective), 1) accuracy = 1 - loss return loss, 1, {"accuracy": accuracy} + def client_fn(cid): return FlowerClient().to_client() + app = fl.client.ClientApp( client_fn=client_fn, ) diff --git a/e2e/bare-client-auth/server.py b/e2e/bare-client-auth/server.py index 7e4f96e15fd9..e10d5ebc5760 100644 --- a/e2e/bare-client-auth/server.py +++ b/e2e/bare-client-auth/server.py @@ -1,6 +1,7 @@ -import flwr as fl from pathlib import Path +import flwr as fl + app = fl.server.ServerApp() diff --git a/e2e/bare-https/client.py b/e2e/bare-https/client.py index b4570b36512d..8f5c1412fd01 100644 --- a/e2e/bare-https/client.py +++ b/e2e/bare-https/client.py @@ -1,11 +1,13 @@ -import flwr as fl -import numpy as np from pathlib import Path +import numpy as np + +import flwr as fl model_params = np.array([1]) objective = 5 + # Define Flower client class FlowerClient(fl.client.NumPyClient): def get_parameters(self, config): @@ -13,18 +15,20 @@ def get_parameters(self, config): def fit(self, parameters, config): model_params = parameters - model_params = [param * (objective/np.mean(param)) for param in model_params] + model_params = [param * (objective / np.mean(param)) for param in model_params] return model_params, 1, {} def evaluate(self, parameters, config): model_params = parameters - loss = min(np.abs(1 - np.mean(model_params)/objective), 1) + loss = min(np.abs(1 - np.mean(model_params) / objective), 1) accuracy = 1 - loss return loss, 1, {"accuracy": accuracy} + def client_fn(cid): return FlowerClient().to_client() + app = fl.client.ClientApp( client_fn=client_fn, ) @@ -32,7 +36,7 @@ def client_fn(cid): if __name__ == "__main__": # Start Flower client fl.client.start_client( - server_address="127.0.0.1:8080", + server_address="127.0.0.1:8080", client=FlowerClient().to_client(), root_certificates=Path("certificates/ca.crt").read_bytes(), insecure=False, diff --git a/e2e/bare-https/server.py b/e2e/bare-https/server.py index d85c0623e92c..e10d5ebc5760 100644 --- a/e2e/bare-https/server.py +++ b/e2e/bare-https/server.py @@ -1,6 +1,6 @@ -import flwr as fl from pathlib import Path +import flwr as fl app = fl.server.ServerApp() diff --git a/e2e/bare/client.py b/e2e/bare/client.py index 5f8642e27675..402d775ac3a9 100644 --- a/e2e/bare/client.py +++ b/e2e/bare/client.py @@ -1,8 +1,8 @@ from datetime import datetime -import flwr as fl import numpy as np +import flwr as fl from flwr.common import ConfigsRecord SUBSET_SIZE = 1000 diff --git a/e2e/bare/simulation.py b/e2e/bare/simulation.py index 3a90d90a0ae0..25868eb8e33f 100644 --- a/e2e/bare/simulation.py +++ b/e2e/bare/simulation.py @@ -1,11 +1,12 @@ from typing import List, Tuple + import numpy as np +from client import client_fn import flwr as fl from flwr.common import Metrics -from client import client_fn -STATE_VAR = 'timestamp' +STATE_VAR = "timestamp" # Define metric 
aggregation function @@ -14,18 +15,22 @@ def record_state_metrics(metrics: List[Tuple[int, Metrics]]) -> Metrics: states = [] for _, m in metrics: # split string and covert timestamps to float - states.append([float(tt) for tt in m[STATE_VAR].split(',')]) + states.append([float(tt) for tt in m[STATE_VAR].split(",")]) for client_state in states: if len(client_state) == 1: continue deltas = np.diff(client_state) - assert np.all(deltas > 0), f"Timestamps are not monotonically increasing: {client_state}" + assert np.all( + deltas > 0 + ), f"Timestamps are not monotonically increasing: {client_state}" return {STATE_VAR: states} -strategy = fl.server.strategy.FedAvg(evaluate_metrics_aggregation_fn=record_state_metrics) +strategy = fl.server.strategy.FedAvg( + evaluate_metrics_aggregation_fn=record_state_metrics +) hist = fl.simulation.start_simulation( client_fn=client_fn, @@ -34,8 +39,13 @@ def record_state_metrics(metrics: List[Tuple[int, Metrics]]) -> Metrics: strategy=strategy, ) -assert hist.losses_distributed[-1][1] == 0 or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 +assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 +) # The checks in record_state_metrics don't do anythinng if client's state has a single entry state_metrics_last_round = hist.metrics_distributed[STATE_VAR][-1] -assert len(state_metrics_last_round[1][0]) == 2*state_metrics_last_round[0], f"There should be twice as many entries in the client state as rounds" +assert ( + len(state_metrics_last_round[1][0]) == 2 * state_metrics_last_round[0] +), "There should be twice as many entries in the client state as rounds" diff --git a/e2e/docker/client.py b/e2e/docker/client.py index cea752ea5777..8451b810416b 100644 --- a/e2e/docker/client.py +++ b/e2e/docker/client.py @@ -1,14 +1,14 @@ import warnings from collections import OrderedDict -from flwr.client import NumPyClient, ClientApp -from flwr_datasets import FederatedDataset import torch import torch.nn as nn import torch.nn.functional as F +from flwr_datasets import FederatedDataset from torch.utils.data import DataLoader, Subset from torchvision.transforms import Compose, Normalize, ToTensor +from flwr.client import ClientApp, NumPyClient # ############################################################################# # 1. 
Regular PyTorch pipeline: nn.Module, train, test, and DataLoader @@ -19,6 +19,7 @@ SUBSET_SIZE = 1_000 + class Net(nn.Module): """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')""" diff --git a/e2e/docker/server.py b/e2e/docker/server.py index cb3490b75dca..61825f0a5df3 100644 --- a/e2e/docker/server.py +++ b/e2e/docker/server.py @@ -1,8 +1,8 @@ from typing import List, Tuple +from flwr.common import Metrics from flwr.server import ServerApp, ServerConfig from flwr.server.strategy import FedAvg -from flwr.common import Metrics # Define metric aggregation function diff --git a/e2e/fastai/client.py b/e2e/fastai/client.py index c4bfb89c2dde..1d98a1134941 100644 --- a/e2e/fastai/client.py +++ b/e2e/fastai/client.py @@ -1,11 +1,11 @@ import warnings from collections import OrderedDict -import flwr as fl import numpy as np import torch from fastai.vision.all import * +import flwr as fl warnings.filterwarnings("ignore", category=UserWarning) @@ -17,7 +17,7 @@ path, valid_pct=0.5, train="training", valid="testing", num_workers=0 ) -subset_size = 100 # Or whatever +subset_size = 100 # Or whatever selected_train = np.random.choice(dls.train_ds.items, subset_size, replace=False) selected_valid = np.random.choice(dls.valid_ds.items, subset_size, replace=False) # Swap in the subset for the whole thing (Note: this mutates dls, so re-initialize before full training!) diff --git a/e2e/fastai/simulation.py b/e2e/fastai/simulation.py index 5f0e5334bd08..bf05a77cf32a 100644 --- a/e2e/fastai/simulation.py +++ b/e2e/fastai/simulation.py @@ -1,11 +1,14 @@ -import flwr as fl - from client import client_fn +import flwr as fl + hist = fl.simulation.start_simulation( client_fn=client_fn, num_clients=2, config=fl.server.ServerConfig(num_rounds=3), ) -assert hist.losses_distributed[-1][1] == 0 or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 +assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 +) diff --git a/e2e/jax/client.py b/e2e/jax/client.py index a4e4d1f55117..347a005d923a 100644 --- a/e2e/jax/client.py +++ b/e2e/jax/client.py @@ -1,15 +1,12 @@ """Flower client example using JAX for linear regression.""" +from typing import Dict, List, Tuple -from typing import Dict, List, Tuple, Callable - -import flwr as fl -import numpy as np import jax -import jax.numpy as jnp - import jax_training +import numpy as np +import flwr as fl # Load data and determine model shape train_x, train_y, test_x, test_y = jax_training.load_data() @@ -50,13 +47,17 @@ def evaluate( ) return float(loss), num_examples, {"loss": float(loss)} + def client_fn(cid): return FlowerClient().to_client() + app = fl.client.ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_client(server_address="127.0.0.1:8080", client=FlowerClient().to_client()) + fl.client.start_client( + server_address="127.0.0.1:8080", client=FlowerClient().to_client() + ) diff --git a/e2e/jax/jax_training.py b/e2e/jax/jax_training.py index 2b523a08516e..f57db75d5963 100644 --- a/e2e/jax/jax_training.py +++ b/e2e/jax/jax_training.py @@ -7,13 +7,13 @@ please read the JAX documentation or the mentioned tutorial. 
""" +from typing import Callable, Dict, List, Tuple -from typing import Dict, List, Tuple, Callable import jax import jax.numpy as jnp +import numpy as np from sklearn.datasets import make_regression from sklearn.model_selection import train_test_split -import numpy as np key = jax.random.PRNGKey(0) diff --git a/e2e/jax/simulation.py b/e2e/jax/simulation.py index 5f0e5334bd08..bf05a77cf32a 100644 --- a/e2e/jax/simulation.py +++ b/e2e/jax/simulation.py @@ -1,11 +1,14 @@ -import flwr as fl - from client import client_fn +import flwr as fl + hist = fl.simulation.start_simulation( client_fn=client_fn, num_clients=2, config=fl.server.ServerConfig(num_rounds=3), ) -assert hist.losses_distributed[-1][1] == 0 or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 +assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 +) diff --git a/e2e/opacus/client.py b/e2e/opacus/client.py index 00437a31233c..c9ebe319063a 100644 --- a/e2e/opacus/client.py +++ b/e2e/opacus/client.py @@ -11,7 +11,6 @@ import flwr as fl - # Define parameters. PARAMS = { "batch_size": 32, @@ -57,9 +56,7 @@ def train(net, trainloader, privacy_engine, optimizer, epochs): loss = criterion(net(images), labels) loss.backward() optimizer.step() - epsilon = privacy_engine.get_epsilon( - delta=PRIVACY_PARAMS["target_delta"] - ) + epsilon = privacy_engine.get_epsilon(delta=PRIVACY_PARAMS["target_delta"]) return epsilon @@ -76,22 +73,27 @@ def test(net, testloader): accuracy = correct / len(testloader.dataset) return loss, accuracy + def load_data(): transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] ) data = CIFAR10("./data", train=True, download=True, transform=transform) - split = math.floor(len(data)* 0.01 * PARAMS["train_split"]) + split = math.floor(len(data) * 0.01 * PARAMS["train_split"]) trainset = torch.utils.data.Subset(data, list(range(0, split))) - testset = torch.utils.data.Subset(data, list(range(split, math.floor(len(data) * 0.01)))) + testset = torch.utils.data.Subset( + data, list(range(split, math.floor(len(data) * 0.01))) + ) trainloader = DataLoader(trainset, PARAMS["batch_size"]) testloader = DataLoader(testset, PARAMS["batch_size"]) sample_rate = PARAMS["batch_size"] / len(trainset) return trainloader, testloader, sample_rate + model = Net() trainloader, testloader, sample_rate = load_data() + # Define Flower client. 
class FlowerClient(fl.client.NumPyClient): def __init__(self, model) -> None: @@ -118,7 +120,11 @@ def set_parameters(self, parameters): def fit(self, parameters, config): self.set_parameters(parameters) epsilon = train( - self.model, self.trainloader, self.privacy_engine, self.optimizer, PARAMS["local_epochs"] + self.model, + self.trainloader, + self.privacy_engine, + self.optimizer, + PARAMS["local_epochs"], ) print(f"epsilon = {epsilon:.2f}") return ( @@ -137,12 +143,12 @@ def client_fn(cid): model = Net() return FlowerClient(model).to_client() + app = fl.client.ClientApp( client_fn=client_fn, ) if __name__ == "__main__": fl.client.start_client( - server_address="127.0.0.1:8080", - client=FlowerClient(model).to_client() + server_address="127.0.0.1:8080", client=FlowerClient(model).to_client() ) diff --git a/e2e/opacus/simulation.py b/e2e/opacus/simulation.py index 5f0e5334bd08..bf05a77cf32a 100644 --- a/e2e/opacus/simulation.py +++ b/e2e/opacus/simulation.py @@ -1,11 +1,14 @@ -import flwr as fl - from client import client_fn +import flwr as fl + hist = fl.simulation.start_simulation( client_fn=client_fn, num_clients=2, config=fl.server.ServerConfig(num_rounds=3), ) -assert hist.losses_distributed[-1][1] == 0 or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 +assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 +) diff --git a/e2e/pandas/client.py b/e2e/pandas/client.py index 0ecd75df3ae8..19e15f5a3b11 100644 --- a/e2e/pandas/client.py +++ b/e2e/pandas/client.py @@ -5,7 +5,6 @@ import flwr as fl - df = pd.read_csv("./data/client.csv") column_names = ["sepal length (cm)", "sepal width (cm)"] @@ -32,9 +31,11 @@ def fit( {}, ) + def client_fn(cid): return FlowerClient().to_client() + app = fl.client.ClientApp( client_fn=client_fn, ) diff --git a/e2e/pandas/server.py b/e2e/pandas/server.py index 4c69ab3881d2..ef0e92a11ea2 100644 --- a/e2e/pandas/server.py +++ b/e2e/pandas/server.py @@ -1,7 +1,7 @@ -import flwr as fl - from strategy import FedAnalytics +import flwr as fl + app = fl.server.ServerApp() diff --git a/e2e/pandas/simulation.py b/e2e/pandas/simulation.py index b548b5ebb760..8160fb744229 100644 --- a/e2e/pandas/simulation.py +++ b/e2e/pandas/simulation.py @@ -1,8 +1,8 @@ -import flwr as fl - from client import client_fn from strategy import FedAnalytics +import flwr as fl + hist = fl.simulation.start_simulation( client_fn=client_fn, num_clients=2, diff --git a/e2e/pytorch-lightning/client.py b/e2e/pytorch-lightning/client.py index fde550e31c08..fdd55b3dc344 100644 --- a/e2e/pytorch-lightning/client.py +++ b/e2e/pytorch-lightning/client.py @@ -1,9 +1,11 @@ -import flwr as fl +from collections import OrderedDict + import mnist import pytorch_lightning as pl -from collections import OrderedDict import torch +import flwr as fl + class FlowerClient(fl.client.NumPyClient): def __init__(self, model, train_loader, val_loader, test_loader): @@ -48,6 +50,7 @@ def _set_parameters(model, parameters): state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) model.load_state_dict(state_dict, strict=True) + def client_fn(cid): model = mnist.LitAutoEncoder() train_loader, val_loader, test_loader = mnist.load_data() @@ -55,10 +58,12 @@ def client_fn(cid): # Flower client return FlowerClient(model, train_loader, val_loader, test_loader).to_client() + app = fl.client.ClientApp( client_fn=client_fn, ) + def main() -> None: # Model and data model = mnist.LitAutoEncoder() diff --git 
a/e2e/pytorch-lightning/mnist.py b/e2e/pytorch-lightning/mnist.py index 462087739474..b23efc50d1e4 100644 --- a/e2e/pytorch-lightning/mnist.py +++ b/e2e/pytorch-lightning/mnist.py @@ -3,14 +3,13 @@ Source: pytorchlightning.ai (2021/02/04) """ - +import pytorch_lightning as pl import torch from torch import nn from torch.nn import functional as F from torch.utils.data import DataLoader, Subset, random_split from torchvision import transforms from torchvision.datasets import MNIST -import pytorch_lightning as pl class LitAutoEncoder(pl.LightningModule): @@ -62,14 +61,18 @@ def _evaluate(self, batch, stage=None): def load_data(): # Training / validation set - trainset = MNIST("./data", train=True, download=True, transform=transforms.ToTensor()) + trainset = MNIST( + "./data", train=True, download=True, transform=transforms.ToTensor() + ) trainset = Subset(trainset, range(1000)) mnist_train, mnist_val = random_split(trainset, [800, 200]) train_loader = DataLoader(mnist_train, batch_size=32, shuffle=True, num_workers=0) val_loader = DataLoader(mnist_val, batch_size=32, shuffle=False, num_workers=0) # Test set - testset = MNIST("./data", train=False, download=True, transform=transforms.ToTensor()) + testset = MNIST( + "./data", train=False, download=True, transform=transforms.ToTensor() + ) testset = Subset(testset, range(10)) test_loader = DataLoader(testset, batch_size=32, shuffle=False, num_workers=0) @@ -78,7 +81,6 @@ def load_data(): def main() -> None: """Centralized training.""" - # Load data train_loader, val_loader, test_loader = load_data() diff --git a/e2e/pytorch-lightning/simulation.py b/e2e/pytorch-lightning/simulation.py index 5f0e5334bd08..bf05a77cf32a 100644 --- a/e2e/pytorch-lightning/simulation.py +++ b/e2e/pytorch-lightning/simulation.py @@ -1,11 +1,14 @@ -import flwr as fl - from client import client_fn +import flwr as fl + hist = fl.simulation.start_simulation( client_fn=client_fn, num_clients=2, config=fl.server.ServerConfig(num_rounds=3), ) -assert hist.losses_distributed[-1][1] == 0 or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 +assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 +) diff --git a/e2e/pytorch/client.py b/e2e/pytorch/client.py index 1fd07763148e..dbfbfed1ffa7 100644 --- a/e2e/pytorch/client.py +++ b/e2e/pytorch/client.py @@ -20,7 +20,7 @@ warnings.filterwarnings("ignore", category=UserWarning) DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") SUBSET_SIZE = 1000 -STATE_VAR = 'timestamp' +STATE_VAR = "timestamp" class Net(nn.Module): @@ -92,7 +92,7 @@ def load_data(): class FlowerClient(fl.client.NumPyClient): def get_parameters(self, config): return [val.cpu().numpy() for _, val in net.state_dict().items()] - + def _record_timestamp_to_state(self): """Record timestamp to client's state.""" t_stamp = datetime.now().timestamp() @@ -101,31 +101,45 @@ def _record_timestamp_to_state(self): value = self.context.state.configs_records[STATE_VAR][STATE_VAR] # type: ignore value += f",{t_stamp}" - self.context.state.configs_records[STATE_VAR] = ConfigsRecord({STATE_VAR: value}) - + self.context.state.configs_records[STATE_VAR] = ConfigsRecord( + {STATE_VAR: value} + ) + def _retrieve_timestamp_from_state(self): return self.context.state.configs_records[STATE_VAR][STATE_VAR] + def fit(self, parameters, config): set_parameters(net, parameters) train(net, trainloader, epochs=1) self._record_timestamp_to_state() - return 
self.get_parameters(config={}), len(trainloader.dataset), {STATE_VAR: self._retrieve_timestamp_from_state()} + return ( + self.get_parameters(config={}), + len(trainloader.dataset), + {STATE_VAR: self._retrieve_timestamp_from_state()}, + ) def evaluate(self, parameters, config): set_parameters(net, parameters) loss, accuracy = test(net, testloader) self._record_timestamp_to_state() - return loss, len(testloader.dataset), {"accuracy": accuracy, STATE_VAR: self._retrieve_timestamp_from_state()} + return ( + loss, + len(testloader.dataset), + {"accuracy": accuracy, STATE_VAR: self._retrieve_timestamp_from_state()}, + ) + def set_parameters(model, parameters): params_dict = zip(model.state_dict().keys(), parameters) state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) model.load_state_dict(state_dict, strict=True) - return + return + def client_fn(cid): return FlowerClient().to_client() + app = fl.client.ClientApp( client_fn=client_fn, ) diff --git a/e2e/pytorch/simulation.py b/e2e/pytorch/simulation.py index a4c8d4642be2..25868eb8e33f 100644 --- a/e2e/pytorch/simulation.py +++ b/e2e/pytorch/simulation.py @@ -1,12 +1,12 @@ from typing import List, Tuple + import numpy as np +from client import client_fn import flwr as fl from flwr.common import Metrics - -from client import client_fn -STATE_VAR = 'timestamp' +STATE_VAR = "timestamp" # Define metric aggregation function @@ -15,18 +15,22 @@ def record_state_metrics(metrics: List[Tuple[int, Metrics]]) -> Metrics: states = [] for _, m in metrics: # split string and covert timestamps to float - states.append([float(tt) for tt in m[STATE_VAR].split(',')]) + states.append([float(tt) for tt in m[STATE_VAR].split(",")]) for client_state in states: if len(client_state) == 1: continue deltas = np.diff(client_state) - assert np.all(deltas > 0), f"Timestamps are not monotonically increasing: {client_state}" + assert np.all( + deltas > 0 + ), f"Timestamps are not monotonically increasing: {client_state}" return {STATE_VAR: states} -strategy = fl.server.strategy.FedAvg(evaluate_metrics_aggregation_fn=record_state_metrics) +strategy = fl.server.strategy.FedAvg( + evaluate_metrics_aggregation_fn=record_state_metrics +) hist = fl.simulation.start_simulation( client_fn=client_fn, @@ -35,8 +39,13 @@ def record_state_metrics(metrics: List[Tuple[int, Metrics]]) -> Metrics: strategy=strategy, ) -assert hist.losses_distributed[-1][1] == 0 or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 +assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 +) # The checks in record_state_metrics don't do anythinng if client's state has a single entry state_metrics_last_round = hist.metrics_distributed[STATE_VAR][-1] -assert len(state_metrics_last_round[1][0]) == 2*state_metrics_last_round[0], f"There should be twice as many entries in the client state as rounds" +assert ( + len(state_metrics_last_round[1][0]) == 2 * state_metrics_last_round[0] +), "There should be twice as many entries in the client state as rounds" diff --git a/e2e/scikit-learn/client.py b/e2e/scikit-learn/client.py index e073d3cb2748..b0691e75a79d 100644 --- a/e2e/scikit-learn/client.py +++ b/e2e/scikit-learn/client.py @@ -1,11 +1,11 @@ import warnings -import flwr as fl -import numpy as np +import numpy as np +import utils from sklearn.linear_model import LogisticRegression from sklearn.metrics import log_loss -import utils +import flwr as fl # Load MNIST dataset from https://www.openml.org/d/554 
(X_train, y_train), (X_test, y_test) = utils.load_mnist() @@ -24,6 +24,7 @@ # Setting initial parameters, akin to model.compile for keras models utils.set_initial_params(model) + # Define Flower client class FlowerClient(fl.client.NumPyClient): def get_parameters(self, config): # type: ignore @@ -42,14 +43,18 @@ def evaluate(self, parameters, config): # type: ignore loss = log_loss(y_test, model.predict_proba(X_test)) accuracy = model.score(X_test, y_test) return loss, len(X_test), {"accuracy": accuracy} - + + def client_fn(cid): return FlowerClient().to_client() + app = fl.client.ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_client(server_address="0.0.0.0:8080", client=FlowerClient().to_client()) + fl.client.start_client( + server_address="0.0.0.0:8080", client=FlowerClient().to_client() + ) diff --git a/e2e/scikit-learn/simulation.py b/e2e/scikit-learn/simulation.py index 5f0e5334bd08..bf05a77cf32a 100644 --- a/e2e/scikit-learn/simulation.py +++ b/e2e/scikit-learn/simulation.py @@ -1,11 +1,14 @@ -import flwr as fl - from client import client_fn +import flwr as fl + hist = fl.simulation.start_simulation( client_fn=client_fn, num_clients=2, config=fl.server.ServerConfig(num_rounds=3), ) -assert hist.losses_distributed[-1][1] == 0 or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 +assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 +) diff --git a/e2e/scikit-learn/utils.py b/e2e/scikit-learn/utils.py index 2b8dcf8655ee..b7b202f7b760 100644 --- a/e2e/scikit-learn/utils.py +++ b/e2e/scikit-learn/utils.py @@ -1,7 +1,8 @@ -from typing import Tuple, Union, List +from typing import List, Tuple, Union + import numpy as np -from sklearn.linear_model import LogisticRegression import openml +from sklearn.linear_model import LogisticRegression XY = Tuple[np.ndarray, np.ndarray] Dataset = Tuple[XY, XY] @@ -34,12 +35,11 @@ def set_model_params( def set_initial_params(model: LogisticRegression): - """Sets initial parameters as zeros Required since model params are - uninitialized until model.fit is called. + """Sets initial parameters as zeros Required since model params are uninitialized + until model.fit is called. - But server asks for initial parameters from clients at launch. Refer - to sklearn.linear_model.LogisticRegression documentation for more - information. + But server asks for initial parameters from clients at launch. Refer to + sklearn.linear_model.LogisticRegression documentation for more information. 
""" n_classes = 10 # MNIST has 10 classes n_features = 784 # Number of features in dataset diff --git a/e2e/server.py b/e2e/server.py index 3b37ac4244e9..c678cd0a2446 100644 --- a/e2e/server.py +++ b/e2e/server.py @@ -1,6 +1,5 @@ import numpy as np - import flwr as fl STATE_VAR = "timestamp" @@ -77,4 +76,4 @@ def main(driver, context): state_metrics_last_round = hist.metrics_distributed[STATE_VAR][-1] assert ( len(state_metrics_last_round[1][0]) == 2 * state_metrics_last_round[0] - ), f"There should be twice as many entries in the client state as rounds" + ), "There should be twice as many entries in the client state as rounds" diff --git a/e2e/strategies/client.py b/e2e/strategies/client.py index 3b49f770dc6b..505340e013a5 100644 --- a/e2e/strategies/client.py +++ b/e2e/strategies/client.py @@ -1,19 +1,23 @@ import os -import flwr as fl import tensorflow as tf +import flwr as fl + SUBSET_SIZE = 1000 # Make TensorFlow log less verbose os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" + def get_model(): - model = tf.keras.models.Sequential([ - tf.keras.layers.Flatten(input_shape=(28, 28)), - tf.keras.layers.Dense(128, activation='relu'), - tf.keras.layers.Dense(10) - ]) + model = tf.keras.models.Sequential( + [ + tf.keras.layers.Flatten(input_shape=(28, 28)), + tf.keras.layers.Dense(128, activation="relu"), + tf.keras.layers.Dense(10), + ] + ) model.compile( optimizer=tf.keras.optimizers.Adam(0.001), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), @@ -21,6 +25,7 @@ def get_model(): ) return model + model = get_model() (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() x_train, y_train = x_train[:SUBSET_SIZE], y_train[:SUBSET_SIZE] @@ -54,4 +59,6 @@ def client_fn(cid): if __name__ == "__main__": # Start Flower client - fl.client.start_client(server_address="127.0.0.1:8080", client=FlowerClient().to_client()) + fl.client.start_client( + server_address="127.0.0.1:8080", client=FlowerClient().to_client() + ) diff --git a/e2e/tabnet/client.py b/e2e/tabnet/client.py index 0290ba4629de..1a7ecfd68f73 100644 --- a/e2e/tabnet/client.py +++ b/e2e/tabnet/client.py @@ -1,8 +1,10 @@ import os -import flwr as fl + +import tabnet import tensorflow as tf import tensorflow_datasets as tfds -import tabnet + +import flwr as fl train_size = 125 BATCH_SIZE = 50 @@ -81,10 +83,13 @@ def evaluate(self, parameters, config): def client_fn(cid): return FlowerClient().to_client() + app = fl.client.ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_client(server_address="127.0.0.1:8080", client=FlowerClient().to_client()) + fl.client.start_client( + server_address="127.0.0.1:8080", client=FlowerClient().to_client() + ) diff --git a/e2e/tabnet/simulation.py b/e2e/tabnet/simulation.py index 5f0e5334bd08..bf05a77cf32a 100644 --- a/e2e/tabnet/simulation.py +++ b/e2e/tabnet/simulation.py @@ -1,11 +1,14 @@ -import flwr as fl - from client import client_fn +import flwr as fl + hist = fl.simulation.start_simulation( client_fn=client_fn, num_clients=2, config=fl.server.ServerConfig(num_rounds=3), ) -assert hist.losses_distributed[-1][1] == 0 or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 +assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 +) diff --git a/e2e/tensorflow/client.py b/e2e/tensorflow/client.py index 10ee91136241..779be0c3746d 100644 --- a/e2e/tensorflow/client.py +++ b/e2e/tensorflow/client.py @@ -1,8 +1,9 @@ import os -import 
flwr as fl import tensorflow as tf +import flwr as fl + SUBSET_SIZE = 1000 # Make TensorFlow log less verbose @@ -31,13 +32,17 @@ def evaluate(self, parameters, config): loss, accuracy = model.evaluate(x_test, y_test) return loss, len(x_test), {"accuracy": accuracy} + def client_fn(cid): return FlowerClient().to_client() + app = fl.client.ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_client(server_address="127.0.0.1:8080", client=FlowerClient().to_client()) + fl.client.start_client( + server_address="127.0.0.1:8080", client=FlowerClient().to_client() + ) diff --git a/e2e/tensorflow/simulation.py b/e2e/tensorflow/simulation.py index 5f0e5334bd08..bf05a77cf32a 100644 --- a/e2e/tensorflow/simulation.py +++ b/e2e/tensorflow/simulation.py @@ -1,11 +1,14 @@ -import flwr as fl - from client import client_fn +import flwr as fl + hist = fl.simulation.start_simulation( client_fn=client_fn, num_clients=2, config=fl.server.ServerConfig(num_rounds=3), ) -assert hist.losses_distributed[-1][1] == 0 or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 +assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 +) From 21272418f27de2ab8b30178f44724242f0704dc6 Mon Sep 17 00:00:00 2001 From: Javier Date: Thu, 23 May 2024 09:31:07 +0100 Subject: [PATCH 04/23] fix (#3475) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 909cf890dccf..dabeb0a9eba9 100644 --- a/README.md +++ b/README.md @@ -150,7 +150,7 @@ Other [examples](https://github.com/adap/flower/tree/main/examples): - Single-Machine Simulation of Federated Learning Systems ([PyTorch](https://github.com/adap/flower/tree/main/examples/simulation-pytorch)) ([Tensorflow](https://github.com/adap/flower/tree/main/examples/simulation-tensorflow)) - [Comprehensive Flower+XGBoost](https://github.com/adap/flower/tree/main/examples/xgboost-comprehensive) - [Flower through Docker Compose and with Grafana dashboard](https://github.com/adap/flower/tree/main/examples/flower-via-docker-compose) -- [Flower with KaplanMeierFitter from the lifelines library](https://github.com/adap/flower/tree/main/examples/federated-kaplna-meier-fitter) +- [Flower with KaplanMeierFitter from the lifelines library](https://github.com/adap/flower/tree/main/examples/federated-kaplan-meier-fitter) - [Sample Level Privacy with Opacus](https://github.com/adap/flower/tree/main/examples/opacus) ## Community From f22cb84d32109ff7762462931d7e60aaf2b0604a Mon Sep 17 00:00:00 2001 From: Robert Steiner Date: Thu, 23 May 2024 11:37:10 +0200 Subject: [PATCH 05/23] fix(framework:skip) Use sqlite3 lib without dev headers (#3486) Signed-off-by: Robert Steiner --- src/docker/base/ubuntu/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/docker/base/ubuntu/Dockerfile b/src/docker/base/ubuntu/Dockerfile index 75152d00f8d6..6f2585d4570f 100644 --- a/src/docker/base/ubuntu/Dockerfile +++ b/src/docker/base/ubuntu/Dockerfile @@ -54,7 +54,7 @@ ENV DEBIAN_FRONTEND=noninteractive RUN apt-get update \ && apt-get -y --no-install-recommends install \ - libsqlite3-dev \ + libsqlite3-0 \ && rm -rf /var/lib/apt/lists/* COPY --from=python /usr/local/bin/python /usr/local/bin/python From a754714865619fbaeb4a44a77956a473d2d49784 Mon Sep 17 00:00:00 2001 From: Charles Beauville Date: Thu, 23 May 2024 12:08:53 +0200 Subject: [PATCH 06/23] ci(*:skip) Ignore dependabot PRs for title checks (#3495) 
--- .github/workflows/pr_check.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/pr_check.yml b/.github/workflows/pr_check.yml index 47bb4b284136..6b3af429bb0d 100644 --- a/.github/workflows/pr_check.yml +++ b/.github/workflows/pr_check.yml @@ -14,6 +14,7 @@ jobs: pr_title_check: runs-on: ubuntu-22.04 name: Title format + if: ${{ github.actor != 'dependabot[bot]' }} steps: - uses: actions/checkout@v4 - name: Bootstrap From 14deafda25a74536d0d542492781ece20ce12178 Mon Sep 17 00:00:00 2001 From: Robert Steiner Date: Thu, 23 May 2024 13:53:57 +0200 Subject: [PATCH 07/23] ci(*:skip) Upgrade pip and setuptools (#3496) --------- Signed-off-by: Robert Steiner Co-authored-by: Chong Shen Ng Co-authored-by: Taner Topal --- .devcontainer/Dockerfile | 4 ++-- .github/actions/bootstrap/action.yml | 6 +++--- dev/bootstrap.sh | 6 +++--- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 03849995735d..ce4f8a1a5b8d 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -29,8 +29,8 @@ RUN apt-get install -y curl wget gnupg python3 python-is-python3 python3-pip git build-essential tmux vim RUN python -m pip install \ - pip==23.3.1 \ - setuptools==68.2.2 \ + pip==24.0.0 \ + setuptools==69.5.1 \ poetry==1.7.1 USER $USERNAME diff --git a/.github/actions/bootstrap/action.yml b/.github/actions/bootstrap/action.yml index 8527f1f82307..bee90beffa7d 100644 --- a/.github/actions/bootstrap/action.yml +++ b/.github/actions/bootstrap/action.yml @@ -6,17 +6,17 @@ inputs: default: 3.8 pip-version: description: "Version of pip to be installed using pip" - default: 23.3.1 + default: 24.0.0 setuptools-version: description: "Version of setuptools to be installed using pip" - default: 68.2.2 + default: 69.5.1 poetry-version: description: "Version of poetry to be installed using pip" default: 1.7.1 poetry-skip: description: "Option to skip the poetry installation" required: false - default: 'false' + default: "false" outputs: python-version: description: "Version range or exact version of Python or PyPy" diff --git a/dev/bootstrap.sh b/dev/bootstrap.sh index b4bb82d73b51..154fe0f1cbaf 100755 --- a/dev/bootstrap.sh +++ b/dev/bootstrap.sh @@ -1,6 +1,6 @@ #!/bin/bash set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ +cd "$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"/../ # Setup environment variables for development ./dev/setup-envs.sh @@ -9,8 +9,8 @@ cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ ./dev/rm-caches.sh # Upgrade/install spcific versions of `pip`, `setuptools`, and `poetry` -python -m pip install -U pip==23.3.1 -python -m pip install -U setuptools==68.2.2 +python -m pip install -U pip==24.0.0 +python -m pip install -U setuptools==69.5.1 python -m pip install -U poetry==1.7.1 # Use `poetry` to install project dependencies From 97d1d4d6c65bbdcd8bc2ed2c6c65ae58af64d4a7 Mon Sep 17 00:00:00 2001 From: Charles Beauville Date: Thu, 23 May 2024 14:30:46 +0200 Subject: [PATCH 08/23] ci(*:skip) Fix PR title check regex (#3497) --- .github/workflows/pr_check.yml | 1 - dev/changelog_config.toml | 2 +- dev/check_pr_title.py | 29 ++++++++++++++++------------- 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/.github/workflows/pr_check.yml b/.github/workflows/pr_check.yml index 6b3af429bb0d..47bb4b284136 100644 --- a/.github/workflows/pr_check.yml +++ b/.github/workflows/pr_check.yml @@ -14,7 +14,6 @@ jobs: pr_title_check: runs-on: 
ubuntu-22.04 name: Title format - if: ${{ github.actor != 'dependabot[bot]' }} steps: - uses: actions/checkout@v4 - name: Bootstrap diff --git a/dev/changelog_config.toml b/dev/changelog_config.toml index 892050cdb084..898f9bfdb221 100644 --- a/dev/changelog_config.toml +++ b/dev/changelog_config.toml @@ -7,7 +7,7 @@ project = ["framework", "baselines", "datasets", "examples"] scope = "skip" -pattern_template = "^({types})\\(({projects})(?::({scope}))?\\) ([A-Z][^\\.\\n]*(?:\\.(?=[^\\.\\n]))*[^\\.\\n]*)$" +pattern_template = "^({types})\\(({projects})(?::({scope}))?\\) ([A-Z][^\\n]*[^\\.\\n])$" allowed_verbs=[ "Abandon", diff --git a/dev/check_pr_title.py b/dev/check_pr_title.py index e2679b3e7226..33b7a4664e9f 100644 --- a/dev/check_pr_title.py +++ b/dev/check_pr_title.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""This module is used to check a given PR title format.""" +"""Used to check a given PR title format.""" import pathlib import re @@ -46,10 +46,13 @@ valid = True error = "it doesn't have the correct format" - if not match: + # This check is there to ignore dependabot PRs from title checks + if pr_title.startswith("chore"): + sys.exit(0) + elif not match: valid = False else: - if not match.group(4).split()[0] in allowed_verbs: + if match.group(4).split()[0] not in allowed_verbs: valid = False error = "the doesn't start with a verb in the imperative mood" elif match.group(2) == "*" and match.group(3) is None: @@ -58,16 +61,16 @@ if not valid: print( - f"PR title `{pr_title}` is invalid, {error}.\n\n" - "A PR title should be of the form:\n\n\t() " - f"\n\nOr, if the PR shouldn't appear in the changelog:\n\n\t" - f"(:skip) \n\nwith in [{types}],\n" - f" in [{'|'.join(config['project']) + '|*'}] (where '*' is used " - "when modifying multiple projects and should be used in " + f"PR title `{pr_title}` is invalid, {error}.\n\nA PR title should " + "be of the form:\n\n\t() \n\n" + f"Or, if the PR shouldn't appear in the changelog:\n\n\t" + f"(:skip) \n\nwith in [{types}],\n" + f" in [{'|'.join(config['project']) + '|*'}] " + "(where '*' is used when modifying multiple projects and should be used in " "conjunction with the ':skip' flag),\nand starting with " - "a capitalized verb in the imperative mood and without any punctuation at the end.\n\n" - "A valid example is:\n\n\t`feat(framework) Add flwr build CLI command`\n\n" - "Or, if the PR shouldn't appear in the changelog:\n\n\t" - "`feat(framework:skip) Add new option to build CLI`\n" + "a capitalized verb in the imperative mood and without any punctuation " + "at the end.\n\nA valid example is:\n\n\t`feat(framework) " + "Add flwr build CLI command`\n\nOr, if the PR shouldn't appear in " + "the changelog:\n\n\t`feat(framework:skip) Add new option to build CLI`\n" ) sys.exit(1) From 30e99983f7d777f504ce7f5bd828ca1310c8ac77 Mon Sep 17 00:00:00 2001 From: Javier Date: Thu, 23 May 2024 15:26:29 +0100 Subject: [PATCH 09/23] fix(*:skip) Exclude `setuptools` version from `pyproject.toml` (#3494) --------- Co-authored-by: Taner Topal Co-authored-by: Taner Topal --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 57ed4450cc52..adacd5d2b4e1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -73,13 +73,13 @@ iterators = "^0.0.2" typer = { version = "^0.9.0", extras=["all"] } tomli = "^2.0.1" pathspec = "^0.12.1" 
+setuptools = "!=70.0.0" # Optional dependencies (Simulation Engine) ray = { version = "==2.6.3", optional = true, python = ">=3.8,<3.12" } # Optional dependencies (REST transport layer) requests = { version = "^2.31.0", optional = true } starlette = { version = "^0.31.0", optional = true } uvicorn = { version = "^0.23.0", extras = ["standard"], optional = true } -setuptools = "69.5.1" [tool.poetry.extras] simulation = ["ray", "pydantic"] From a43aa0d77a02997d251a0945e108f4c1a29de004 Mon Sep 17 00:00:00 2001 From: Chong Shen Ng Date: Thu, 23 May 2024 17:17:33 +0100 Subject: [PATCH 10/23] ci(framework:skip) Add caching to speed up E2E CI tests (#3480) --- .github/workflows/e2e.yml | 20 +++++++++++++--- e2e/bare-client-auth/pyproject.toml | 21 +++++++++++------ e2e/bare-https/pyproject.toml | 21 +++++++++++------ e2e/bare/pyproject.toml | 21 +++++++++++------ e2e/fastai/pyproject.toml | 25 +++++++++++++------- e2e/jax/pyproject.toml | 31 ++++++++++++++---------- e2e/opacus/pyproject.toml | 27 +++++++++++++-------- e2e/pandas/pyproject.toml | 33 ++++++++++++++++---------- e2e/pytorch-lightning/pyproject.toml | 27 +++++++++++++-------- e2e/pytorch/pyproject.toml | 31 ++++++++++++++---------- e2e/scikit-learn/pyproject.toml | 29 +++++++++++++---------- e2e/tabnet/pyproject.toml | 35 +++++++++++++++++----------- e2e/tensorflow/pyproject.toml | 29 ++++++++++++++--------- 13 files changed, 224 insertions(+), 126 deletions(-) diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 57802e598546..1802809bd2b0 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -118,12 +118,26 @@ jobs: steps: - uses: actions/checkout@v4 - - name: Bootstrap - uses: ./.github/actions/bootstrap + - name: Set up Python + uses: actions/setup-python@v5 with: python-version: 3.8 + - name: Install build tools + run: | + python -m pip install -U pip==23.3.1 + shell: bash + # Using approach described here for Python location caching: + # https://blog.allenai.org/python-caching-in-github-actions-e9452698e98d + - name: Cache Python location + id: cache-python + uses: actions/cache@v4 + with: + path: ${{ env.pythonLocation }} + key: pythonloc-${{ runner.os }}-${{ matrix.directory }}-${{ env.pythonLocation }}-${{ hashFiles('**/pyproject.toml') }} + restore-keys: | + pythonloc-${{ runner.os }}-${{ matrix.directory }}-${{ env.pythonLocation }} - name: Install dependencies - run: python -m poetry install + run: python -m pip install --upgrade . 
- name: Install Flower wheel from artifact store if: ${{ github.repository == 'adap/flower' && !github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' }} run: | diff --git a/e2e/bare-client-auth/pyproject.toml b/e2e/bare-client-auth/pyproject.toml index 693fec815474..839f0779cc01 100644 --- a/e2e/bare-client-auth/pyproject.toml +++ b/e2e/bare-client-auth/pyproject.toml @@ -1,13 +1,20 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] +[project] name = "bare_client_auth_test" version = "0.1.0" description = "Client-auth-enabled bare Federated Learning test with Flower" -authors = ["The Flower Authors "] +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, +] +dependencies = [ + "flwr @ {root:parent:parent:uri}", +] -[tool.poetry.dependencies] -python = "^3.8" -flwr = { path = "../../", develop = true } +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.hatch.metadata] +allow-direct-references = true diff --git a/e2e/bare-https/pyproject.toml b/e2e/bare-https/pyproject.toml index 3afb7b57a084..de8aa92cbd02 100644 --- a/e2e/bare-https/pyproject.toml +++ b/e2e/bare-https/pyproject.toml @@ -1,13 +1,20 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] +[project] name = "bare_https_test" version = "0.1.0" description = "HTTPS-enabled bare Federated Learning test with Flower" -authors = ["The Flower Authors "] +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, +] +dependencies = [ + "flwr @ {root:parent:parent:uri}", +] -[tool.poetry.dependencies] -python = "^3.8" -flwr = { path = "../../", develop = true } +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.hatch.metadata] +allow-direct-references = true diff --git a/e2e/bare/pyproject.toml b/e2e/bare/pyproject.toml index 45ce7ea333af..ba8c1b2b2276 100644 --- a/e2e/bare/pyproject.toml +++ b/e2e/bare/pyproject.toml @@ -1,13 +1,20 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] +[project] name = "bare_test" version = "0.1.0" description = "Bare Federated Learning test with Flower" -authors = ["The Flower Authors "] +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, +] +dependencies = [ + "flwr[simulation,rest] @ {root:parent:parent:uri}", +] -[tool.poetry.dependencies] -python = "^3.8" -flwr = { path = "../../", develop = true, extras = ["simulation", "rest"] } +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.hatch.metadata] +allow-direct-references = true diff --git a/e2e/fastai/pyproject.toml b/e2e/fastai/pyproject.toml index feed31f6d202..53d3b7e7baf1 100644 --- a/e2e/fastai/pyproject.toml +++ b/e2e/fastai/pyproject.toml @@ -1,15 +1,22 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] +[project] name = "quickstart-fastai" version = "0.1.0" description = "Fastai Federated Learning E2E test with Flower" -authors = ["The Flower Authors "] +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, +] +dependencies = [ + "flwr[simulation] @ {root:parent:parent:uri}", + "fastai>=2.7.12,<3.0.0", + "torch>=2.0.0,!=2.0.1,<2.1.0", +] 
-[tool.poetry.dependencies] -python = ">=3.8,<3.10" -flwr = { path = "../../", develop = true, extras = ["simulation"] } -fastai = "^2.7.12" -torch = ">=2.0.0, !=2.0.1, < 2.1.0" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.hatch.metadata] +allow-direct-references = true diff --git a/e2e/jax/pyproject.toml b/e2e/jax/pyproject.toml index 9a4af5dee59a..bb024ba14d23 100644 --- a/e2e/jax/pyproject.toml +++ b/e2e/jax/pyproject.toml @@ -1,17 +1,24 @@ -[tool.poetry] +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] name = "jax_example" version = "0.1.0" description = "JAX example training a linear regression model with federated learning" -authors = ["The Flower Authors "] +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, +] +dependencies = [ + "flwr[simulation] @ {root:parent:parent:uri}", + "jax==0.4.13", + "jaxlib==0.4.13", + "scikit-learn>=1.1.1,<2.0.0", + "numpy>=1.21.4,<2.0.0", +] -[tool.poetry.dependencies] -python = "^3.8" -flwr = { path = "../../", develop = true, extras = ["simulation"] } -jax = "==0.4.13" -jaxlib = "==0.4.13" -scikit-learn = "^1.1.1" -numpy = "^1.21.4" +[tool.hatch.build.targets.wheel] +packages = ["."] -[build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +[tool.hatch.metadata] +allow-direct-references = true diff --git a/e2e/opacus/pyproject.toml b/e2e/opacus/pyproject.toml index ab4a727cc00b..8fd1056f43a3 100644 --- a/e2e/opacus/pyproject.toml +++ b/e2e/opacus/pyproject.toml @@ -1,16 +1,23 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] +[project] name = "opacus_e2e" version = "0.1.0" description = "Opacus E2E testing" -authors = ["The Flower Authors "] +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, +] +dependencies = [ + "flwr[simulation] @ {root:parent:parent:uri}", + "opacus>=1.4.0,<2.0.0", + "torch>=1.13.1,<2.0.0", + "torchvision>=0.14.0,<2.0.0", +] -[tool.poetry.dependencies] -python = "^3.8" -flwr = { path = "../../", develop = true, extras = ["simulation"] } -opacus = "^1.4.0" -torch = "^1.13.1" -torchvision = "^0.14.0" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.hatch.metadata] +allow-direct-references = true diff --git a/e2e/pandas/pyproject.toml b/e2e/pandas/pyproject.toml index 416dfeec3460..f8f8488a7006 100644 --- a/e2e/pandas/pyproject.toml +++ b/e2e/pandas/pyproject.toml @@ -1,17 +1,26 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] +[project] name = "quickstart-pandas" version = "0.1.0" -description = "Pandas Federated Analytics Quickstart with Flower" -authors = ["Ragy Haddad "] -maintainers = ["The Flower Authors "] +description = "Pandas E2E test with Flower" +authors = [ + { name = "Ragy Haddad", email = "ragy202@gmail.com" }, +] +maintainers = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, +] +dependencies = [ + "flwr[simulation] @ {root:parent:parent:uri}", + "numpy>=1.21.0,<2.0.0", + "pandas>=2.0.0,<3.0.0", + "scikit-learn>=1.1.1,<2.0.0", +] -[tool.poetry.dependencies] -python = "^3.8" -flwr = { path = "../../", develop = true, extras = ["simulation"] } -numpy = "^1.21.0" -pandas = "^2.0.0" -scikit-learn = "^1.1.1" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.hatch.metadata] +allow-direct-references = true 
diff --git a/e2e/pytorch-lightning/pyproject.toml b/e2e/pytorch-lightning/pyproject.toml index 90d659813c28..8706ef098d8b 100644 --- a/e2e/pytorch-lightning/pyproject.toml +++ b/e2e/pytorch-lightning/pyproject.toml @@ -1,15 +1,22 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "quickstart-pytorch-lightning" +[project] +name = "quickstart-pytorch-lightning-test" version = "0.1.0" description = "Federated Learning E2E test with Flower and PyTorch Lightning" -authors = ["The Flower Authors "] +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, +] +dependencies = [ + "flwr[simulation] @ {root:parent:parent:uri}", + "pytorch-lightning==2.2.4", + "torchvision==0.14.1", +] -[tool.poetry.dependencies] -python = "^3.8" -flwr = { path = "../../", develop = true, extras = ["simulation"] } -pytorch-lightning = "2.2.4" -torchvision = "0.14.1" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.hatch.metadata] +allow-direct-references = true diff --git a/e2e/pytorch/pyproject.toml b/e2e/pytorch/pyproject.toml index e538f1437df6..8c59c43d50df 100644 --- a/e2e/pytorch/pyproject.toml +++ b/e2e/pytorch/pyproject.toml @@ -1,16 +1,23 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "quickstart-pytorch" +[project] +name = "pytorch_e2e" version = "0.1.0" -description = "PyTorch Federated Learning Quickstart with Flower" -authors = ["The Flower Authors "] +description = "PyTorch Federated Learning E2E test with Flower" +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, +] +dependencies = [ + "flwr[simulation] @ {root:parent:parent:uri}", + "torch>=1.12.0,<2.0.0", + "torchvision>=0.14.1,<0.15.0", + "tqdm>=4.63.0,<5.0.0", +] -[tool.poetry.dependencies] -python = "^3.8" -flwr = { path = "../../", develop = true, extras = ["simulation"] } -torch = "^1.12.0" -torchvision = "^0.14.1" -tqdm = "^4.63.0" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.hatch.metadata] +allow-direct-references = true diff --git a/e2e/scikit-learn/pyproject.toml b/e2e/scikit-learn/pyproject.toml index 50c07d31add7..caba2324d44f 100644 --- a/e2e/scikit-learn/pyproject.toml +++ b/e2e/scikit-learn/pyproject.toml @@ -1,18 +1,23 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "sklearn-mnist" +[project] +name = "sklearn-mnist-test" version = "0.1.0" -description = "Federated learning with scikit-learn and Flower" +description = "Federated learning E2E test with scikit-learn and Flower" authors = [ - "The Flower Authors ", - "Kaushik Amar Das ", + { name = "The Flower Authors", email = "hello@flower.ai" }, + { name = "Kaushik Amar Das", email = "kaushik.das@iiitg.ac.in"}, ] +dependencies = [ + "flwr[simulation,rest] @ {root:parent:parent:uri}", + "scikit-learn>=1.1.1,<2.0.0", + "openml>=0.14.0,<0.15.0" +] + +[tool.hatch.build.targets.wheel] +packages = ["."] -[tool.poetry.dependencies] -python = "^3.8" -flwr = { path = "../../", develop = true, extras = ["simulation"] } -scikit-learn = "^1.1.1" -openml = "^0.14.0" +[tool.hatch.metadata] +allow-direct-references = true diff --git a/e2e/tabnet/pyproject.toml b/e2e/tabnet/pyproject.toml index b1abf382a24a..99379ddb607e 100644 --- 
a/e2e/tabnet/pyproject.toml +++ b/e2e/tabnet/pyproject.toml @@ -1,18 +1,25 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "quickstart-tabnet" +[project] +name = "quickstart-tabnet-test" version = "0.1.0" -description = "Tabnet Federated Learning Quickstart with Flower" -authors = ["The Flower Authors "] +description = "Tabnet Federated Learning E2E test with Flower" +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, +] +dependencies = [ + "flwr[simulation] @ {root:parent:parent:uri}", + "tensorflow-cpu>=2.9.1,!=2.11.1; platform_machine == \"x86_64\"", + "tensorflow-macos>=2.9.1,!=2.11.1; sys_platform == \"darwin\" and platform_machine == \"arm64\"", + "tensorflow_datasets==4.9.2", + "tensorflow-io-gcs-filesystem<0.35.0", + "tabnet==0.1.6", +] -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = { path = "../../", develop = true, extras = ["simulation"] } -tensorflow-cpu = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "platform_machine == \"x86_64\"" } -tensorflow-macos = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "sys_platform == \"darwin\" and platform_machine == \"arm64\"" } -tensorflow_datasets = "4.9.2" -tensorflow-io-gcs-filesystem = "<0.35.0" -tabnet = "0.1.6" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.hatch.metadata] +allow-direct-references = true diff --git a/e2e/tensorflow/pyproject.toml b/e2e/tensorflow/pyproject.toml index a7dbfe2305db..4b035873223c 100644 --- a/e2e/tensorflow/pyproject.toml +++ b/e2e/tensorflow/pyproject.toml @@ -1,15 +1,22 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "quickstart-tensorflow" +[project] +name = "quickstart-tensorflow-test" version = "0.1.0" -description = "Keras Federated Learning Quickstart with Flower" -authors = ["The Flower Authors "] +description = "Keras Federated Learning E2E test with Flower" +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, +] +dependencies = [ + "flwr[simulation] @ {root:parent:parent:uri}", + "tensorflow-cpu>=2.9.1,!=2.11.1", + "tensorflow-io-gcs-filesystem<0.35.0", +] -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = { path = "../../", develop = true, extras = ["simulation"] } -tensorflow-cpu = "^2.9.1, !=2.11.1" -tensorflow-io-gcs-filesystem = "<0.35.0" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.hatch.metadata] +allow-direct-references = true From 117216739bbb6c847b48d6dc9d89be1aa8e7ea6b Mon Sep 17 00:00:00 2001 From: Javier Date: Thu, 23 May 2024 18:45:31 +0100 Subject: [PATCH 11/23] ci(framework) Add `FlowerNext` simulation E2E (#3363) Co-authored-by: Heng Pan Co-authored-by: Daniel J. 
Beutel --- .github/workflows/e2e.yml | 3 +++ e2e/pytorch/simulation_next.py | 14 ++++++++++++++ e2e/tensorflow/simulation_next.py | 14 ++++++++++++++ 3 files changed, 31 insertions(+) create mode 100644 e2e/pytorch/simulation_next.py create mode 100644 e2e/tensorflow/simulation_next.py diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 1802809bd2b0..18ca4e46d593 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -151,6 +151,9 @@ jobs: - name: Run virtual client test if: ${{ matrix.directory != 'bare-client-auth' }} run: python simulation.py + - name: Run simulation engine test + if: ${{ matrix.directory == 'pytorch' || matrix.directory == 'tensorflow'}} + run: python simulation_next.py - name: Run driver test if: ${{ matrix.directory != 'bare-client-auth' }} run: ./../test_driver.sh "${{ matrix.directory }}" diff --git a/e2e/pytorch/simulation_next.py b/e2e/pytorch/simulation_next.py new file mode 100644 index 000000000000..ba1719dfb75b --- /dev/null +++ b/e2e/pytorch/simulation_next.py @@ -0,0 +1,14 @@ +from client import app as client_app + +import flwr as fl + +# Define ServerAppp +server_app = fl.server.ServerApp( + config=fl.server.ServerConfig(num_rounds=3), +) + + +# Run with FlowerNext +fl.simulation.run_simulation( + server_app=server_app, client_app=client_app, num_supernodes=2 +) diff --git a/e2e/tensorflow/simulation_next.py b/e2e/tensorflow/simulation_next.py new file mode 100644 index 000000000000..ba1719dfb75b --- /dev/null +++ b/e2e/tensorflow/simulation_next.py @@ -0,0 +1,14 @@ +from client import app as client_app + +import flwr as fl + +# Define ServerAppp +server_app = fl.server.ServerApp( + config=fl.server.ServerConfig(num_rounds=3), +) + + +# Run with FlowerNext +fl.simulation.run_simulation( + server_app=server_app, client_app=client_app, num_supernodes=2 +) From 0c88e528902c3d9e49c5cf899c628140fc14c8fa Mon Sep 17 00:00:00 2001 From: Robert Steiner Date: Thu, 23 May 2024 22:53:31 +0200 Subject: [PATCH 12/23] ci(framework:skip) Switch to new base images (#3483) --------- Signed-off-by: Robert Steiner --- .github/workflows/_docker-build.yml | 1 + .github/workflows/docker-base.yml | 60 -------- .github/workflows/docker-images.yml | 75 ++++++++++ .github/workflows/docker-serverapp.yml | 52 ------- .github/workflows/docker-superlink.yml | 45 ------ .github/workflows/docker-supernode.yml | 52 ------- dev/build-docker-image-matrix.py | 182 +++++++++++++++++++++++++ src/docker/serverapp/Dockerfile | 16 +-- src/docker/superlink/Dockerfile | 16 +-- src/docker/supernode/Dockerfile | 12 +- 10 files changed, 264 insertions(+), 247 deletions(-) delete mode 100644 .github/workflows/docker-base.yml create mode 100644 .github/workflows/docker-images.yml delete mode 100644 .github/workflows/docker-serverapp.yml delete mode 100644 .github/workflows/docker-superlink.yml delete mode 100644 .github/workflows/docker-supernode.yml create mode 100644 dev/build-docker-image-matrix.py diff --git a/.github/workflows/_docker-build.yml b/.github/workflows/_docker-build.yml index 043b6139a79e..608158fc0a5a 100644 --- a/.github/workflows/_docker-build.yml +++ b/.github/workflows/_docker-build.yml @@ -88,6 +88,7 @@ jobs: id: build uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v5.3.0 with: + pull: true platforms: ${{ matrix.platform.docker }} context: "{{defaultContext}}:${{ inputs.file-dir }}" build-args: ${{ inputs.build-args }} diff --git a/.github/workflows/docker-base.yml b/.github/workflows/docker-base.yml deleted file 
mode 100644 index f779043d9932..000000000000 --- a/.github/workflows/docker-base.yml +++ /dev/null @@ -1,60 +0,0 @@ -name: Build docker base image - -on: - workflow_dispatch: - inputs: - flwr-version: - description: "Version of Flower." - required: true - type: string - -permissions: - contents: read - -jobs: - parameters: - name: Collect build parameters - runs-on: ubuntu-22.04 - timeout-minutes: 10 - outputs: - pip-version: ${{ steps.versions.outputs.pip-version }} - setuptools-version: ${{ steps.versions.outputs.setuptools-version }} - - steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - uses: ./.github/actions/bootstrap - id: bootstrap - - - id: versions - run: | - echo "pip-version=${{ steps.bootstrap.outputs.pip-version }}" >> "$GITHUB_OUTPUT" - echo "setuptools-version=${{ steps.bootstrap.outputs.setuptools-version }}" >> "$GITHUB_OUTPUT" - - build-base-images: - name: Build base images - uses: ./.github/workflows/_docker-build.yml - needs: parameters - strategy: - fail-fast: false - matrix: - python-version: ["3.8", "3.9", "3.10", "3.11"] - distro: - - name: ubuntu - version: "22.04" - - name: alpine - version: "3.19" - with: - namespace-repository: flwr/base - file-dir: src/docker/base/${{ matrix.distro.name }} - build-args: | - PYTHON_VERSION=${{ matrix.python-version }} - PIP_VERSION=${{ needs.parameters.outputs.pip-version }} - SETUPTOOLS_VERSION=${{ needs.parameters.outputs.setuptools-version }} - DISTRO=${{ matrix.distro.name }} - DISTRO_VERSION=${{ matrix.distro.version }} - FLWR_VERSION=${{ github.event.inputs.flwr-version }} - tags: ${{ github.event.inputs.flwr-version }}-py${{ matrix.python-version }}-${{ matrix.distro.name }}${{ matrix.distro.version }} - secrets: - dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} - dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/docker-images.yml b/.github/workflows/docker-images.yml new file mode 100644 index 000000000000..e341ae62e3f7 --- /dev/null +++ b/.github/workflows/docker-images.yml @@ -0,0 +1,75 @@ +name: Build docker images + +on: + workflow_dispatch: + inputs: + flwr-version: + description: "Version of Flower." 
+ required: true + type: string + +permissions: + contents: read + +jobs: + parameters: + name: Collect build parameters + runs-on: ubuntu-22.04 + timeout-minutes: 10 + outputs: + pip-version: ${{ steps.versions.outputs.pip-version }} + setuptools-version: ${{ steps.versions.outputs.setuptools-version }} + matrix: ${{ steps.matrix.outputs.matrix }} + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - uses: ./.github/actions/bootstrap + id: bootstrap + + - id: versions + run: | + echo "pip-version=${{ steps.bootstrap.outputs.pip-version }}" >> "$GITHUB_OUTPUT" + echo "setuptools-version=${{ steps.bootstrap.outputs.setuptools-version }}" >> "$GITHUB_OUTPUT" + + - id: matrix + run: | + python dev/build-docker-image-matrix.py --flwr-version ${{ github.event.inputs.flwr-version }} > matrix.json + echo "matrix=$(cat matrix.json)" >> $GITHUB_OUTPUT + + build-base-images: + name: Build base images + uses: ./.github/workflows/_docker-build.yml + needs: parameters + strategy: + fail-fast: false + matrix: ${{ fromJson(needs.parameters.outputs.matrix).base }} + with: + namespace-repository: ${{ matrix.images.namespace_repository }} + file-dir: ${{ matrix.images.file_dir }} + build-args: | + PYTHON_VERSION=${{ matrix.images.python_version }} + PIP_VERSION=${{ needs.parameters.outputs.pip-version }} + SETUPTOOLS_VERSION=${{ needs.parameters.outputs.setuptools-version }} + DISTRO=${{ matrix.images.distro.name }} + DISTRO_VERSION=${{ matrix.images.distro.version }} + FLWR_VERSION=${{ matrix.images.flwr_version }} + tags: ${{ matrix.images.tag }} + secrets: + dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} + dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} + + build-binary-images: + name: Build binary images + uses: ./.github/workflows/_docker-build.yml + needs: [parameters, build-base-images] + strategy: + fail-fast: false + matrix: ${{ fromJson(needs.parameters.outputs.matrix).binary }} + with: + namespace-repository: ${{ matrix.images.namespace_repository }} + file-dir: ${{ matrix.images.file_dir }} + build-args: BASE_IMAGE=${{ matrix.images.base_image }} + tags: ${{ matrix.images.tags }} + secrets: + dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} + dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/docker-serverapp.yml b/.github/workflows/docker-serverapp.yml deleted file mode 100644 index 531ef079c641..000000000000 --- a/.github/workflows/docker-serverapp.yml +++ /dev/null @@ -1,52 +0,0 @@ -name: Build docker ServerApp image - -on: - workflow_dispatch: - inputs: - flwr-version: - description: "Version of Flower." 
- required: true - type: string - -permissions: - contents: read - -jobs: - build-serverapp-images: - name: Build images - uses: ./.github/workflows/_docker-build.yml - # run only on default branch when using it with workflow_dispatch - if: github.ref_name == github.event.repository.default_branch - strategy: - fail-fast: false - matrix: - image: [ - { - py-version: "3.8", - tags: "${{ github.event.inputs.flwr-version }}-py3.8-ubuntu22.04" - }, - { - py-version: "3.9", - tags: "${{ github.event.inputs.flwr-version }}-py3.9-ubuntu22.04" - }, - { - py-version: "3.10", - tags: "${{ github.event.inputs.flwr-version }}-py3.10-ubuntu22.04" - }, - { - py-version: "3.11", - # those are two tags -py3.11-py3.11-ubuntu22.04 and separated by a \n - tags: "${{ github.event.inputs.flwr-version }}-py3.11-ubuntu22.04\n${{ github.event.inputs.flwr-version }}" - }, - ] - with: - namespace-repository: flwr/serverapp - file-dir: src/docker/serverapp - build-args: | - FLWR_VERSION=${{ github.event.inputs.flwr-version }} - PYTHON_VERSION=${{ matrix.image.py-version }} - UBUNTU_VERSION=ubuntu22.04 - tags: ${{ matrix.image.tags }} - secrets: - dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} - dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/docker-superlink.yml b/.github/workflows/docker-superlink.yml deleted file mode 100644 index b6a6b4114ba4..000000000000 --- a/.github/workflows/docker-superlink.yml +++ /dev/null @@ -1,45 +0,0 @@ -name: Build docker SuperLink image - -on: - workflow_dispatch: - inputs: - flwr-version: - description: "Version of Flower." - required: true - type: string - -permissions: - contents: read - -jobs: - build-superlink-images: - name: Build images - uses: ./.github/workflows/_docker-build.yml - # run only on default branch when using it with workflow_dispatch - if: github.ref_name == github.event.repository.default_branch - with: - namespace-repository: flwr/superlink - file-dir: src/docker/superlink - build-args: | - FLWR_VERSION=${{ github.event.inputs.flwr-version }} - PYTHON_VERSION=3.11 - UBUNTU_VERSION=ubuntu22.04 - tags: | - ${{ github.event.inputs.flwr-version }}-py3.11-ubuntu22.04 - ${{ github.event.inputs.flwr-version }} - secrets: - dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} - dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} - - summary: - name: Summary - runs-on: ubuntu-22.04 - needs: build-superlink-images - timeout-minutes: 10 - steps: - - run: | - echo "### Images" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - for IMAGE in $(echo ${{ toJson(needs.build-superlink-images.outputs.metadata) }} | jq -r '.tags[]' ); do - echo "- $IMAGE" >> $GITHUB_STEP_SUMMARY - done diff --git a/.github/workflows/docker-supernode.yml b/.github/workflows/docker-supernode.yml deleted file mode 100644 index 83b53764b794..000000000000 --- a/.github/workflows/docker-supernode.yml +++ /dev/null @@ -1,52 +0,0 @@ -name: Build docker SuperNode image - -on: - workflow_dispatch: - inputs: - flwr-version: - description: "Version of Flower." 
- required: true - type: string - -permissions: - contents: read - -jobs: - build-supernode-images: - name: Build images - uses: ./.github/workflows/_docker-build.yml - # run only on default branch when using it with workflow_dispatch - if: github.ref_name == github.event.repository.default_branch - strategy: - fail-fast: false - matrix: - image: [ - { - py-version: "3.8", - tags: "${{ github.event.inputs.flwr-version }}-py3.8-ubuntu22.04" - }, - { - py-version: "3.9", - tags: "${{ github.event.inputs.flwr-version }}-py3.9-ubuntu22.04" - }, - { - py-version: "3.10", - tags: "${{ github.event.inputs.flwr-version }}-py3.10-ubuntu22.04" - }, - { - py-version: "3.11", - # those are two tags -py3.11-py3.11-ubuntu22.04 and separated by a \n - tags: "${{ github.event.inputs.flwr-version }}-py3.11-ubuntu22.04\n${{ github.event.inputs.flwr-version }}" - }, - ] - with: - namespace-repository: flwr/supernode - file-dir: src/docker/supernode - build-args: | - FLWR_VERSION=${{ github.event.inputs.flwr-version }} - PYTHON_VERSION=${{ matrix.image.py-version }} - UBUNTU_VERSION=ubuntu22.04 - tags: ${{ matrix.image.tags }} - secrets: - dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} - dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/dev/build-docker-image-matrix.py b/dev/build-docker-image-matrix.py new file mode 100644 index 000000000000..5b9c63434bfb --- /dev/null +++ b/dev/build-docker-image-matrix.py @@ -0,0 +1,182 @@ +""" +Usage: python dev/build-docker-image-matrix.py --flwr-version +""" + +import argparse +from dataclasses import asdict, dataclass +from enum import Enum +import json +from typing import Any, Callable, Dict, List, Optional + + +class DistroName(str, Enum): + ALPINE = "alpine" + UBUNTU = "ubuntu" + + +@dataclass +class Distro: + name: "DistroName" + version: str + + +LATEST_SUPPORTED_PYTHON_VERSION = "3.11" +SUPPORTED_PYTHON_VERSIONS = [ + "3.8", + "3.9", + "3.10", + LATEST_SUPPORTED_PYTHON_VERSION, +] + +DOCKERFILE_ROOT = "src/docker" + + +@dataclass +class BaseImage: + distro: Distro + python_version: str + namespace_repository: str + file_dir: str + tag: str + flwr_version: str + + +def new_base_image( + flwr_version: str, python_version: str, distro: Distro +) -> Dict[str, Any]: + return BaseImage( + distro, + python_version, + "flwr/base", + f"{DOCKERFILE_ROOT}/base/{distro.name.value}", + f"{flwr_version}-py{python_version}-{distro.name.value}{distro.version}", + flwr_version, + ) + + +def generate_base_images( + flwr_version: str, python_versions: List[str], distros: List[Dict[str, str]] +) -> List[Dict[str, Any]]: + return [ + new_base_image(flwr_version, python_version, distro) + for distro in distros + for python_version in python_versions + ] + + +@dataclass +class BinaryImage: + namespace_repository: str + file_dir: str + base_image: str + tags: List[str] + + +def new_binary_image( + name: str, + base_image: BaseImage, + tags_fn: Optional[Callable], +) -> Dict[str, Any]: + tags = [] + if tags_fn is not None: + tags += tags_fn(base_image) or [] + + return BinaryImage( + f"flwr/{name}", + f"{DOCKERFILE_ROOT}/{name}", + base_image.tag, + "\n".join(tags), + ) + + +def generate_binary_images( + name: str, + base_images: List[BaseImage], + tags_fn: Optional[Callable] = None, + filter: Optional[Callable] = None, +) -> List[Dict[str, Any]]: + filter = filter or (lambda _: True) + + return [ + new_binary_image(name, image, tags_fn) for image in base_images if filter(image) + ] + + +def tag_latest_alpine_with_flwr_version(image: BaseImage) -> List[str]: + if ( + 
image.distro.name == DistroName.ALPINE + and image.python_version == LATEST_SUPPORTED_PYTHON_VERSION + ): + return [image.tag, image.flwr_version] + else: + return [image.tag] + + +def tag_latest_ubuntu_with_flwr_version(image: BaseImage) -> List[str]: + if ( + image.distro.name == DistroName.UBUNTU + and image.python_version == LATEST_SUPPORTED_PYTHON_VERSION + ): + return [image.tag, image.flwr_version] + else: + return [image.tag] + + +if __name__ == "__main__": + arg_parser = argparse.ArgumentParser( + description="Generate Github Docker workflow matrix" + ) + arg_parser.add_argument("--flwr-version", type=str, required=True) + args = arg_parser.parse_args() + + flwr_version = args.flwr_version + + # ubuntu base images for each supported python version + ubuntu_base_images = generate_base_images( + flwr_version, + SUPPORTED_PYTHON_VERSIONS, + [Distro(DistroName.UBUNTU, "22.04")], + ) + # alpine base images for the latest supported python version + alpine_base_images = generate_base_images( + flwr_version, + [LATEST_SUPPORTED_PYTHON_VERSION], + [Distro(DistroName.ALPINE, "3.19")], + ) + + base_images = ubuntu_base_images + alpine_base_images + + binary_images = ( + # ubuntu and alpine images for the latest supported python version + generate_binary_images( + "superlink", + base_images, + tag_latest_alpine_with_flwr_version, + lambda image: image.python_version == LATEST_SUPPORTED_PYTHON_VERSION, + ) + # ubuntu images for each supported python version + + generate_binary_images( + "supernode", + base_images, + tag_latest_ubuntu_with_flwr_version, + lambda image: image.distro.name == DistroName.UBUNTU, + ) + # ubuntu images for each supported python version + + generate_binary_images( + "serverapp", + base_images, + tag_latest_ubuntu_with_flwr_version, + lambda image: image.distro.name == DistroName.UBUNTU, + ) + ) + + print( + json.dumps( + { + "base": {"images": list(map(lambda image: asdict(image), base_images))}, + "binary": { + "images": list(map(lambda image: asdict(image), binary_images)) + }, + } + ) + ) diff --git a/src/docker/serverapp/Dockerfile b/src/docker/serverapp/Dockerfile index 22b464a3838c..08eceacc4557 100644 --- a/src/docker/serverapp/Dockerfile +++ b/src/docker/serverapp/Dockerfile @@ -14,19 +14,7 @@ # ============================================================================== ARG BASE_REPOSITORY=flwr/base -ARG PYTHON_VERSION=3.11 -ARG UBUNTU_VERSION=ubuntu22.04 -FROM $BASE_REPOSITORY:py${PYTHON_VERSION}-${UBUNTU_VERSION} +ARG BASE_IMAGE +FROM $BASE_REPOSITORY:$BASE_IMAGE -ARG FLWR_PACKAGE=flwr -ARG FLWR_VERSION -RUN python -m pip install -U --no-cache-dir \ - ${FLWR_PACKAGE}==${FLWR_VERSION} && \ - # Without pyenv rehash the executable cannot be found. 
- # pyenv rehash is usually called via the shell by adding - # `pyenv init -` in the shell profile, but that doesn't work - # well in docker - pyenv rehash - -WORKDIR /app ENTRYPOINT ["flower-server-app"] diff --git a/src/docker/superlink/Dockerfile b/src/docker/superlink/Dockerfile index acf06f66f2fb..31c0906dfd22 100644 --- a/src/docker/superlink/Dockerfile +++ b/src/docker/superlink/Dockerfile @@ -14,19 +14,7 @@ # ============================================================================== ARG BASE_REPOSITORY=flwr/base -ARG PYTHON_VERSION=3.11 -ARG UBUNTU_VERSION=ubuntu22.04 -FROM $BASE_REPOSITORY:py${PYTHON_VERSION}-${UBUNTU_VERSION} +ARG BASE_IMAGE +FROM $BASE_REPOSITORY:$BASE_IMAGE -ARG FLWR_PACKAGE=flwr -ARG FLWR_VERSION -RUN python -m pip install -U --no-cache-dir \ - ${FLWR_PACKAGE}==${FLWR_VERSION} && \ - # Without pyenv rehash the executable cannot be found. - # pyenv rehash is usually called via the shell by adding - # `pyenv init -` in the shell profile, but that doesn't work - # well in docker - pyenv rehash - -WORKDIR /app ENTRYPOINT ["flower-superlink"] diff --git a/src/docker/supernode/Dockerfile b/src/docker/supernode/Dockerfile index 8117dcc295df..8dce1c389a5b 100644 --- a/src/docker/supernode/Dockerfile +++ b/src/docker/supernode/Dockerfile @@ -14,15 +14,7 @@ # ============================================================================== ARG BASE_REPOSITORY=flwr/base -ARG PYTHON_VERSION=3.11 -ARG UBUNTU_VERSION=ubuntu22.04 -FROM $BASE_REPOSITORY:py${PYTHON_VERSION}-${UBUNTU_VERSION} +ARG BASE_IMAGE +FROM $BASE_REPOSITORY:$BASE_IMAGE -ARG FLWR_PACKAGE=flwr -ARG FLWR_VERSION -RUN python -m pip install -U --no-cache-dir \ - ${FLWR_PACKAGE}==${FLWR_VERSION} && \ - pyenv rehash - -WORKDIR /app ENTRYPOINT ["flower-client-app"] From 856409ea0f1d77db3fd4c7907aa4febb0c38d988 Mon Sep 17 00:00:00 2001 From: Javier Date: Fri, 24 May 2024 12:43:27 +0100 Subject: [PATCH 13/23] fix(*:skip) Update `ray` and unpin `setuptools` (#3501) --------- Co-authored-by: Taner Topal --- pyproject.toml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index adacd5d2b4e1..2d8c24d8e80c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -73,9 +73,8 @@ iterators = "^0.0.2" typer = { version = "^0.9.0", extras=["all"] } tomli = "^2.0.1" pathspec = "^0.12.1" -setuptools = "!=70.0.0" # Optional dependencies (Simulation Engine) -ray = { version = "==2.6.3", optional = true, python = ">=3.8,<3.12" } +ray = { version = "==2.10.0", optional = true, python = ">=3.8,<3.12" } # Optional dependencies (REST transport layer) requests = { version = "^2.31.0", optional = true } starlette = { version = "^0.31.0", optional = true } From 0cd41cc4572076f0c698a79e471964bd36bc11b2 Mon Sep 17 00:00:00 2001 From: mohammadnaseri Date: Fri, 24 May 2024 15:28:03 +0100 Subject: [PATCH 14/23] docs(framework) Fix the number of examples in pytorch-flower doc (#3498) Co-authored-by: Taner Topal --- .../tutorial-series-get-started-with-flower-pytorch.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb b/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb index c9d38b417a92..d8e6e58fafab 100644 --- a/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb +++ b/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb @@ -160,7 +160,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We now have a list of ten training sets and ten validation sets 
(`trainloaders` and `valloaders`) representing the data of ten different organizations. Each `trainloader`/`valloader` pair contains 4500 training examples and 500 validation examples. There's also a single `testloader` (we did not split the test set). Again, this is only necessary for building research or educational systems, actual federated learning systems have their data naturally distributed across multiple partitions.\n", + "We now have a list of ten training sets and ten validation sets (`trainloaders` and `valloaders`) representing the data of ten different organizations. Each `trainloader`/`valloader` pair contains 4000 training examples and 1000 validation examples. There's also a single `testloader` (we did not split the test set). Again, this is only necessary for building research or educational systems, actual federated learning systems have their data naturally distributed across multiple partitions.\n", "\n", "Let's take a look at the first batch of images and labels in the first training set (i.e., `trainloaders[0]`) before we move on:" ] From 05ad7df2a810a1bad90af98ccac460fb38cb8cc4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 24 May 2024 15:33:38 +0000 Subject: [PATCH 15/23] chore(deps): update torch requirement in /e2e/opacus (#3502) Updates the requirements on [torch](https://github.com/pytorch/pytorch) to permit the latest version. - [Release notes](https://github.com/pytorch/pytorch/releases) - [Changelog](https://github.com/pytorch/pytorch/blob/main/RELEASE.md) - [Commits](https://github.com/pytorch/pytorch/compare/v1.13.1...v2.3.0) --- updated-dependencies: - dependency-name: torch dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Taner Topal --- e2e/opacus/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/e2e/opacus/pyproject.toml b/e2e/opacus/pyproject.toml index 8fd1056f43a3..cee9fc1914cf 100644 --- a/e2e/opacus/pyproject.toml +++ b/e2e/opacus/pyproject.toml @@ -12,7 +12,7 @@ authors = [ dependencies = [ "flwr[simulation] @ {root:parent:parent:uri}", "opacus>=1.4.0,<2.0.0", - "torch>=1.13.1,<2.0.0", + "torch>=1.13.1,<3.0.0", "torchvision>=0.14.0,<2.0.0", ] From 83e9ba4c7722c9f882748aa8a83adde4d2f1285c Mon Sep 17 00:00:00 2001 From: Daniel Nata Nugraha Date: Fri, 24 May 2024 18:55:04 +0200 Subject: [PATCH 16/23] refactor(framework) Update CLI auth flags (#3503) Co-authored-by: Daniel J. 
Beutel --- doc/source/how-to-authenticate-supernodes.rst | 17 +-- e2e/test_driver.sh | 7 +- .../README.md | 27 ++--- .../certificate.conf | 0 .../client.py | 0 .../generate.sh | 0 .../pyproject.toml | 0 .../server.py | 0 .../task.py | 0 src/py/flwr/client/supernode/app.py | 64 +++++++---- .../crypto/symmetric_encryption.py | 15 --- src/py/flwr/server/app.py | 100 ++++++++++++------ src/py/flwr/server/server_test.py | 8 +- 13 files changed, 138 insertions(+), 100 deletions(-) rename examples/{flower-client-authentication => flower-authentication}/README.md (73%) rename examples/{flower-client-authentication => flower-authentication}/certificate.conf (100%) rename examples/{flower-client-authentication => flower-authentication}/client.py (100%) rename examples/{flower-client-authentication => flower-authentication}/generate.sh (100%) rename examples/{flower-client-authentication => flower-authentication}/pyproject.toml (100%) rename examples/{flower-client-authentication => flower-authentication}/server.py (100%) rename examples/{flower-client-authentication => flower-authentication}/task.py (100%) diff --git a/doc/source/how-to-authenticate-supernodes.rst b/doc/source/how-to-authenticate-supernodes.rst index 472ba64a8310..e0834a1aff0a 100644 --- a/doc/source/how-to-authenticate-supernodes.rst +++ b/doc/source/how-to-authenticate-supernodes.rst @@ -9,7 +9,7 @@ Flower node authentication works similar to how GitHub SSH authentication works: * Shared secret is used to compute the HMAC value of the message sent from SuperNode to SuperLink as a token * SuperLink verifies the token -We recommend you to check out the complete `code example `_ demonstrating federated learning with Flower in an authenticated setting. +We recommend you to check out the complete `code example `_ demonstrating federated learning with Flower in an authenticated setting. .. note:: This guide covers a preview feature that might change in future versions of Flower. @@ -29,15 +29,17 @@ Use the following terminal command to start a Flower :code:`SuperNode` that has flower-superlink --certificates certificates/ca.crt certificates/server.pem certificates/server.key - --require-client-authentication ./keys/client_public_keys.csv ./keys/server_credentials ./keys/server_credentials.pub + --auth-list-public-keys keys/client_public_keys.csv + --auth-superlink-private-key keys/server_credentials + --auth-superlink-public-key keys/server_credentials.pub -Let's break down the :code:`--require-client-authentication` flag: +Let's break down the authentication flags: -1. The first argument is a path to a CSV file storing all known node public keys. You need to store all known node public keys that are allowed to participate in a federation in one CSV file (:code:`.csv`). +1. The first flag :code:`--auth-list-public-keys` expects a path to a CSV file storing all known node public keys. You need to store all known node public keys that are allowed to participate in a federation in one CSV file (:code:`.csv`). A valid CSV file storing known node public keys should list the keys in OpenSSH format, separated by commas and without any comments. For an example, refer to our code sample, which contains a CSV file with two known node public keys. -2. The second and third arguments are paths to the server's private and public keys. For development purposes, you can generate a private and public key pair using :code:`ssh-keygen -t ecdsa -b 384`. +2. 
The second and third flags :code:`--auth-superlink-private-key` and :code:`--auth-superlink-public-key` expect paths to the server's private and public keys. For development purposes, you can generate a private and public key pair using :code:`ssh-keygen -t ecdsa -b 384`. .. note:: In Flower 1.9, there is no support for dynamically removing, editing, or adding known node public keys to the SuperLink. @@ -56,9 +58,10 @@ Use the following terminal command to start an authenticated :code:`SuperNode`: flower-client-app client:app --root-certificates certificates/ca.crt --server 127.0.0.1:9092 - --authentication-keys ./keys/client_credentials ./keys/client_credentials.pub + --auth-supernode-private-key keys/client_credentials + --auth-supernode-public-key keys/client_credentials.pub -The :code:`--authentication-keys` flag expects two arguments: a path to the node's private key file and a path to the node's public key file. For development purposes, you can generate a private and public key pair using :code:`ssh-keygen -t ecdsa -b 384`. +The :code:`--auth-supernode-private-key` flag expects a path to the node's private key file and the :code:`--auth-supernode-public-key` flag expects a path to the node's public key file. For development purposes, you can generate a private and public key pair using :code:`ssh-keygen -t ecdsa -b 384`. Security notice diff --git a/e2e/test_driver.sh b/e2e/test_driver.sh index 6d7f6ec864fd..7a312004990d 100755 --- a/e2e/test_driver.sh +++ b/e2e/test_driver.sh @@ -47,9 +47,9 @@ case "$2" in db_arg="--database :flwr-in-memory-state:" server_arg="--certificates certificates/ca.crt certificates/server.pem certificates/server.key" client_arg="--root-certificates certificates/ca.crt" - server_auth="--require-client-authentication keys/client_public_keys.csv keys/server_credentials keys/server_credentials.pub" - client_auth_1="--authentication-keys keys/client_credentials_1 keys/client_credentials_1.pub" - client_auth_2="--authentication-keys keys/client_credentials_2 keys/client_credentials_2.pub" + server_auth="--auth-list-public-keys keys/client_public_keys.csv --auth-superlink-private-key keys/server_credentials --auth-superlink-public-key keys/server_credentials.pub" + client_auth_1="--auth-supernode-private-key keys/client_credentials_1 --auth-supernode-public-key keys/client_credentials_1.pub" + client_auth_2="--auth-supernode-private-key keys/client_credentials_2 --auth-supernode-public-key keys/client_credentials_2.pub" ;; *) rest_arg="" @@ -84,4 +84,3 @@ if [[ "$res" = "0" ]]; then echo "Training worked correctly"; kill $cl1_pid; kill $cl2_pid; kill $sl_pid; else echo "Training had an issue" && exit 1; fi - diff --git a/examples/flower-client-authentication/README.md b/examples/flower-authentication/README.md similarity index 73% rename from examples/flower-client-authentication/README.md rename to examples/flower-authentication/README.md index 7c724fc26f64..e2403be7608d 100644 --- a/examples/flower-client-authentication/README.md +++ b/examples/flower-authentication/README.md @@ -1,19 +1,19 @@ -# Flower Client Authentication with PyTorch 🧪 +# Flower Authentication with PyTorch 🧪 > 🧪 = This example covers experimental features that might change in future versions of Flower > Please consult the regular PyTorch code examples ([quickstart](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch), [advanced](https://github.com/adap/flower/tree/main/examples/advanced-pytorch)) to learn how to use Flower with PyTorch. 
-The following steps describe how to start a long-running Flower server (SuperLink) and a long-running Flower client (SuperNode) with client authentication enabled. +The following steps describe how to start a long-running Flower server (SuperLink) and a long-running Flower client (SuperNode) with authentication enabled. ## Project Setup Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: ```shell -git clone --depth=1 https://github.com/adap/flower.git _tmp && mv _tmp/examples/flower-client-authentication . && rm -rf _tmp && cd flower-client-authentication +git clone --depth=1 https://github.com/adap/flower.git _tmp && mv _tmp/examples/flower-authentication . && rm -rf _tmp && cd flower-authentication ``` -This will create a new directory called `flower-client-authentication` with the following project structure: +This will create a new directory called `flower-authentication` with the following project structure: ```bash $ tree . @@ -62,26 +62,28 @@ The script also generates a CSV file that includes each of the generated (client ## Start the long-running Flower server (SuperLink) -To start a long-running Flower server and enable client authentication is very easy; all you need to do is type -`--require-client-authentication` followed by the path to the known `client_public_keys.csv`, server's private key -`server_credentials`, and server's public key `server_credentials.pub`. Notice that you can only enable client -authentication with a secure TLS connection. +To start a long-running Flower server (SuperLink) and enable authentication is very easy; all you need to do is type +`--auth-list-public-keys` containing file path to the known `client_public_keys.csv`, `--auth-superlink-private-key` +containing file path to the SuperLink's private key `server_credentials`, and `--auth-superlink-public-key` containing file path to the SuperLink's public key `server_credentials.pub`. Notice that you can only enable authentication with a secure TLS connection. 
```bash flower-superlink \ --certificates certificates/ca.crt certificates/server.pem certificates/server.key \ - --require-client-authentication keys/client_public_keys.csv keys/server_credentials keys/server_credentials.pub + --auth-list-public-keys keys/client_public_keys.csv \ + --auth-superlink-private-key keys/server_credentials \ + --auth-superlink-public-key keys/server_credentials.pub ``` ## Start the long-running Flower client (SuperNode) -In a new terminal window, start the first long-running Flower client: +In a new terminal window, start the first long-running Flower client (SuperNode): ```bash flower-client-app client:app \ --root-certificates certificates/ca.crt \ --server 127.0.0.1:9092 \ - --authentication-keys keys/client_credentials_1 keys/client_credentials_1.pub + --auth-supernode-private-key keys/client_credentials_1 \ + --auth-supernode-public-key keys/client_credentials_1.pub ``` In yet another new terminal window, start the second long-running Flower client: @@ -90,7 +92,8 @@ In yet another new terminal window, start the second long-running Flower client: flower-client-app client:app \ --root-certificates certificates/ca.crt \ --server 127.0.0.1:9092 \ - --authentication-keys keys/client_credentials_2 keys/client_credentials_2.pub + --auth-supernode-private-key keys/client_credentials_2 \ + --auth-supernode-public-key keys/client_credentials_2.pub ``` If you generated more than 2 client credentials, you can add more clients by opening new terminal windows and running the command diff --git a/examples/flower-client-authentication/certificate.conf b/examples/flower-authentication/certificate.conf similarity index 100% rename from examples/flower-client-authentication/certificate.conf rename to examples/flower-authentication/certificate.conf diff --git a/examples/flower-client-authentication/client.py b/examples/flower-authentication/client.py similarity index 100% rename from examples/flower-client-authentication/client.py rename to examples/flower-authentication/client.py diff --git a/examples/flower-client-authentication/generate.sh b/examples/flower-authentication/generate.sh similarity index 100% rename from examples/flower-client-authentication/generate.sh rename to examples/flower-authentication/generate.sh diff --git a/examples/flower-client-authentication/pyproject.toml b/examples/flower-authentication/pyproject.toml similarity index 100% rename from examples/flower-client-authentication/pyproject.toml rename to examples/flower-authentication/pyproject.toml diff --git a/examples/flower-client-authentication/server.py b/examples/flower-authentication/server.py similarity index 100% rename from examples/flower-client-authentication/server.py rename to examples/flower-authentication/server.py diff --git a/examples/flower-client-authentication/task.py b/examples/flower-authentication/task.py similarity index 100% rename from examples/flower-client-authentication/task.py rename to examples/flower-authentication/task.py diff --git a/src/py/flwr/client/supernode/app.py b/src/py/flwr/client/supernode/app.py index e46ed43cc676..ac58e9aa4a81 100644 --- a/src/py/flwr/client/supernode/app.py +++ b/src/py/flwr/client/supernode/app.py @@ -20,6 +20,7 @@ from pathlib import Path from typing import Callable, Optional, Tuple +from cryptography.exceptions import UnsupportedAlgorithm from cryptography.hazmat.primitives.asymmetric import ec from cryptography.hazmat.primitives.serialization import ( load_ssh_private_key, @@ -31,9 +32,6 @@ from flwr.common.exit_handlers import 
register_exit_handlers from flwr.common.logger import log from flwr.common.object_ref import load_app, validate -from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( - ssh_types_to_elliptic_curve, -) from ..app import _start_client_internal @@ -242,40 +240,60 @@ def _parse_args_common(parser: argparse.ArgumentParser) -> None: " Default: current working directory.", ) parser.add_argument( - "--authentication-keys", - nargs=2, - metavar=("CLIENT_PRIVATE_KEY", "CLIENT_PUBLIC_KEY"), + "--auth-supernode-private-key", + type=str, + help="The SuperNode's private key (as a path str) to enable authentication.", + ) + parser.add_argument( + "--auth-supernode-public-key", type=str, - help="Provide two file paths: (1) the client's private " - "key file, and (2) the client's public key file.", + help="The SuperNode's public key (as a path str) to enable authentication.", ) def _try_setup_client_authentication( args: argparse.Namespace, ) -> Optional[Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]]: - if not args.authentication_keys: + if not args.auth_supernode_private_key and not args.auth_supernode_public_key: return None - ssh_private_key = load_ssh_private_key( - Path(args.authentication_keys[0]).read_bytes(), - None, - ) - ssh_public_key = load_ssh_public_key(Path(args.authentication_keys[1]).read_bytes()) + if not args.auth_supernode_private_key or not args.auth_supernode_public_key: + sys.exit( + "Authentication requires file paths to both " + "'--auth-supernode-private-key' and '--auth-supernode-public-key'" + "to be provided (providing only one of them is not sufficient)." + ) + + try: + ssh_private_key = load_ssh_private_key( + Path(args.auth_supernode_private_key).read_bytes(), + None, + ) + if not isinstance(ssh_private_key, ec.EllipticCurvePrivateKey): + raise ValueError() + except (ValueError, UnsupportedAlgorithm): + sys.exit( + "Error: Unable to parse the private key file in " + "'--auth-supernode-private-key'. Authentication requires elliptic " + "curve private and public key pair. Please ensure that the file " + "path points to a valid private key file and try again." + ) try: - client_private_key, client_public_key = ssh_types_to_elliptic_curve( - ssh_private_key, ssh_public_key + ssh_public_key = load_ssh_public_key( + Path(args.auth_supernode_public_key).read_bytes() ) - except TypeError: + if not isinstance(ssh_public_key, ec.EllipticCurvePublicKey): + raise ValueError() + except (ValueError, UnsupportedAlgorithm): sys.exit( - "The file paths provided could not be read as a private and public " - "key pair. Client authentication requires an elliptic curve public and " - "private key pair. Please provide the file paths containing elliptic " - "curve private and public keys to '--authentication-keys'." + "Error: Unable to parse the public key file in " + "'--auth-supernode-public-key'. Authentication requires elliptic " + "curve private and public key pair. Please ensure that the file " + "path points to a valid public key file and try again." 
) return ( - client_private_key, - client_public_key, + ssh_private_key, + ssh_public_key, ) diff --git a/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py b/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py index 9856b8b706f9..1d004a398ea8 100644 --- a/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py +++ b/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py @@ -117,18 +117,3 @@ def verify_hmac(key: bytes, message: bytes, hmac_value: bytes) -> bool: return True except InvalidSignature: return False - - -def ssh_types_to_elliptic_curve( - private_key: serialization.SSHPrivateKeyTypes, - public_key: serialization.SSHPublicKeyTypes, -) -> Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]: - """Cast SSH key types to elliptic curve.""" - if isinstance(private_key, ec.EllipticCurvePrivateKey) and isinstance( - public_key, ec.EllipticCurvePublicKey - ): - return (private_key, public_key) - - raise TypeError( - "The provided key is not an EllipticCurvePrivateKey or EllipticCurvePublicKey" - ) diff --git a/src/py/flwr/server/app.py b/src/py/flwr/server/app.py index 30e73fccb7df..84a075a3e6df 100644 --- a/src/py/flwr/server/app.py +++ b/src/py/flwr/server/app.py @@ -26,6 +26,7 @@ from typing import List, Optional, Sequence, Set, Tuple import grpc +from cryptography.exceptions import UnsupportedAlgorithm from cryptography.hazmat.primitives.asymmetric import ec from cryptography.hazmat.primitives.serialization import ( load_ssh_private_key, @@ -45,7 +46,6 @@ from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( private_key_to_bytes, public_key_to_bytes, - ssh_types_to_elliptic_curve, ) from flwr.proto.fleet_pb2_grpc import ( # pylint: disable=E0611 add_FleetServicer_to_server, @@ -435,44 +435,69 @@ def _try_setup_client_authentication( args: argparse.Namespace, certificates: Optional[Tuple[bytes, bytes, bytes]], ) -> Optional[Tuple[Set[bytes], ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]]: - if not args.require_client_authentication: + if ( + not args.auth_list_public_keys + and not args.auth_superlink_private_key + and not args.auth_superlink_public_key + ): return None + if ( + not args.auth_list_public_keys + or not args.auth_superlink_private_key + or not args.auth_superlink_public_key + ): + sys.exit( + "Authentication requires providing file paths for " + "'--auth-list-public-keys', '--auth-superlink-private-key' and " + "'--auth-superlink-public-key'. Provide all three to enable authentication." + ) + if certificates is None: sys.exit( - "Client authentication only works over secure connections. " - "Please provide certificate paths using '--certificates' when " - "enabling '--require-client-authentication'." + "Authentication requires secure connections. " + "Please provide certificate paths using '--certificates' and " + "try again." ) - client_keys_file_path = Path(args.require_client_authentication[0]) + client_keys_file_path = Path(args.auth_list_public_keys) if not client_keys_file_path.exists(): sys.exit( - "The provided path to the client public keys CSV file does not exist: " + "The provided path to the known public keys CSV file does not exist: " f"{client_keys_file_path}. " - "Please provide the CSV file path containing known client public keys " - "to '--require-client-authentication'." + "Please provide the CSV file path containing known public keys " + "to '--auth-list-public-keys'." 
) client_public_keys: Set[bytes] = set() - ssh_private_key = load_ssh_private_key( - Path(args.require_client_authentication[1]).read_bytes(), - None, - ) - ssh_public_key = load_ssh_public_key( - Path(args.require_client_authentication[2]).read_bytes() - ) try: - server_private_key, server_public_key = ssh_types_to_elliptic_curve( - ssh_private_key, ssh_public_key + ssh_private_key = load_ssh_private_key( + Path(args.auth_superlink_private_key).read_bytes(), + None, ) - except TypeError: + if not isinstance(ssh_private_key, ec.EllipticCurvePrivateKey): + raise ValueError() + except (ValueError, UnsupportedAlgorithm): sys.exit( - "The file paths provided could not be read as a private and public " - "key pair. Client authentication requires an elliptic curve public and " - "private key pair. Please provide the file paths containing elliptic " - "curve private and public keys to '--require-client-authentication'." + "Error: Unable to parse the private key file in " + "'--auth-superlink-private-key'. Authentication requires elliptic " + "curve private and public key pair. Please ensure that the file " + "path points to a valid private key file and try again." + ) + + try: + ssh_public_key = load_ssh_public_key( + Path(args.auth_superlink_public_key).read_bytes() + ) + if not isinstance(ssh_public_key, ec.EllipticCurvePublicKey): + raise ValueError() + except (ValueError, UnsupportedAlgorithm): + sys.exit( + "Error: Unable to parse the public key file in " + "'--auth-superlink-public-key'. Authentication requires elliptic " + "curve private and public key pair. Please ensure that the file " + "path points to a valid public key file and try again." ) with open(client_keys_file_path, newline="", encoding="utf-8") as csvfile: @@ -484,14 +509,14 @@ def _try_setup_client_authentication( client_public_keys.add(public_key_to_bytes(public_key)) else: sys.exit( - "Error: Unable to parse the public keys in the .csv " - "file. Please ensure that the .csv file contains valid " - "SSH public keys and try again." + "Error: Unable to parse the public keys in the CSV " + "file. Please ensure that the CSV file path points to a valid " + "known SSH public keys files and try again." 
) return ( client_public_keys, - server_private_key, - server_public_key, + ssh_private_key, + ssh_public_key, ) @@ -714,13 +739,20 @@ def _add_args_common(parser: argparse.ArgumentParser) -> None: default=DATABASE, ) parser.add_argument( - "--require-client-authentication", - nargs=3, - metavar=("CLIENT_KEYS", "SERVER_PRIVATE_KEY", "SERVER_PUBLIC_KEY"), + "--auth-list-public-keys", + type=str, + help="A CSV file (as a path str) containing a list of known public " + "keys to enable authentication.", + ) + parser.add_argument( + "--auth-superlink-private-key", + type=str, + help="The SuperLink's private key (as a path str) to enable authentication.", + ) + parser.add_argument( + "--auth-superlink-public-key", type=str, - help="Provide three file paths: (1) a .csv file containing a list of " - "known client public keys for authentication, (2) the server's private " - "key file, and (3) the server's public key file.", + help="The SuperLink's public key (as a path str) to enable authentication.", ) diff --git a/src/py/flwr/server/server_test.py b/src/py/flwr/server/server_test.py index 51071c13f895..f47b5c3d8469 100644 --- a/src/py/flwr/server/server_test.py +++ b/src/py/flwr/server/server_test.py @@ -242,11 +242,9 @@ def test_setup_client_auth() -> None: # pylint: disable=R0914 # Mock argparse with `require-client-authentication`` flag mock_args = argparse.Namespace( - require_client_authentication=[ - str(client_keys_file_path), - str(server_private_key_path), - str(server_public_key_path), - ] + auth_list_public_keys=str(client_keys_file_path), + auth_superlink_private_key=str(server_private_key_path), + auth_superlink_public_key=str(server_public_key_path), ) # Run _try_setup_client_authentication From 63e1b1fa395bf89fd51ed475c7a987e22fdc94ef Mon Sep 17 00:00:00 2001 From: Javier Date: Fri, 24 May 2024 18:35:59 +0100 Subject: [PATCH 17/23] fix(framework) Support empty `Parameters` conversion to/from `RecordSet` (#3344) --- src/py/flwr/common/recordset_compat.py | 9 +++++++- src/py/flwr/common/recordset_compat_test.py | 23 +++++++++++++++------ 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/src/py/flwr/common/recordset_compat.py b/src/py/flwr/common/recordset_compat.py index 394ea1353bab..1b0bf52d8277 100644 --- a/src/py/flwr/common/recordset_compat.py +++ b/src/py/flwr/common/recordset_compat.py @@ -35,6 +35,8 @@ Status, ) +EMPTY_TENSOR_KEY = "_empty" + def parametersrecord_to_parameters( record: ParametersRecord, keep_input: bool @@ -59,7 +61,8 @@ def parametersrecord_to_parameters( parameters = Parameters(tensors=[], tensor_type="") for key in list(record.keys()): - parameters.tensors.append(record[key].data) + if key != EMPTY_TENSOR_KEY: + parameters.tensors.append(record[key].data) if not parameters.tensor_type: # Setting from first array in record. 
Recall the warning in the docstrings @@ -103,6 +106,10 @@ def parameters_to_parametersrecord( data=tensor, dtype="", stype=tensor_type, shape=[] ) + if num_arrays == 0: + ordered_dict[EMPTY_TENSOR_KEY] = Array( + data=b"", dtype="", stype=tensor_type, shape=[] + ) return ParametersRecord(ordered_dict, keep_input=keep_input) diff --git a/src/py/flwr/common/recordset_compat_test.py b/src/py/flwr/common/recordset_compat_test.py index 288326dc9e83..e0ac7f216af9 100644 --- a/src/py/flwr/common/recordset_compat_test.py +++ b/src/py/flwr/common/recordset_compat_test.py @@ -74,6 +74,11 @@ def _get_valid_fitins() -> FitIns: return FitIns(parameters=ndarrays_to_parameters(arrays), config={"a": 1.0, "b": 0}) +def _get_valid_fitins_with_empty_ndarrays() -> FitIns: + pp = ndarrays_to_parameters([]) + return FitIns(parameters=pp, config={"a": 1.0, "b": 0}) + + def _get_valid_fitres() -> FitRes: """Returnn Valid parameters but potentially invalid config.""" arrays = get_ndarrays() @@ -138,23 +143,29 @@ def _get_valid_getpropertiesres() -> GetPropertiesRes: @pytest.mark.parametrize( - "keep_input, validate_freed_fn", + "keep_input, validate_freed_fn, fn", [ ( False, lambda x, x_copy, y: len(x.parameters.tensors) == 0 and x_copy == y, + _get_valid_fitins, ), # check tensors were freed + (True, lambda x, x_copy, y: x == y, _get_valid_fitins), ( - True, - lambda x, x_copy, y: x == y, - ), + False, + lambda x, x_copy, y: len(x.parameters.tensors) == 0 and x_copy == y, + _get_valid_fitins_with_empty_ndarrays, + ), # check tensors were freed + (True, lambda x, x_copy, y: x == y, _get_valid_fitins_with_empty_ndarrays), ], ) def test_fitins_to_recordset_and_back( - keep_input: bool, validate_freed_fn: Callable[[FitIns, FitIns, FitIns], bool] + keep_input: bool, + validate_freed_fn: Callable[[FitIns, FitIns, FitIns], bool], + fn: Callable[[], FitIns], ) -> None: """Test conversion FitIns --> RecordSet --> FitIns.""" - fitins = _get_valid_fitins() + fitins = fn() fitins_copy = deepcopy(fitins) From 2d1c3a6e827f4b63bc27c6ea93327e254e2d7584 Mon Sep 17 00:00:00 2001 From: Charles Beauville Date: Mon, 27 May 2024 10:33:12 +0200 Subject: [PATCH 18/23] ci(*:skip) Update code owners for translations (#3515) --- .github/CODEOWNERS | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 0f95b023ecaa..0b8702a8360c 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -18,6 +18,9 @@ README.md @jafermarq @tanertopal @danieljanes # Changelog /doc/source/ref-changelog.md @jafermarq @tanertopal @danieljanes +# Translations +/doc/locales @charlesbvll @tanertopal @danieljanes + # GitHub Actions and Workflows /.github/workflows @Robert-Steiner @tanertopal @danieljanes /.github/actions @Robert-Steiner @tanertopal @danieljanes From 77bf92ab5f8e3c39a6a747f99d9c623ab6f3b6ed Mon Sep 17 00:00:00 2001 From: "Weblate (bot)" Date: Mon, 27 May 2024 10:48:27 +0200 Subject: [PATCH 19/23] docs(framework) Add latest Weblate translation updates (#3286) Co-authored-by: Yan Gao Co-authored-by: Young D. 
Kwon Co-authored-by: Gustavo Bertoli Co-authored-by: Charles Beauville --- doc/locales/ko/LC_MESSAGES/framework-docs.po | 31 +- .../pt_BR/LC_MESSAGES/framework-docs.po | 193 +- .../zh_Hans/LC_MESSAGES/framework-docs.po | 2556 ++++++++++++++--- 3 files changed, 2286 insertions(+), 494 deletions(-) diff --git a/doc/locales/ko/LC_MESSAGES/framework-docs.po b/doc/locales/ko/LC_MESSAGES/framework-docs.po index 8ef7cf887247..63a160232909 100644 --- a/doc/locales/ko/LC_MESSAGES/framework-docs.po +++ b/doc/locales/ko/LC_MESSAGES/framework-docs.po @@ -3,59 +3,63 @@ # This file is distributed under the same license as the Flower package. # FIRST AUTHOR , 2024. # -#, fuzzy msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2024-05-13 09:48+0200\n" -"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" -"Last-Translator: FULL NAME \n" +"PO-Revision-Date: 2024-05-14 21:01+0000\n" +"Last-Translator: \"Young D. Kwon\" \n" +"Language-Team: Korean \n" "Language: ko\n" -"Language-Team: ko \n" -"Plural-Forms: nplurals=1; plural=0;\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=1; plural=0;\n" +"X-Generator: Weblate 5.6-dev\n" "Generated-By: Babel 2.14.0\n" #: ../../source/contributor-explanation-architecture.rst:2 msgid "Flower Architecture" -msgstr "" +msgstr "Flower 아키텍처" #: ../../source/contributor-explanation-architecture.rst:5 msgid "Edge Client Engine" -msgstr "" +msgstr "엣지 클라이언트 엔진" #: ../../source/contributor-explanation-architecture.rst:7 msgid "" "`Flower `_ core framework architecture with Edge " "Client Engine" -msgstr "" +msgstr "`Flower `_의 핵심 프레임워크 아키텍처와 엣지 클라이언트 " +"엔진" #: ../../source/contributor-explanation-architecture.rst:13 msgid "Virtual Client Engine" -msgstr "" +msgstr "가상 클라이언트 엔진" #: ../../source/contributor-explanation-architecture.rst:15 msgid "" "`Flower `_ core framework architecture with Virtual " "Client Engine" -msgstr "" +msgstr "`Flower `_의 핵심 프레임워크 아키텍처와 가상 클라이언트 " +"엔진" #: ../../source/contributor-explanation-architecture.rst:21 msgid "Virtual Client Engine and Edge Client Engine in the same workload" -msgstr "" +msgstr "동일 작업에서 가상 클라이언트 엔진과 엣지 클라이언트 엔진" #: ../../source/contributor-explanation-architecture.rst:23 msgid "" "`Flower `_ core framework architecture with both " "Virtual Client Engine and Edge Client Engine" -msgstr "" +msgstr "`Flower `_의 핵심 프레임워크 아키텍처와 가상 및 엣지 " +"클라이언트 엔진" #: ../../source/contributor-how-to-build-docker-images.rst:2 msgid "How to build Docker Flower images locally" -msgstr "" +msgstr "Docker Flower 이미지를 Locally 구축하는 방법" #: ../../source/contributor-how-to-build-docker-images.rst:4 msgid "" @@ -20705,4 +20709,3 @@ msgid "" "pytorch.html>`__ shows how to build a simple federated learning system " "with PyTorch and Flower." msgstr "" - diff --git a/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po b/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po index 4e117619f9b5..75a49d95d404 100644 --- a/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po +++ b/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po @@ -3,59 +3,68 @@ # This file is distributed under the same license as the Flower package. # FIRST AUTHOR , 2023. 
# -#, fuzzy msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2024-03-15 14:23+0000\n" -"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" -"Last-Translator: FULL NAME \n" +"PO-Revision-Date: 2024-05-25 11:09+0000\n" +"Last-Translator: Gustavo Bertoli \n" +"Language-Team: Portuguese (Brazil) \n" "Language: pt_BR\n" -"Language-Team: pt_BR \n" -"Plural-Forms: nplurals=2; plural=(n > 1);\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=n > 1;\n" +"X-Generator: Weblate 5.6-dev\n" "Generated-By: Babel 2.14.0\n" #: ../../source/contributor-explanation-architecture.rst:2 msgid "Flower Architecture" -msgstr "" +msgstr "Arquitetura do Flower" #: ../../source/contributor-explanation-architecture.rst:5 msgid "Edge Client Engine" -msgstr "" +msgstr "Engine do Edge Client" #: ../../source/contributor-explanation-architecture.rst:7 msgid "" "`Flower `_ core framework architecture with Edge " "Client Engine" msgstr "" +"`Flower `_ arquitetura principal do framework com Engine " +"do Edge Client" #: ../../source/contributor-explanation-architecture.rst:13 msgid "Virtual Client Engine" -msgstr "" +msgstr "Engine do Virtual Client" #: ../../source/contributor-explanation-architecture.rst:15 msgid "" "`Flower `_ core framework architecture with Virtual " "Client Engine" msgstr "" +"`Flower `_ arquitetura principal do framework com Engine " +"do Virtual Client" #: ../../source/contributor-explanation-architecture.rst:21 msgid "Virtual Client Engine and Edge Client Engine in the same workload" msgstr "" +"Engine do Virtual Client e do Edge Client no mesma carga de trabalho " +"(workload)" #: ../../source/contributor-explanation-architecture.rst:23 msgid "" "`Flower `_ core framework architecture with both " "Virtual Client Engine and Edge Client Engine" msgstr "" +"`Flower `_ arquitetura principal do framework com ambas " +"engines do Virtual Client e do Edge Client" #: ../../source/contributor-how-to-build-docker-images.rst:2 msgid "How to build Docker Flower images locally" -msgstr "" +msgstr "Como construir imagens Docker do Flower localmente" #: ../../source/contributor-how-to-build-docker-images.rst:4 msgid "" @@ -66,26 +75,36 @@ msgid "" "if that is what you need. In this guide, we will explain what images " "exist and how to build them locally." msgstr "" +"Flower disponibiliza imagens docker em `Docker Hub `_ que incluem todas as dependências necesárias para " +"executar o servidor. Você pode também compilar suas próprias imagens docker " +"customizadas do zero com uma versão diferente do Python ou do Ubuntu se isso " +"for o que você precisa. Neste guia, explicaremos quais imagens existem e " +"como compilar localmente." #: ../../source/contributor-how-to-build-docker-images.rst:9 msgid "" "Before we can start, we need to meet a few prerequisites in our local " "development environment." msgstr "" +"Antes de começarmos, precisamos encontrar alguns pré-requisitos em nosso " +"ambiente de desenvolvimento local." #: ../../source/contributor-how-to-build-docker-images.rst:11 msgid "Clone the flower repository." -msgstr "" +msgstr "Clone o repositório do flower." #: ../../source/contributor-how-to-build-docker-images.rst:17 msgid "Verify the Docker daemon is running." -msgstr "" +msgstr "Verifique que o serviço Docker está rodando." 
#: ../../source/contributor-how-to-build-docker-images.rst:19 msgid "" "Please follow the first section on :doc:`Run Flower using Docker ` which covers this step in more detail." msgstr "" +"Por favor, siga a primeira seção em :doc:`Execute o Flower usando Docker " +"` que cobre este passo em mais detalhes." #: ../../source/contributor-how-to-build-docker-images.rst:23 msgid "" @@ -96,6 +115,12 @@ msgid "" "server image is based on the base image, but it additionally installs the" " Flower server using ``pip``." msgstr "" +"Atualmente, Flower fornece duas imagens, uma imagem base e uma imagem de " +"servidor. Também haverá uma imagem de cliente em breve. A imagem base, como " +"o nome sugere, contém dependências básicas que tanto o servidor quanto o " +"cliente precisam. Isso inclui dependências do sistema, Python e ferramentas " +"Python. A imagem do servidor é baseada na imagem base, mas também instala o " +"servidor Flower usando ``pip```." #: ../../source/contributor-how-to-build-docker-images.rst:28 msgid "" @@ -103,6 +128,9 @@ msgid "" "respective Dockerfiles. You can find them in the subdirectories of " "``src/docker``." msgstr "" +"As instruções de compilação que montam as imagens estão localizadas nos " +"respectivos Dockerfiles. Você pode encontrá-los nos subdiretórios ``src/" +"docker```." #: ../../source/contributor-how-to-build-docker-images.rst:31 msgid "" @@ -114,91 +142,101 @@ msgid "" "available build arguments for each image are listed in one of the tables " "below." msgstr "" +"Ambas, imagens base e do servidor são configuradas através dos argumentos de " +"compilação. Através dos argumentos de compilação, podemos tornar nossa " +"compilação mais flexível. Por exemplo, na imagem base, podemos especificar a " +"versão do Python para instalar usando o argumento de compilação " +"`PYTHON_VERSION`. Alguns dos argumentos de compilação têm valores padrão, " +"outros devem ser especificados ao compilar a imagem. Todos os argumentos de " +"compilação disponíveis para cada imagem estão listados em uma das tabelas " +"abaixo." #: ../../source/contributor-how-to-build-docker-images.rst:38 msgid "Building the base image" -msgstr "" +msgstr "Construindo a imagem base" #: ../../source/contributor-how-to-build-docker-images.rst:44 #: ../../source/contributor-how-to-build-docker-images.rst:86 msgid "Build argument" -msgstr "" +msgstr "Argumento de compilação" #: ../../source/contributor-how-to-build-docker-images.rst:45 #: ../../source/contributor-how-to-build-docker-images.rst:87 msgid "Description" -msgstr "" +msgstr "Descrição" #: ../../source/contributor-how-to-build-docker-images.rst:46 #: ../../source/contributor-how-to-build-docker-images.rst:88 msgid "Required" -msgstr "" +msgstr "Necessário" #: ../../source/contributor-how-to-build-docker-images.rst:47 #: ../../source/contributor-how-to-build-docker-images.rst:89 msgid "Example" -msgstr "" +msgstr "Exemplo" #: ../../source/contributor-how-to-build-docker-images.rst:48 msgid "``PYTHON_VERSION``" -msgstr "" +msgstr "``PYTHON_VERSION``" #: ../../source/contributor-how-to-build-docker-images.rst:49 msgid "Version of ``python`` to be installed." -msgstr "" +msgstr "Versão do ``python`` a ser instalada." 
#: ../../source/contributor-how-to-build-docker-images.rst:50 #: ../../source/contributor-how-to-build-docker-images.rst:54 #: ../../source/contributor-how-to-build-docker-images.rst:58 #: ../../source/contributor-how-to-build-docker-images.rst:100 msgid "Yes" -msgstr "" +msgstr "Sim" #: ../../source/contributor-how-to-build-docker-images.rst:51 msgid "``3.11``" -msgstr "" +msgstr "``3.11``" #: ../../source/contributor-how-to-build-docker-images.rst:52 msgid "``PIP_VERSION``" -msgstr "" +msgstr "``PIP_VERSION``" #: ../../source/contributor-how-to-build-docker-images.rst:53 msgid "Version of ``pip`` to be installed." -msgstr "" +msgstr "Versão do ``pip`` a ser instalada." #: ../../source/contributor-how-to-build-docker-images.rst:55 msgid "``23.0.1``" -msgstr "" +msgstr "``23.0.1``" #: ../../source/contributor-how-to-build-docker-images.rst:56 msgid "``SETUPTOOLS_VERSION``" -msgstr "" +msgstr "``SETUPTOOLS_VERSION``" #: ../../source/contributor-how-to-build-docker-images.rst:57 msgid "Version of ``setuptools`` to be installed." -msgstr "" +msgstr "Versão do ``setuptools`` a ser instalada." #: ../../source/contributor-how-to-build-docker-images.rst:59 msgid "``69.0.2``" -msgstr "" +msgstr "``69.0.2``" #: ../../source/contributor-how-to-build-docker-images.rst:60 msgid "``UBUNTU_VERSION``" -msgstr "" +msgstr "``UBUNTU_VERSION``" #: ../../source/contributor-how-to-build-docker-images.rst:61 msgid "Version of the official Ubuntu Docker image." -msgstr "" +msgstr "Versão da imagem Docker oficial do Ubuntu." #: ../../source/contributor-how-to-build-docker-images.rst:62 msgid "Defaults to ``22.04``." -msgstr "" +msgstr "Como padrão ``22.04``." #: ../../source/contributor-how-to-build-docker-images.rst:65 msgid "" "The following example creates a base image with Python 3.11.0, pip 23.0.1" " and setuptools 69.0.2:" msgstr "" +"O exemplo seguinte cria uma imagem base com Python 3.11.0, pip 23.0.1 e " +"setuptools 69.0.2:" #: ../../source/contributor-how-to-build-docker-images.rst:76 msgid "" @@ -206,52 +244,58 @@ msgid "" "the build arguments as well as the name and tag can be adapted to your " "needs. These values serve as examples only." msgstr "" +"O nome da imagem é ``flwr_base`` com a tag ``0.1.0``. Lembre-se que os " +"argumentos de construção assim como o nome e a tag podem ser adaptados de " +"acordo com suas necessidades. Estes valores servem apenas como exemplo." #: ../../source/contributor-how-to-build-docker-images.rst:80 msgid "Building the server image" -msgstr "" +msgstr "Construindo a imagem do servidor" #: ../../source/contributor-how-to-build-docker-images.rst:90 msgid "``BASE_REPOSITORY``" -msgstr "" +msgstr "``BASE_REPOSITORY``" #: ../../source/contributor-how-to-build-docker-images.rst:91 msgid "The repository name of the base image." -msgstr "" +msgstr "O nome do repositório da imagem base." #: ../../source/contributor-how-to-build-docker-images.rst:92 msgid "Defaults to ``flwr/server``." -msgstr "" +msgstr "Pré-definido para ``flwr/server``." #: ../../source/contributor-how-to-build-docker-images.rst:94 msgid "``BASE_IMAGE_TAG``" -msgstr "" +msgstr "``BASE_IMAGE_TAG``" #: ../../source/contributor-how-to-build-docker-images.rst:95 +#, fuzzy msgid "The image tag of the base image." -msgstr "" +msgstr "A tag da imagem da imagem base." #: ../../source/contributor-how-to-build-docker-images.rst:96 msgid "Defaults to ``py3.11-ubuntu22.04``." -msgstr "" +msgstr "Pré-definido para ``py3.11-ubuntu22.04``." 
#: ../../source/contributor-how-to-build-docker-images.rst:98 msgid "``FLWR_VERSION``" -msgstr "" +msgstr "``FLWR_VERSION``" #: ../../source/contributor-how-to-build-docker-images.rst:99 msgid "Version of Flower to be installed." -msgstr "" +msgstr "Versão do Flower a ser instalada." #: ../../source/contributor-how-to-build-docker-images.rst:101 msgid "``1.7.0``" -msgstr "" +msgstr "``1.7.0``" #: ../../source/contributor-how-to-build-docker-images.rst:103 msgid "" "The following example creates a server image with the official Flower " "base image py3.11-ubuntu22.04 and Flower 1.7.0:" msgstr "" +"O exemplo a seguir cria uma imagem de servidor com a imagem base oficial do " +"Flower py3.11-ubuntu22.04 e Flower 1.7.0:" #: ../../source/contributor-how-to-build-docker-images.rst:114 msgid "" @@ -259,6 +303,9 @@ msgid "" " the build arguments as well as the name and tag can be adapted to your " "needs. These values serve as examples only." msgstr "" +"O nome da imagem é ``flwr_server`` e a tag ``0.1.0``. Lembre-se que os " +"argumentos de compilação, bem como o nome e a tag podem ser adaptados às " +"suas necessidades. Esses valores servem apenas como exemplos." #: ../../source/contributor-how-to-build-docker-images.rst:117 msgid "" @@ -268,14 +315,19 @@ msgid "" " match the name of your image and the value of ``BASE_IMAGE_TAG`` must " "match the tag of your image." msgstr "" +"Se você quiser usar sua própria imagem base ao invés da imagem oficial base " +"do Flower, tudo que você precisa fazer é definir os argumentos " +"``BASE_REPOSITORY`` e ``BASE_IMAGE_TAG`` como parte do comando de " +"compilação. O valor de ``BASE_REPOSITORY`` deve corresponder ao nome da sua " +"imagem e o valor de ``BASE_IMAGE_TAG`` deve corresponder à tag da sua imagem." #: ../../source/contributor-how-to-build-docker-images.rst:131 msgid "After creating the image, we can test whether the image is working:" -msgstr "" +msgstr "Depois de criar a imagem, podemos testar se a imagem está funcionando:" #: ../../source/contributor-how-to-contribute-translations.rst:2 msgid "Contribute translations" -msgstr "" +msgstr "Contribua com traduções" #: ../../source/contributor-how-to-contribute-translations.rst:4 msgid "" @@ -288,17 +340,30 @@ msgid "" "also be a great opportunity for those wanting to become open source " "contributors with little prerequisites." msgstr "" +"Desde o `Flower 1.5 `_ nós introduzimos traduções para nossas páginas de " +"documentação, mas, como você pode ter notado, as traduções são muitas vezes " +"imperfeitas. Se você fala línguas diferentes do inglês, você pode ser capaz " +"de nos ajudar neste esforço para tornar o aprendizado federado acessível a " +"tantas pessoas quanto possível, contribuindo para essas traduções! Isso " +"também pode ser uma grande oportunidade para aqueles que querem se tornar " +"contribuintes de código aberto com poucos pré-requisitos." #: ../../source/contributor-how-to-contribute-translations.rst:13 +#, fuzzy msgid "" "Our translation project is publicly available over on `Weblate " "`_, this " "where most of the work will happen." msgstr "" +"Nosso projeto de tradução está disponível publicamente em `Weblate " +"`_, onde a " +"maioria do trabalho acontecerá." 
#: ../../source/contributor-how-to-contribute-translations.rst:18 +#, fuzzy msgid "Contribute to existing languages" -msgstr "" +msgstr "Contribuir para as línguas existentes" #: ../../source/contributor-how-to-contribute-translations.rst:23 msgid "" @@ -308,6 +373,10 @@ msgid "" " profile settings can be found `here " "`_." msgstr "" +"A primeira coisa que você precisa fazer para contribuir é criar uma conta " +"Weblate gratuita nesta `página `_. Mais informações sobre as configurações de perfil podem ser encontradas " +"`aqui `_." #: ../../source/contributor-how-to-contribute-translations.rst:29 msgid "" @@ -316,12 +385,18 @@ msgid "" "docs/framework/>`_. Here, you should see the different existing languages" " that can be found on the website." msgstr "" +"Uma vez que você esteja conectado ao Weblate, você pode navegar até o " +"projeto `Flower Framework `_. Aqui, você deve ver os diferentes idiomas existentes que " +"podem ser encontrados no site." #: ../../source/contributor-how-to-contribute-translations.rst:34 msgid "" "Once you have selected the language you want to contribute to, you should" " see a similar interface to this:" msgstr "" +"Uma vez que você tenha selecionado o idioma que deseja contribuir, você deve " +"ver uma interface semelhante a esta:" #: ../../source/contributor-how-to-contribute-translations.rst:39 msgid "" @@ -330,10 +405,14 @@ msgid "" "will automatically bring you to the translation interface for " "untranslated strings." msgstr "" +"A opção mais direta aqui é clicar no botão ``Translate`` no canto superior " +"direito (na seção ``Translation status``). Isso te levará automaticamente " +"para a interface de tradução de strings ainda não traduzidas." #: ../../source/contributor-how-to-contribute-translations.rst:43 +#, fuzzy msgid "This is what the interface looks like:" -msgstr "" +msgstr "É assim que a interface se parece:" #: ../../source/contributor-how-to-contribute-translations.rst:47 msgid "" @@ -344,6 +423,13 @@ msgid "" "your translation to suggestions for other users to view), or ``Skip`` (to" " go to the next untranslated string without saving anything)." msgstr "" +"Você insire sua tradução na caixa de texto no topo e depois, uma vez que " +"você está satisfeito com ela, você pressiona ``Save and continue`` (para " +"salvar a tradução e ir para a próxima string não traduzida), ``Save and " +"stay`` (para salvar a tradução e ficar na mesma página), ``Suggest`` (para " +"adicionar sua tradução para sugestões para outros usuários verem), ou " +"``Skip`` (para ir para a próxima string não traduzida sem salvar nada na " +"atual)." #: ../../source/contributor-how-to-contribute-translations.rst:54 msgid "" @@ -353,6 +439,11 @@ msgid "" "translations in ``Other languages``, and the ``History`` of translations " "for this string." msgstr "" +"Para ajudar com as traduções, você pode ver na parte inferior o ``Nearby " +"strings`` (strings próximas), o ``Comments`` (comentários de outros " +"contribuidores), o ``Automatic suggestions`` (sugestões atuomáticas de " +"sistemas de tradução automática), as traduções em ``Other languages`` (" +"outras línguas), e o ``History`` (histórico) de traduções para esta string." #: ../../source/contributor-how-to-contribute-translations.rst:59 msgid "" @@ -360,6 +451,9 @@ msgid "" "click the link under ``Source string location`` in order to view the " "source of the doc file containing the string." 
msgstr "" +"À direita, sob a seção ``String information``, você também pode clicar no " +"link sob ``Source string location`` para visualizar a fonte do arquivo doc " +"que contém a string." #: ../../source/contributor-how-to-contribute-translations.rst:63 msgid "" @@ -367,10 +461,14 @@ msgid "" "this `in-depth guide " "`_." msgstr "" +"Para obter mais informações sobre como traduzir usando o Weblate, você pode " +"conferir este `guia detalhado `_." #: ../../source/contributor-how-to-contribute-translations.rst:67 +#, fuzzy msgid "Add new languages" -msgstr "" +msgstr "Adicionar novos idiomas" #: ../../source/contributor-how-to-contribute-translations.rst:69 msgid "" @@ -378,10 +476,14 @@ msgid "" "either on `Slack `_, or by opening an issue" " on our `GitHub repo `_." msgstr "" +"Se você quiser adicionar uma nova língua, primeiro você terá que entrar em " +"contato conosco, no `Slack `_, ou abrindo uma " +"issue no nosso `repositório GitHub `_." #: ../../source/contributor-how-to-create-new-messages.rst:2 +#, fuzzy msgid "Creating New Messages" -msgstr "" +msgstr "Criando novas mensagens" #: ../../source/contributor-how-to-create-new-messages.rst:4 msgid "" @@ -21349,4 +21451,3 @@ msgstr "" #~ msgid "|08cb60859b07461588fe44e55810b050|" #~ msgstr "" - diff --git a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po index 86d96e5e6865..4f4d9cfe5214 100644 --- a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po +++ b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po @@ -8,15 +8,16 @@ msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2024-03-15 14:23+0000\n" -"PO-Revision-Date: 2024-02-19 11:37+0000\n" +"PO-Revision-Date: 2024-05-10 06:59+0000\n" "Last-Translator: Yan Gao \n" +"Language-Team: Chinese (Simplified) \n" "Language: zh_Hans\n" -"Language-Team: Chinese (Simplified) \n" -"Plural-Forms: nplurals=1; plural=0;\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=1; plural=0;\n" +"X-Generator: Weblate 5.5.4-rc\n" "Generated-By: Babel 2.14.0\n" #: ../../source/contributor-explanation-architecture.rst:2 @@ -58,6 +59,7 @@ msgid "How to build Docker Flower images locally" msgstr "如何在本地搭建Docker Flower images" #: ../../source/contributor-how-to-build-docker-images.rst:4 +#, fuzzy msgid "" "Flower provides pre-made docker images on `Docker Hub " "`_ that include all necessary " @@ -66,12 +68,17 @@ msgid "" "if that is what you need. In this guide, we will explain what images " "exist and how to build them locally." msgstr "" +"Flower 在 `Docker Hub `_ " +"上提供了预制的 docker 镜像,其中包括运行服务器所需的所有依赖项。如果你需要," +"也可以使用不同版本的 Python 或 Ubuntu 从头开始构建自己的定制 docker " +"镜像。在本指南中,我们将介绍有哪些镜像,以及如何在本地构建它们。" #: ../../source/contributor-how-to-build-docker-images.rst:9 +#, fuzzy msgid "" "Before we can start, we need to meet a few prerequisites in our local " "development environment." -msgstr "" +msgstr "在开始之前,我们需要在本地开发环境中满足一些先决条件。" #: ../../source/contributor-how-to-build-docker-images.rst:11 #, fuzzy @@ -79,16 +86,21 @@ msgid "Clone the flower repository." msgstr "**叉花仓库**" #: ../../source/contributor-how-to-build-docker-images.rst:17 +#, fuzzy msgid "Verify the Docker daemon is running." -msgstr "" +msgstr "验证 Docker 守护进程是否正在运行。" #: ../../source/contributor-how-to-build-docker-images.rst:19 +#, fuzzy msgid "" "Please follow the first section on :doc:`Run Flower using Docker ` which covers this step in more detail." 
msgstr "" +"请阅读 :doc:`Run Flower using Docker ` " +"的第一节,其中更详细地介绍了这一步骤。" #: ../../source/contributor-how-to-build-docker-images.rst:23 +#, fuzzy msgid "" "Currently, Flower provides two images, a base image and a server image. " "There will also be a client image soon. The base image, as the name " @@ -97,15 +109,22 @@ msgid "" "server image is based on the base image, but it additionally installs the" " Flower server using ``pip``." msgstr "" +"目前,Flower 提供两个镜像,一个基础镜像和一个服务器镜像。不久还将推出客户端镜" +"像。基础镜像,顾名思义,包含服务器和客户端都需要的基本依赖项。其中包括系统依" +"赖项、Python 和 Python 工具。服务器镜像基于基础镜像,但它会使用 ``pip`` " +"额外安装 Flower 服务器。" #: ../../source/contributor-how-to-build-docker-images.rst:28 +#, fuzzy msgid "" "The build instructions that assemble the images are located in the " "respective Dockerfiles. You can find them in the subdirectories of " "``src/docker``." -msgstr "" +msgstr "组装镜像的构建说明位于各自的 Dockerfile 中。你可以在 ``src/docker`` " +"的子目录中找到它们。" #: ../../source/contributor-how-to-build-docker-images.rst:31 +#, fuzzy msgid "" "Both, base and server image are configured via build arguments. Through " "build arguments, we can make our build more flexible. For example, in the" @@ -115,6 +134,10 @@ msgid "" "available build arguments for each image are listed in one of the tables " "below." msgstr "" +"基础镜像和服务器镜像都是通过构建参数配置的。通过联编参数,我们可以使联编更加" +"灵活。例如,在基础镜像中,我们可以使用 ``PYTHON_VERSION`` " +"联编参数指定要安装的 Python 版本。有些联编参数有默认值,有些则必须在联编映像" +"时指定。每个映像的所有可用联编参数都列在下表中。" #: ../../source/contributor-how-to-build-docker-images.rst:38 #, fuzzy @@ -151,8 +174,9 @@ msgid "``PYTHON_VERSION``" msgstr "Python 版本" #: ../../source/contributor-how-to-build-docker-images.rst:49 +#, fuzzy msgid "Version of ``python`` to be installed." -msgstr "" +msgstr "要安装的 ``python`` 版本。" #: ../../source/contributor-how-to-build-docker-images.rst:50 #: ../../source/contributor-how-to-build-docker-images.rst:54 @@ -168,12 +192,14 @@ msgid "``3.11``" msgstr "``1.0.0rc1``" #: ../../source/contributor-how-to-build-docker-images.rst:52 +#, fuzzy msgid "``PIP_VERSION``" -msgstr "" +msgstr "``PIP_VERSION``" #: ../../source/contributor-how-to-build-docker-images.rst:53 +#, fuzzy msgid "Version of ``pip`` to be installed." -msgstr "" +msgstr "要安装的 ``pip` 版本。" #: ../../source/contributor-how-to-build-docker-images.rst:55 #, fuzzy @@ -181,12 +207,14 @@ msgid "``23.0.1``" msgstr "``1.0.0rc1``" #: ../../source/contributor-how-to-build-docker-images.rst:56 +#, fuzzy msgid "``SETUPTOOLS_VERSION``" -msgstr "" +msgstr "设置工具版本" #: ../../source/contributor-how-to-build-docker-images.rst:57 +#, fuzzy msgid "Version of ``setuptools`` to be installed." -msgstr "" +msgstr "要安装的 `setuptools`` 版本。" #: ../../source/contributor-how-to-build-docker-images.rst:59 #, fuzzy @@ -194,29 +222,36 @@ msgid "``69.0.2``" msgstr "``1.0.0b0``" #: ../../source/contributor-how-to-build-docker-images.rst:60 +#, fuzzy msgid "``UBUNTU_VERSION``" -msgstr "" +msgstr "``UBUNTU_VERSION``" #: ../../source/contributor-how-to-build-docker-images.rst:61 +#, fuzzy msgid "Version of the official Ubuntu Docker image." -msgstr "" +msgstr "官方 Ubuntu Docker 映像的版本。" #: ../../source/contributor-how-to-build-docker-images.rst:62 +#, fuzzy msgid "Defaults to ``22.04``." 
-msgstr "" +msgstr "默认为 ``22.04``。" #: ../../source/contributor-how-to-build-docker-images.rst:65 +#, fuzzy msgid "" "The following example creates a base image with Python 3.11.0, pip 23.0.1" " and setuptools 69.0.2:" -msgstr "" +msgstr "下面的示例使用 Python 3.11.0、pip 23.0.1 和 setuptools 69.0.2 " +"创建了基本映像:" #: ../../source/contributor-how-to-build-docker-images.rst:76 +#, fuzzy msgid "" "The name of image is ``flwr_base`` and the tag ``0.1.0``. Remember that " "the build arguments as well as the name and tag can be adapted to your " "needs. These values serve as examples only." -msgstr "" +msgstr "图像名称为 ``flwr_base``,标记为 ``0.1." +"0``。请记住,编译参数以及名称和标记都可以根据需要进行调整。这些值仅供参考。" #: ../../source/contributor-how-to-build-docker-images.rst:80 #, fuzzy @@ -224,36 +259,44 @@ msgid "Building the server image" msgstr "启动服务器" #: ../../source/contributor-how-to-build-docker-images.rst:90 +#, fuzzy msgid "``BASE_REPOSITORY``" -msgstr "" +msgstr "基础存储库" #: ../../source/contributor-how-to-build-docker-images.rst:91 +#, fuzzy msgid "The repository name of the base image." -msgstr "" +msgstr "基础镜像的存储库名称。" #: ../../source/contributor-how-to-build-docker-images.rst:92 +#, fuzzy msgid "Defaults to ``flwr/server``." -msgstr "" +msgstr "默认为 ``flwr/server``。" #: ../../source/contributor-how-to-build-docker-images.rst:94 +#, fuzzy msgid "``BASE_IMAGE_TAG``" -msgstr "" +msgstr "基本图像标签" #: ../../source/contributor-how-to-build-docker-images.rst:95 +#, fuzzy msgid "The image tag of the base image." -msgstr "" +msgstr "基础图像的图像标记。" #: ../../source/contributor-how-to-build-docker-images.rst:96 +#, fuzzy msgid "Defaults to ``py3.11-ubuntu22.04``." -msgstr "" +msgstr "默认为 ``py3.11-ubuntu22.04``。" #: ../../source/contributor-how-to-build-docker-images.rst:98 +#, fuzzy msgid "``FLWR_VERSION``" -msgstr "" +msgstr "``FLWR_VERSION``" #: ../../source/contributor-how-to-build-docker-images.rst:99 +#, fuzzy msgid "Version of Flower to be installed." -msgstr "" +msgstr "要安装的 Flower 版本。" #: ../../source/contributor-how-to-build-docker-images.rst:101 #, fuzzy @@ -261,19 +304,24 @@ msgid "``1.7.0``" msgstr "``1.0.0b0``" #: ../../source/contributor-how-to-build-docker-images.rst:103 +#, fuzzy msgid "" "The following example creates a server image with the official Flower " "base image py3.11-ubuntu22.04 and Flower 1.7.0:" -msgstr "" +msgstr "下面的示例使用官方的 Flower 基本镜像 py3.11-ubuntu22.04 和 Flower 1.7.0 " +"创建了一个服务器镜像:" #: ../../source/contributor-how-to-build-docker-images.rst:114 +#, fuzzy msgid "" "The name of image is ``flwr_server`` and the tag ``0.1.0``. Remember that" " the build arguments as well as the name and tag can be adapted to your " "needs. These values serve as examples only." -msgstr "" +msgstr "图像名称为 ``flwr_server``,标记为 ``0.1." +"0``。请记住,编译参数以及名称和标记都可以根据需要进行调整。这些值仅供参考。" #: ../../source/contributor-how-to-build-docker-images.rst:117 +#, fuzzy msgid "" "If you want to use your own base image instead of the official Flower " "base image, all you need to do is set the ``BASE_REPOSITORY`` and " @@ -281,10 +329,14 @@ msgid "" " match the name of your image and the value of ``BASE_IMAGE_TAG`` must " "match the tag of your image." 
msgstr "" +"如果您想使用自己的基础图片而不是 Flower 官方的基础图片,只需设置 " +"``BASE_REPOSITORY`` 和 ``BASE_IMAGE_TAG`` 联编参数即可。`BASE_REPOSITORY``的" +"值必须与您的图像名称一致,`BASE_IMAGE_TAG``的值必须与您的图像标签一致。" #: ../../source/contributor-how-to-build-docker-images.rst:131 +#, fuzzy msgid "After creating the image, we can test whether the image is working:" -msgstr "" +msgstr "创建图像后,我们可以测试图像是否正常工作:" #: ../../source/contributor-how-to-contribute-translations.rst:2 msgid "Contribute translations" @@ -859,13 +911,18 @@ msgid "" msgstr "版本号在 ``pyproject.toml`` 中说明。要发布 Flower 的新版本,需要完成以下工作(按顺序排列):" #: ../../source/contributor-how-to-release-flower.rst:11 +#, fuzzy msgid "" "Run ``python3 src/py/flwr_tool/update_changelog.py `` in " "order to add every new change to the changelog (feel free to make manual " "changes to the changelog afterwards until it looks good)." msgstr "" +"运行 ``python3 src/py/flwr_tool/update_changelog.py `` 以将每" +"项新更改添加到更新日志中(之后可对更新日志进行手动更改,直到看起来不错为止)" +"。" #: ../../source/contributor-how-to-release-flower.rst:12 +#, fuzzy msgid "" "Once the changelog has been updated with all the changes, run ``./dev" "/prepare-release-changelog.sh v``, where ```` " @@ -874,6 +931,11 @@ msgid "" "by the version and current date, and it will add a thanking message for " "the contributors. Open a pull request with those changes." msgstr "" +"更新更新日志后,运行``./dev/prepare-release-changelog.sh " +"v``,其中````是``pyproject." +"toml``中的版本(注意前面的``v``)。这将用版本和当前日期替换更新日志中的 " +"``Unreleased`` " +"标头,并为贡献者添加一条感谢信息。打开一个包含这些更改的拉取请求。" #: ../../source/contributor-how-to-release-flower.rst:13 #, fuzzy @@ -1664,6 +1726,7 @@ msgid "" msgstr "中间的输入框供您描述 PR 的作用,并将其与现有问题联系起来。我们在此放置了注释(一旦 PR 打开,注释将不会显示),以指导您完成整个过程。" #: ../../source/contributor-tutorial-contribute-on-github.rst:196 +#, fuzzy msgid "" "It is important to follow the instructions described in comments. For " "instance, in order to not break how our changelog system works, you " @@ -1671,6 +1734,9 @@ msgid "" "carefully. You can also checkout some examples and details in the " ":ref:`changelogentry` appendix." msgstr "" +"请务必遵守注释中的说明。例如,为了不破坏我们的更新日志系统,你应该仔细阅读\"" +"`更新日志条目``\"部分上面的信息。您还可以查看 :ref:`changelogentry` " +"附录中的一些示例和细节。" #: ../../source/contributor-tutorial-contribute-on-github.rst:200 msgid "" @@ -2032,131 +2098,163 @@ msgid "Changelog entry" msgstr "更新日志" #: ../../source/contributor-tutorial-contribute-on-github.rst:356 +#, fuzzy msgid "" "When opening a new PR, inside its description, there should be a " "``Changelog entry`` header." -msgstr "" +msgstr "打开一个新 PR 时,在其描述中应有一个 ``Changelog entry`` 标头。" #: ../../source/contributor-tutorial-contribute-on-github.rst:358 +#, fuzzy msgid "" "Above this header you should see the following comment that explains how " "to write your changelog entry:" -msgstr "" +msgstr "在页眉上方,你会看到以下注释,说明如何编写更新日志条目:" #: ../../source/contributor-tutorial-contribute-on-github.rst:360 +#, fuzzy msgid "" "Inside the following 'Changelog entry' section, you should put the " "description of your changes that will be added to the changelog alongside" " your PR title." -msgstr "" +msgstr "在下面的 \"更新日志条目 \"部分中,您应该在 PR " +"标题旁边写上将添加到更新日志中的更改描述。" #: ../../source/contributor-tutorial-contribute-on-github.rst:363 +#, fuzzy msgid "" "If the section is completely empty (without any token) or non-existent, " "the changelog will just contain the title of the PR for the changelog " "entry, without any description." 
-msgstr "" +msgstr "如果该部分完全为空(没有任何标记)或不存在,更新日志将只包含更新日志条目的 " +"PR 标题,而不包含任何描述。" #: ../../source/contributor-tutorial-contribute-on-github.rst:366 +#, fuzzy msgid "" "If the section contains some text other than tokens, it will use it to " "add a description to the change." -msgstr "" +msgstr "如果该部分包含标记以外的文本,它将使用这些文本为更改添加说明。" #: ../../source/contributor-tutorial-contribute-on-github.rst:368 +#, fuzzy msgid "" "If the section contains one of the following tokens it will ignore any " "other text and put the PR under the corresponding section of the " "changelog:" -msgstr "" +msgstr "如果该部分包含以下标记之一,它将忽略任何其他文本,并将 PR " +"放在更新日志的相应部分下:" #: ../../source/contributor-tutorial-contribute-on-github.rst:370 +#, fuzzy msgid " is for classifying a PR as a general improvement." -msgstr "" +msgstr " 用于将 PR 划分为一般改进。" #: ../../source/contributor-tutorial-contribute-on-github.rst:372 +#, fuzzy msgid " is to not add the PR to the changelog" -msgstr "" +msgstr "表示不将 PR 添加到更新日志中" #: ../../source/contributor-tutorial-contribute-on-github.rst:374 +#, fuzzy msgid " is to add a general baselines change to the PR" -msgstr "" +msgstr " 是指在 PR 中添加一般基线更改" #: ../../source/contributor-tutorial-contribute-on-github.rst:376 +#, fuzzy msgid " is to add a general examples change to the PR" -msgstr "" +msgstr " 是在 PR 中添加对一般示例的修改" #: ../../source/contributor-tutorial-contribute-on-github.rst:378 +#, fuzzy msgid " is to add a general sdk change to the PR" -msgstr "" +msgstr " 是指在 PR 中添加一般的 sdk 更改" #: ../../source/contributor-tutorial-contribute-on-github.rst:380 +#, fuzzy msgid " is to add a general simulations change to the PR" -msgstr "" +msgstr "(模拟)是在 PR 中添加一般模拟变更" #: ../../source/contributor-tutorial-contribute-on-github.rst:382 +#, fuzzy msgid "Note that only one token should be used." -msgstr "" +msgstr "请注意,只能使用一个标记。" #: ../../source/contributor-tutorial-contribute-on-github.rst:384 +#, fuzzy msgid "" "Its content must have a specific format. We will break down what each " "possibility does:" -msgstr "" +msgstr "其内容必须有特定的格式。我们将分析每种可能性的作用:" #: ../../source/contributor-tutorial-contribute-on-github.rst:386 +#, fuzzy msgid "" "If the ``### Changelog entry`` section contains nothing or doesn't exist," " the following text will be added to the changelog::" -msgstr "" +msgstr "如果 ``#### Changelog entry`` " +"部分不包含任何内容或不存在,则会在更新日志中添加以下文本::" #: ../../source/contributor-tutorial-contribute-on-github.rst:390 +#, fuzzy msgid "" "If the ``### Changelog entry`` section contains a description (and no " "token), the following text will be added to the changelog::" -msgstr "" +msgstr "如果 ``#### Changelog entry`` " +"部分包含描述(但没有标记),则会在更新日志中添加以下文本::" #: ../../source/contributor-tutorial-contribute-on-github.rst:396 +#, fuzzy msgid "" "If the ``### Changelog entry`` section contains ````, nothing will " "change in the changelog." 
-msgstr "" +msgstr "如果 ``#### Changelog entry`` 部分包含 " +"````,更新日志中将不会有任何更改。" #: ../../source/contributor-tutorial-contribute-on-github.rst:398 +#, fuzzy msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" -msgstr "" +msgstr "如果 ``### Changelog entry`` 部分包含 " +"````,则会在更新日志中添加以下文本::" #: ../../source/contributor-tutorial-contribute-on-github.rst:402 +#, fuzzy msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" -msgstr "" +msgstr "如果``### " +"更新日志条目``部分包含``<基准线>``,则会在更新日志中添加以下文本::" #: ../../source/contributor-tutorial-contribute-on-github.rst:406 +#, fuzzy msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" -msgstr "" +msgstr "如果``### 更新日志条目``部分包含``<示例>``,则会在更新日志中添加以下文本::" #: ../../source/contributor-tutorial-contribute-on-github.rst:410 +#, fuzzy msgid "" "If the ``### Changelog entry`` section contains ````, the following " "text will be added to the changelog::" -msgstr "" +msgstr "如果``### 更新日志条目``部分包含````,则会在更新日志中添加以下文本::" #: ../../source/contributor-tutorial-contribute-on-github.rst:414 +#, fuzzy msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" -msgstr "" +msgstr "如果 ``### Changelog entry`` 部分包含 " +"````,则会在更新日志中添加以下文本::" #: ../../source/contributor-tutorial-contribute-on-github.rst:418 +#, fuzzy msgid "" "Note that only one token must be provided, otherwise, only the first " "action (in the order listed above), will be performed." -msgstr "" +msgstr "请注意,必须只提供一个标记,否则将只执行第一个操作(按上述顺序)。" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:2 msgid "Get started as a contributor" @@ -2198,38 +2296,46 @@ msgid "Developer Machine Setup" msgstr "开发者机器设置" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:21 +#, fuzzy msgid "Preliminarities" -msgstr "" +msgstr "前言" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:22 +#, fuzzy msgid "Some system-wide dependencies are needed." -msgstr "" +msgstr "需要一些全系统依赖性。" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:25 +#, fuzzy msgid "For macOS" -msgstr "" +msgstr "适用于 macOS" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:27 +#, fuzzy msgid "" "Install `homebrew `_. Don't forget the post-" "installation actions to add `brew` to your PATH." -msgstr "" +msgstr "安装 `homebrew `_。别忘了安装后的操作,将 `brew` " +"添加到你的 PATH。" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:28 +#, fuzzy msgid "" "Install `xz` (to install different Python versions) and `pandoc` to build" " the docs::" -msgstr "" +msgstr "安装 `xz`(用于安装不同的 Python 版本)和 `pandoc` 以构建文档::" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:34 +#, fuzzy msgid "For Ubuntu" -msgstr "" +msgstr "针对 Ubuntu" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:35 +#, fuzzy msgid "" "Ensure you system (Ubuntu 22.04+) is up-to-date, and you have all " "necessary packages::" -msgstr "" +msgstr "确保您的系统(Ubuntu 22.04+)为最新版本,并安装了所有必要的软件包::" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:44 #, fuzzy @@ -2244,6 +2350,7 @@ msgid "" msgstr "首先,从 GitHub 克隆 \"Flower 存储库 `_\":" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:52 +#, fuzzy msgid "" "Let's create the Python environment for all-things Flower. 
If you wish to" " use :code:`pyenv`, we provide two convenience scripts that you can use. " @@ -2251,6 +2358,9 @@ msgid "" "environment, activate and skip to the last point where all packages are " "installed." msgstr "" +"让我们为 Flower 创建一个 Python 环境。如果您想使用 " +":code:`pyenv`,我们提供了两个方便的脚本供您使用。如果你不喜欢使用 " +":code:`pyenv`,请创建一个新环境,激活并跳到最后一点,即安装所有软件包。" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:54 #, fuzzy @@ -3317,6 +3427,7 @@ msgid "Differential Privacy" msgstr "差分隐私" #: ../../source/explanation-differential-privacy.rst:3 +#, fuzzy msgid "" "The information in datasets like healthcare, financial transactions, user" " preferences, etc., is valuable and has the potential for scientific " @@ -3324,16 +3435,21 @@ msgid "" "data is also sensitive and there is a risk of compromising individual " "privacy." msgstr "" +"医疗保健、金融交易、用户偏好等数据集中的信息非常宝贵,有可能带来科学突破并提" +"供重要的商业见解。然而,这些数据也是敏感数据,存在泄露个人隐私的风险。" #: ../../source/explanation-differential-privacy.rst:6 +#, fuzzy msgid "" "Traditional methods like anonymization alone would not work because of " "attacks like Re-identification and Data Linkage. That's where " "differential privacy comes in. It provides the possibility of analyzing " "data while ensuring the privacy of individuals." -msgstr "" +msgstr "单靠匿名等传统方法是行不通的,因为会受到重新识别和数据链接等攻击。这就是差异" +"化隐私的用武之地。它提供了在分析数据的同时确保个人隐私的可能性。" #: ../../source/explanation-differential-privacy.rst:12 +#, fuzzy msgid "" "Imagine two datasets that are identical except for a single record (for " "instance, Alice's data). Differential Privacy (DP) guarantees that any " @@ -3342,18 +3458,25 @@ msgid "" "preserves group patterns while obscuring individual details, ensuring the" " individual's information remains hidden in the crowd." msgstr "" +"试想一下,两个数据集除了一条记录(例如 Alice 的数据)之外完全相同。差分隐私(" +"DP)可以保证任何分析(M),比如计算平均收入,对两个数据集都会产生几乎相同的结" +"果(O 和 O' 将是相似的)。这既保留了群体模式,又掩盖了个人细节,确保个人的信" +"息隐藏在人群中。" #: ../../source/explanation-differential-privacy.rst:-1 +#, fuzzy msgid "DP Intro" -msgstr "" +msgstr "DP 介绍" #: ../../source/explanation-differential-privacy.rst:22 +#, fuzzy msgid "" "One of the most commonly used mechanisms to achieve DP is adding enough " "noise to the output of the analysis to mask the contribution of each " "individual in the data while preserving the overall accuracy of the " "analysis." 
-msgstr "" +msgstr "实现 DP 的最常用机制之一是在分析输出中加入足够的噪音,以掩盖数据中每个个体的" +"贡献,同时保持分析的整体准确性。" #: ../../source/explanation-differential-privacy.rst:25 #, fuzzy @@ -3361,6 +3484,7 @@ msgid "Formal Definition" msgstr "编译 ProtoBuf 定义" #: ../../source/explanation-differential-privacy.rst:26 +#, fuzzy msgid "" "Differential Privacy (DP) provides statistical guarantees against the " "information an adversary can infer through the output of a randomized " @@ -3371,14 +3495,23 @@ msgid "" "databases, D :sub:`1` and D :sub:`2`, that differ in only a single " "record, and for all possible outputs S ⊆ Range(A):" msgstr "" +"差分隐私(Differential Privacy,DP)针对对手通过随机算法的输出所能推断出的信" +"息提供统计保证。它为单个个体通过添加噪声对算法输出的影响提供了一个无条件的上" +"限[1]。如果任意两个相邻的数据库D :sub:`1`和D :sub:`2`只有一条记录不同," +"并且对于所有可能的输出S ⊆ Range(A),随机化机制M提供(:math:`epsilon`,:math:`" +"\\delta`)差异隐私:" #: ../../source/explanation-differential-privacy.rst:32 +#, fuzzy msgid "" "\\small\n" "P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" msgstr "" +"\\small\n" +"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" #: ../../source/explanation-differential-privacy.rst:38 +#, fuzzy msgid "" "The :math:`\\epsilon` parameter, also known as the privacy budget, is a " "metric of privacy loss. It also controls the privacy-utility trade-off; " @@ -3389,6 +3522,11 @@ msgid "" "proportional to the sensitivity of the output, which measures the maximum" " change in the output due to the inclusion or removal of a single record." msgstr "" +":math:`\\epsilon`参数也称为隐私预算,是衡量隐私损失的指标。较低的 :math:`" +"\\epsilon` 值表示较高的隐私级别,但也可能降低效用。:math:`\\delta" +"`参数考虑了:math:`\\epsilon`上限不成立的小概率。实现差异化隐私所需的噪声量与" +"输出的灵敏度成正比,而输出的灵敏度是指由于包含或删除一条记录而导致的输出的最" +"大变化。" #: ../../source/explanation-differential-privacy.rst:45 #, fuzzy @@ -3396,6 +3534,7 @@ msgid "Differential Privacy in Machine Learning" msgstr "差分隐私" #: ../../source/explanation-differential-privacy.rst:46 +#, fuzzy msgid "" "DP can be utilized in machine learning to preserve the privacy of the " "training data. Differentially private machine learning algorithms are " @@ -3409,6 +3548,11 @@ msgid "" "model training. Additionally, such noise can be incorporated into the " "model's output." msgstr "" +"机器学习中可以利用 DP 来保护训练数据的隐私。差分保密机器学习算法的设计方式是" +"防止算法学习到任何单个数据点的任何特定信息,从而防止模型泄露敏感信息。根据引" +"入噪声的阶段,有多种方法可将 DP 应用于机器学习算法。一种方法是在训练数据(特" +"征或标签)中添加噪声,另一种方法是在模型训练过程中向损失函数的梯度注入噪声。" +"此外,这种噪声还可以被纳入模型的输出中。" #: ../../source/explanation-differential-privacy.rst:53 #, fuzzy @@ -3416,6 +3560,7 @@ msgid "Differential Privacy in Federated Learning" msgstr "扩大联邦学习的规模" #: ../../source/explanation-differential-privacy.rst:54 +#, fuzzy msgid "" "Federated learning is a data minimization approach that allows multiple " "parties to collaboratively train a model without sharing their raw data. " @@ -3425,14 +3570,20 @@ msgid "" "membership inference and property inference attacks, or model inversion " "attacks." msgstr "" +"联合学习是一种数据最小化方法,允许多方在不共享原始数据的情况下合作训练一个模" +"型。然而,联合学习也带来了新的隐私挑战。各方与中央服务器之间的模型更新可能会" +"泄露本地数据信息。这些泄漏信息可能会被攻击利用,如成员推断攻击、属性推断攻击" +"或模型反转攻击。" #: ../../source/explanation-differential-privacy.rst:58 +#, fuzzy msgid "" "DP can play a crucial role in federated learning to provide privacy for " "the clients' data." -msgstr "" +msgstr "DP 可以在联合学习中发挥重要作用,为客户数据提供隐私保护。" #: ../../source/explanation-differential-privacy.rst:60 +#, fuzzy msgid "" "Depending on the granularity of privacy provision or the location of " "noise addition, different forms of DP exist in federated learning. 
In " @@ -3440,21 +3591,28 @@ msgid "" " learning based on where the noise is added: at the server (also known as" " the center) or at the client (also known as the local)." msgstr "" +"根据提供隐私的粒度或添加噪声的位置,联合学习中存在不同形式的 " +"DP。在本说明中,我们将根据添加噪声的位置,重点介绍联合学习中利用 DP " +"的两种方法:在服务器(也称为中心)或客户端(也称为本地)。" #: ../../source/explanation-differential-privacy.rst:63 +#, fuzzy msgid "" "**Central Differential Privacy**: DP is applied by the server and the " "goal is to prevent the aggregated model from leaking information about " "each client's data." -msgstr "" +msgstr "**中央差分隐私**: DP " +"由服务器应用,目标是防止聚合模型泄露每个客户的数据信息。" #: ../../source/explanation-differential-privacy.rst:65 +#, fuzzy msgid "" "**Local Differential Privacy**: DP is applied on the client side before " "sending any information to the server and the goal is to prevent the " "updates that are sent to the server from leaking any information about " "the client's data." -msgstr "" +msgstr "**本地差分隐私**: 在向服务器发送任何信息之前,在客户端应用 " +"DP,目的是防止向服务器发送的更新泄露任何有关客户端数据的信息。" #: ../../source/explanation-differential-privacy.rst:-1 #: ../../source/explanation-differential-privacy.rst:68 @@ -3464,13 +3622,16 @@ msgid "Central Differential Privacy" msgstr "差分隐私" #: ../../source/explanation-differential-privacy.rst:69 +#, fuzzy msgid "" "In this approach, which is also known as user-level DP, the central " "server is responsible for adding noise to the globally aggregated " "parameters. It should be noted that trust in the server is required." -msgstr "" +msgstr "在这种方法(也称为用户级 DP)中,中央服务器负责在全局汇总参数中添加噪声。需要" +"注意的是,这需要对服务器的信任。" #: ../../source/explanation-differential-privacy.rst:76 +#, fuzzy msgid "" "While there are various ways to implement central DP in federated " "learning, we concentrate on the algorithms proposed by [2] and [3]. The " @@ -3484,12 +3645,21 @@ msgid "" " by restricting the `L2` norm of the clients' model updates, ensuring " "that larger updates are scaled down to fit within the norm `S`." msgstr "" +"虽然在联合学习中实现中央数据处理的方法有很多种,但我们将重点放在[2]和[3]提出" +"的算法上。总体方法是剪辑客户端发送的模型更新,并在聚合模型中添加一定量的噪声" +"。在每次迭代中,以特定概率随机选择一组客户端进行训练。每个客户端对自己的数据" +"进行局部训练。然后,每个客户端的更新会被某个值`S`(灵敏度`S`)剪切。这将限制" +"任何单个客户端的影响,这对隐私至关重要,通常也有利于稳健性。" +"实现这一点的常用方法是限制客户机模型更新的 `L2` 准则," +"确保较大的更新被缩减以适应 `S` 准则。" #: ../../source/explanation-differential-privacy.rst:-1 +#, fuzzy msgid "clipping" -msgstr "" +msgstr "剪贴" #: ../../source/explanation-differential-privacy.rst:89 +#, fuzzy msgid "" "Afterwards, the Gaussian mechanism is used to add noise in order to " "distort the sum of all clients' updates. The amount of noise is scaled to" @@ -3497,38 +3667,49 @@ msgid "" "mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " "noise_scale * S ) / (number of sampled clients)`." msgstr "" +"然后,使用高斯机制添加噪声,以扭曲所有客户端的更新总和。噪声量与灵敏度值成正" +"比,以获得隐私保证。高斯机制的噪声采样范围为 `N (0, σ²)` ,其中 σ = ( " +"噪声规模 * S ) / (采样客户数)`。" #: ../../source/explanation-differential-privacy.rst:94 +#, fuzzy msgid "Clipping" -msgstr "" +msgstr "剪贴" #: ../../source/explanation-differential-privacy.rst:96 +#, fuzzy msgid "" "There are two forms of clipping commonly used in Central DP: Fixed " "Clipping and Adaptive Clipping." -msgstr "" +msgstr "中央处理器常用的剪切有两种形式:固定剪切和自适应剪切。" #: ../../source/explanation-differential-privacy.rst:98 +#, fuzzy msgid "" "**Fixed Clipping** : A predefined fix threshold is set for the magnitude " "of clients' updates. Any update exceeding this threshold is clipped back " "to the threshold value." 
-msgstr "" +msgstr "** 固定削波** : 为客户端更新的大小设置了一个预定义的固定阈值。任何超过该阈值" +"的更新都会被剪切回阈值。" #: ../../source/explanation-differential-privacy.rst:100 +#, fuzzy msgid "" "**Adaptive Clipping** : The clipping threshold dynamically adjusts based " "on the observed update distribution [4]. It means that the clipping value" " is tuned during the rounds with respect to the quantile of the update " "norm distribution." -msgstr "" +msgstr "** 自适应削波** : 削波阈值根据观察到的更新分布动态调整[4]。这意味着,在各轮" +"中,会根据更新规范分布的量化值调整削波值。" #: ../../source/explanation-differential-privacy.rst:102 +#, fuzzy msgid "" "The choice between fixed and adaptive clipping depends on various factors" " such as privacy requirements, data distribution, model complexity, and " "others." -msgstr "" +msgstr "在固定剪切和自适应剪切之间做出选择取决于各种因素,如隐私要求、数据分布、模型" +"复杂性等。" #: ../../source/explanation-differential-privacy.rst:-1 #: ../../source/explanation-differential-privacy.rst:105 @@ -3538,45 +3719,62 @@ msgid "Local Differential Privacy" msgstr "差分隐私" #: ../../source/explanation-differential-privacy.rst:107 +#, fuzzy msgid "" "In this approach, each client is responsible for performing DP. Local DP " "avoids the need for a fully trusted aggregator, but it should be noted " "that local DP leads to a decrease in accuracy but better privacy in " "comparison to central DP." msgstr "" +"在这种方法中,每个客户端都负责执行 DP。本地 DP " +"避免了对完全可信的聚合器的需求,但需要注意的是,与中央 DP 相比,本地 DP " +"会降低准确性,但却能更好地保护隐私。" #: ../../source/explanation-differential-privacy.rst:116 +#, fuzzy msgid "In this explainer, we focus on two forms of achieving Local DP:" -msgstr "" +msgstr "在本说明中,我们将重点介绍实现本地 DP 的两种形式:" #: ../../source/explanation-differential-privacy.rst:118 +#, fuzzy msgid "" "Each client adds noise to the local updates before sending them to the " "server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " "the sensitivity of the local model to be ∆, Gaussian noise is applied " "with a noise scale of σ where:" msgstr "" +"每个客户端在向服务器发送本地更新之前,都会在本地更新中加入噪声。为了实现(:ma" +"th:`\\epsilon`, :math:`\\delta`)-DP,考虑到本地模型的灵敏度为 " +"∆,应用了高斯噪声,噪声尺度为 σ,其中:" #: ../../source/explanation-differential-privacy.rst:120 +#, fuzzy msgid "" "\\small\n" "\\frac{∆ \\times \\sqrt{2 \\times " "\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" "\n" msgstr "" +"\\small\n" +"\\frac{∆ \\times \\sqrt{2 \\times \\log\\left(\\frac{1.25}{\\delta}\\right" +")}}{\\epsilon}\n" +"\n" #: ../../source/explanation-differential-privacy.rst:125 +#, fuzzy msgid "" "Each client adds noise to the gradients of the model during the local " "training (DP-SGD). More specifically, in this approach, gradients are " "clipped and an amount of calibrated noise is injected into the gradients." -msgstr "" +msgstr "在局部训练过程中,每个客户端都会向模型的梯度添加噪声(DP-SGD)。更具体地说," +"在这种方法中,梯度会被剪切,并在梯度中注入一定量的校准噪声。" #: ../../source/explanation-differential-privacy.rst:128 +#, fuzzy msgid "" "Please note that these two approaches are providing privacy at different " "levels." -msgstr "" +msgstr "请注意,这两种方法提供了不同层次的隐私。" #: ../../source/explanation-differential-privacy.rst:131 #, fuzzy @@ -3584,8 +3782,9 @@ msgid "**References:**" msgstr "参考资料" #: ../../source/explanation-differential-privacy.rst:133 +#, fuzzy msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." -msgstr "" +msgstr "[1] Dwork 等:《差分隐私的算法基础》。" #: ../../source/explanation-differential-privacy.rst:135 #, fuzzy @@ -3597,10 +3796,11 @@ msgstr "" "language models.\" arXiv preprint arXiv:1710.06963 (2017)." #: ../../source/explanation-differential-privacy.rst:137 +#, fuzzy msgid "" "[3] Geyer et al. 
Differentially Private Federated Learning: A Client " "Level Perspective." -msgstr "" +msgstr "[3] Geyer 等人。差异化化私人联合学习:客户层面的视角。" #: ../../source/explanation-differential-privacy.rst:139 #, fuzzy @@ -4952,8 +5152,9 @@ msgid "Install stable release" msgstr "安装稳定版" #: ../../source/how-to-install-flower.rst:15 +#, fuzzy msgid "Using pip" -msgstr "" +msgstr "使用 pip" #: ../../source/how-to-install-flower.rst:17 msgid "" @@ -4968,28 +5169,33 @@ msgid "" msgstr "对于使用虚拟客户端引擎的模拟,`flwr`` 应与`simulation`` 一起安装:" #: ../../source/how-to-install-flower.rst:27 +#, fuzzy msgid "Using conda (or mamba)" -msgstr "" +msgstr "使用 conda(或 mamba)" #: ../../source/how-to-install-flower.rst:29 +#, fuzzy msgid "Flower can also be installed from the ``conda-forge`` channel." -msgstr "" +msgstr "Flower 也可以从 ``conda-forge`` 频道安装。" #: ../../source/how-to-install-flower.rst:31 +#, fuzzy msgid "" "If you have not added ``conda-forge`` to your channels, you will first " "need to run the following::" -msgstr "" +msgstr "如果您尚未在频道中添加 ``conda-forge``,则首先需要运行以下程序::" #: ../../source/how-to-install-flower.rst:36 +#, fuzzy msgid "" "Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " "installed with ``conda``::" -msgstr "" +msgstr "一旦启用了 ``conda-forge`` 频道,就可以使用 ``conda``: 安装 ``flwr``:" #: ../../source/how-to-install-flower.rst:40 +#, fuzzy msgid "or with ``mamba``::" -msgstr "" +msgstr "或用 ``mamba`` ::" #: ../../source/how-to-install-flower.rst:46 msgid "Verify installation" @@ -5304,35 +5510,47 @@ msgstr "" "metrics.html>`_" #: ../../source/how-to-run-flower-using-docker.rst:2 +#, fuzzy msgid "Run Flower using Docker" -msgstr "" +msgstr "使用 Docker 运行 Flower" #: ../../source/how-to-run-flower-using-docker.rst:4 +#, fuzzy msgid "" "The simplest way to get started with Flower is by using the pre-made " "Docker images, which you can find on `Docker Hub " "`_." msgstr "" +"开始使用 Flower 的最简单方法是使用预制的 Docker 镜像,您可以在 `Docker Hub " +"`_ 上找到这些镜像。" #: ../../source/how-to-run-flower-using-docker.rst:7 +#, fuzzy msgid "Before you start, make sure that the Docker daemon is running:" -msgstr "" +msgstr "开始之前,请确保 Docker 守护进程正在运行:" #: ../../source/how-to-run-flower-using-docker.rst:14 +#, fuzzy msgid "" "If you do not see the version of Docker but instead get an error saying " "that the command was not found, you will need to install Docker first. " "You can find installation instruction `here `_." msgstr "" +"如果没有看到 Docker 的版本,而是出现找不到命令的错误,则需要先安装 Docker。" +"你可以在 `_ 找到安装说明。" #: ../../source/how-to-run-flower-using-docker.rst:20 +#, fuzzy msgid "" "On Linux, Docker commands require ``sudo`` privilege. If you want to " "avoid using ``sudo``, you can follow the `Post-installation steps " "`_ on the " "official Docker website." msgstr "" +"在 Linux 上,Docker 命令需要 ``sudo`` 权限。如果你想避免使用 ``sudo``," +"可以按照 Docker 官方网站上的 `安装后步骤 `_进行操作。" #: ../../source/how-to-run-flower-using-docker.rst:25 #, fuzzy @@ -5345,10 +5563,12 @@ msgid "Quickstart" msgstr "快速入门 JAX" #: ../../source/how-to-run-flower-using-docker.rst:30 +#, fuzzy msgid "If you're looking to try out Flower, you can use the following command:" -msgstr "" +msgstr "如果您想试用 Flower,可以使用以下命令:" #: ../../source/how-to-run-flower-using-docker.rst:37 +#, fuzzy msgid "" "The command will pull the Docker image with the tag " "``1.7.0-py3.11-ubuntu22.04`` from Docker Hub. The tag contains the " @@ -5356,16 +5576,24 @@ msgid "" "uses Flower 1.7.0, Python 3.11 and Ubuntu 22.04. The ``--rm`` flag tells " "Docker to remove the container after it exits." 
msgstr "" +"该命令将从 Docker Hub 提取标签为``1.7.0-py3.11-ubuntu22.04``的 Docker 镜像。" +"标签包含使用 Flower、Python 和 Ubuntu 的信息。在本例中,它使用了 Flower 1.7." +"0、Python 3.11 和 Ubuntu 22.04。rm \"标记告诉 Docker 在退出后移除容器。" #: ../../source/how-to-run-flower-using-docker.rst:44 +#, fuzzy msgid "" "By default, the Flower server keeps state in-memory. When using the " "Docker flag ``--rm``, the state is not persisted between container " "starts. We will show below how to save the state in a file on your host " "system." msgstr "" +"默认情况下,Flower 服务器会将状态保存在内存中。使用 Docker 标志 ``--rm`` 时," +"状态不会在容器启动之间持久化。下面我们将展示如何将状态保存到主机系统上的文件" +"中。" #: ../../source/how-to-run-flower-using-docker.rst:48 +#, fuzzy msgid "" "The ``-p :`` flag tells Docker to map the ports " "``9091``/``9092`` of the host to ``9091``/``9092`` of the container, " @@ -5374,8 +5602,14 @@ msgid "" "after the tag is passed to the Flower server. Here, we are passing the " "flag ``--insecure``." msgstr "" +"``-p :`` 标记会告诉 Docker 将主机的端口 ``9091``/``9092`` " +"映射到容器的端口 ``9091``/`9092``,这样你就可以在 ``http://localhost:9091`` " +"上访问 Driver API,在 ``http://localhost:9092`` 上访问 Fleet API。最后," +"标签后面的任何标志都会传递给 Flower 服务器。在这里,我们传递的标志是 " +"``--insecure`` 。" #: ../../source/how-to-run-flower-using-docker.rst:55 +#, fuzzy msgid "" "The ``--insecure`` flag enables insecure communication (using HTTP, not " "HTTPS) and should only be used for testing purposes. We strongly " @@ -5383,18 +5617,24 @@ msgid "" "flower-using-docker.html#enabling-ssl-for-secure-connections>`_ when " "deploying to a production environment." msgstr "" +"不安全 \"标志启用不安全通信(使用 HTTP,而非 HTTPS),只能用于测试目的。" +"我们强烈建议在部署到生产环境时启用 `SSL `_。" #: ../../source/how-to-run-flower-using-docker.rst:60 +#, fuzzy msgid "" "You can use ``--help`` to view all available flags that the server " "supports:" -msgstr "" +msgstr "您可以使用 ``--help`` 查看服务器支持的所有可用标记:" #: ../../source/how-to-run-flower-using-docker.rst:67 +#, fuzzy msgid "Mounting a volume to store the state on the host system" -msgstr "" +msgstr "在主机系统上挂载卷以存储状态" #: ../../source/how-to-run-flower-using-docker.rst:69 +#, fuzzy msgid "" "If you want to persist the state of the server on your host system, all " "you need to do is specify a path where you want to save the file on your " @@ -5404,14 +5644,22 @@ msgid "" "Furthermore, we use the flag ``--database`` to specify the name of the " "database file." msgstr "" +"如果想在主机系统上持久保存服务器的状态,只需在主机系统上指定保存文件的路径和" +"数据库文件的名称即可。在下面的示例中,我们通过标志 ``-v`` 告诉 Docker " +"将用户的主目录(主机上的 ``~/``)挂载到容器的 ``/app/`` 目录中。此外," +"我们使用标志 ``--database`` 来指定数据库文件的名称。" #: ../../source/how-to-run-flower-using-docker.rst:82 +#, fuzzy msgid "" "As soon as the server starts, the file ``state.db`` is created in the " "user's home directory on your host system. If the file already exists, " "the server tries to restore the state from the file. To start the server " "with an empty database, simply remove the ``state.db`` file." msgstr "" +"服务器一启动,就会在主机系统的用户主目录下创建文件 ``state.db``。如果该文件已" +"经存在,服务器会尝试从该文件恢复状态。要以空数据库启动服务器,只需删除 " +"``state.db`` 文件即可。" #: ../../source/how-to-run-flower-using-docker.rst:87 #, fuzzy @@ -5419,20 +5667,26 @@ msgid "Enabling SSL for secure connections" msgstr "启用 SSL 连接" #: ../../source/how-to-run-flower-using-docker.rst:89 +#, fuzzy msgid "" "To enable SSL, you will need a CA certificate, a server certificate and a" " server private key." -msgstr "" +msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" #: ../../source/how-to-run-flower-using-docker.rst:92 +#, fuzzy msgid "" "For testing purposes, you can generate your own self-signed certificates." 
" The `Enable SSL connections `_ page contains a section that " "will guide you through the process." msgstr "" +"出于测试目的,你可以生成自己的自签名证书。启用 SSL 连接 `_ " +"页面中有一个部分将指导你完成这一过程。" #: ../../source/how-to-run-flower-using-docker.rst:96 +#, fuzzy msgid "" "Assuming all files we need are in the local ``certificates`` directory, " "we can use the flag ``-v`` to mount the local directory into the " @@ -5440,23 +5694,34 @@ msgid "" "the files within the container. Finally, we pass the names of the " "certificates to the server with the ``--certificates`` flag." msgstr "" +"假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 " +"``-v`` 将本地目录挂载到容器的 ``/app/`` " +"目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates``" +" 标志将证书名称传递给服务器。" #: ../../source/how-to-run-flower-using-docker.rst:108 +#, fuzzy msgid "Using a different Flower or Python version" -msgstr "" +msgstr "使用不同的 Flower 或 Python 版本" #: ../../source/how-to-run-flower-using-docker.rst:110 +#, fuzzy msgid "" "If you want to use a different version of Flower or Python, you can do so" " by changing the tag. All versions we provide are available on `Docker " "Hub `_." msgstr "" +"如果您想使用不同版本的 Flower 或 Python,可以通过更改标签来实现。" +"我们提供的所有版本都可以在 `Docker Hub `_ 上找到。" #: ../../source/how-to-run-flower-using-docker.rst:114 +#, fuzzy msgid "Pinning a Docker image to a specific version" -msgstr "" +msgstr "将 Docker 映像固定到特定版本" #: ../../source/how-to-run-flower-using-docker.rst:116 +#, fuzzy msgid "" "It may happen that we update the images behind the tags. Such updates " "usually include security updates of system dependencies that should not " @@ -5464,16 +5729,22 @@ msgid "" "you always use the same image, you can specify the hash of the image " "instead of the tag." msgstr "" +"我们可能会更新标签后面的图像。此类更新通常包括系统依赖项的安全更新,不会改变 " +"Flower 的功能。不过,如果您想确保始终使用同一张图片,可以指定图片的哈希值而不" +"是标签。" #: ../../source/how-to-run-flower-using-docker.rst:121 +#, fuzzy msgid "" "The following command returns the current image hash referenced by the " "``server:1.7.0-py3.11-ubuntu22.04`` tag:" -msgstr "" +msgstr "下面的命令将返回由 ``server:1.7.0-py3.11-ubuntu22.04`` " +"标记引用的当前图像哈希值:" #: ../../source/how-to-run-flower-using-docker.rst:128 +#, fuzzy msgid "Next, we can pin the hash when running a new server container:" -msgstr "" +msgstr "接下来,我们可以在运行新服务器容器时将哈希值固定下来:" #: ../../source/how-to-run-flower-using-docker.rst:137 #, fuzzy @@ -5481,10 +5752,11 @@ msgid "Setting environment variables" msgstr "设置编码环境" #: ../../source/how-to-run-flower-using-docker.rst:139 +#, fuzzy msgid "" "To set a variable inside a Docker container, you can use the ``-e " "=`` flag." -msgstr "" +msgstr "要在 Docker 容器内设置变量,可以使用 ``-e =`` 标志。" #: ../../source/how-to-run-simulations.rst:2 msgid "Run simulations" @@ -6032,10 +6304,13 @@ msgid "" msgstr "要加载进度,只需在代码中添加以下几行。请注意,这将遍历所有已保存的检查点,并加载最新的检查点:" #: ../../source/how-to-save-and-load-model-checkpoints.rst:97 +#, fuzzy msgid "" "Return/use this object of type ``Parameters`` wherever necessary, such as" " in the ``initial_parameters`` when defining a ``Strategy``." msgstr "" +"在必要时返回/使用此 ``Parameters`` 类型的对象,例如在定义 ``Strategy` 时的 " +"``initial_parameters` 中。" #: ../../source/how-to-upgrade-to-flower-1.0.rst:2 msgid "Upgrade to Flower 1.0" @@ -6353,73 +6628,93 @@ msgstr "" "`_ 并使用 \"#questions``\"。" #: ../../source/how-to-use-built-in-mods.rst:2 +#, fuzzy msgid "Use Built-in Mods" -msgstr "" +msgstr "使用内置调制器" #: ../../source/how-to-use-built-in-mods.rst:4 +#, fuzzy msgid "" "**Note: This tutorial covers experimental features. 
The functionality and"
 " interfaces may change in future versions.**"
-msgstr ""
+msgstr "**注:本教程涵盖实验性功能,其功能和接口可能会在未来版本中发生变化。**"

 #: ../../source/how-to-use-built-in-mods.rst:6
+#, fuzzy
 msgid ""
 "In this tutorial, we will learn how to utilize built-in mods to augment "
 "the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) "
 "allow us to perform operations before and after a task is processed in "
 "the ``ClientApp``."
 msgstr ""
+"在本教程中,我们将学习如何利用内置 Mods 来增强 ``ClientApp`` "
+"的行为。Mods(有时也称为 Modifiers,即修改器)允许我们在 ``ClientApp`` "
+"处理任务之前和之后执行操作。"

 #: ../../source/how-to-use-built-in-mods.rst:9
+#, fuzzy
 msgid "What are Mods?"
-msgstr ""
+msgstr "什么是 Mods?"

 #: ../../source/how-to-use-built-in-mods.rst:11
+#, fuzzy
 msgid ""
 "A Mod is a callable that wraps around a ``ClientApp``. It can manipulate "
 "or inspect the incoming ``Message`` and the resulting outgoing "
 "``Message``. The signature for a ``Mod`` is as follows:"
 msgstr ""
+"Mod 是包裹在 ``ClientApp`` 周围的可调用对象。它可以操作或检查传入的 "
+"``Message`` 和由此产生的传出的 ``Message``。一个 ``Mod`` 的签名如下:"

 #: ../../source/how-to-use-built-in-mods.rst:18
+#, fuzzy
 msgid "A typical mod function might look something like this:"
-msgstr ""
+msgstr "一个典型的 mod 函数可能是这样的:"

 #: ../../source/how-to-use-built-in-mods.rst:31
+#, fuzzy
 msgid "Using Mods"
-msgstr ""
+msgstr "使用 Mods"

 #: ../../source/how-to-use-built-in-mods.rst:33
+#, fuzzy
 msgid "To use mods in your ``ClientApp``, you can follow these steps:"
-msgstr ""
+msgstr "要在您的 ``ClientApp`` 中使用 mod,可以按照以下步骤操作:"

 #: ../../source/how-to-use-built-in-mods.rst:36
+#, fuzzy
 msgid "1. Import the required mods"
-msgstr ""
+msgstr "1. 导入所需的 mod"

 #: ../../source/how-to-use-built-in-mods.rst:38
+#, fuzzy
 msgid "First, import the built-in mod you intend to use:"
-msgstr ""
+msgstr "首先,导入您打算使用的内置 mod:"

 #: ../../source/how-to-use-built-in-mods.rst:46
+#, fuzzy
 msgid "2. Define your client function"
-msgstr ""
+msgstr "2. 定义客户端函数"

 #: ../../source/how-to-use-built-in-mods.rst:48
+#, fuzzy
 msgid ""
 "Define your client function (``client_fn``) that will be wrapped by the "
 "mod(s):"
-msgstr ""
+msgstr "定义将被 mod 封装的客户端函数(``client_fn``):"

 #: ../../source/how-to-use-built-in-mods.rst:57
+#, fuzzy
 msgid "3. Create the ``ClientApp`` with mods"
-msgstr ""
+msgstr "3. 使用 mods 创建 ``ClientApp``"

 #: ../../source/how-to-use-built-in-mods.rst:59
+#, fuzzy
 msgid ""
 "Create your ``ClientApp`` and pass the mods as a list to the ``mods`` "
 "argument. 
The order in which you provide the mods matters:" -msgstr "" +msgstr "创建您的 ``ClientApp`` 并将 mods 作为列表传递给 ``mods`` 参数。提供 mod " +"的顺序很重要:" #: ../../source/how-to-use-built-in-mods.rst:72 #, fuzzy @@ -6427,50 +6722,62 @@ msgid "Order of execution" msgstr "停用" #: ../../source/how-to-use-built-in-mods.rst:74 +#, fuzzy msgid "" "When the ``ClientApp`` runs, the mods are executed in the order they are " "provided in the list:" -msgstr "" +msgstr "当运行 ``ClientApp`` 时,会按照列表中提供的顺序执行模块:" #: ../../source/how-to-use-built-in-mods.rst:76 +#, fuzzy msgid "``example_mod_1`` (outermost mod)" -msgstr "" +msgstr "``example_mod_1`` (最外层模块)" #: ../../source/how-to-use-built-in-mods.rst:77 +#, fuzzy msgid "``example_mod_2`` (next mod)" -msgstr "" +msgstr "示例模式 2(下一个模式)" #: ../../source/how-to-use-built-in-mods.rst:78 +#, fuzzy msgid "" "Message handler (core function that handles the incoming ``Message`` and " "returns the outgoing ``Message``)" -msgstr "" +msgstr "消息处理程序(处理传入的 \"消息 \"并返回传出的 \"消息 \"的核心函数)" #: ../../source/how-to-use-built-in-mods.rst:79 +#, fuzzy msgid "``example_mod_2`` (on the way back)" -msgstr "" +msgstr "``example_mod_2`` (返回途中)" #: ../../source/how-to-use-built-in-mods.rst:80 +#, fuzzy msgid "``example_mod_1`` (outermost mod on the way back)" -msgstr "" +msgstr "``example_mod_1`` (返回途中最外层的模式)" #: ../../source/how-to-use-built-in-mods.rst:82 +#, fuzzy msgid "" "Each mod has a chance to inspect and modify the incoming ``Message`` " "before passing it to the next mod, and likewise with the outgoing " "``Message`` before returning it up the stack." -msgstr "" +msgstr "每个模块都有机会检查和修改传入的 \"信息\",然后再将其传递给下一个模块,同样," +"也有机会检查和修改传出的 \"信息\",然后再将其返回堆栈。" #: ../../source/how-to-use-built-in-mods.rst:87 +#, fuzzy msgid "" "By following this guide, you have learned how to effectively use mods to " "enhance your ``ClientApp``'s functionality. Remember that the order of " "mods is crucial and affects how the input and output are processed." msgstr "" +"通过本指南,您已学会如何有效地使用 mod 来增强您的 ``ClientApp`` " +"的功能。请记住,mod 的顺序至关重要,它会影响输入和输出的处理方式。" #: ../../source/how-to-use-built-in-mods.rst:89 +#, fuzzy msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" -msgstr "" +msgstr "使用 mods 构建更强大、更灵活的 \"客户端应用程序\"!" #: ../../source/how-to-use-differential-privacy.rst:2 #, fuzzy @@ -6478,29 +6785,38 @@ msgid "Use Differential Privacy" msgstr "差分隐私" #: ../../source/how-to-use-differential-privacy.rst:3 +#, fuzzy msgid "" "This guide explains how you can utilize differential privacy in the " "Flower framework. If you are not yet familiar with differential privacy, " "you can refer to :doc:`explanation-differential-privacy`." msgstr "" +"本指南解释了如何在 Flower 框架中使用差分隐私。如果您还不熟悉差分隐私," +"可以参考 :doc:`explanation-differential-privacy` 。" #: ../../source/how-to-use-differential-privacy.rst:7 +#, fuzzy msgid "" "Differential Privacy in Flower is in a preview phase. If you plan to use " "these features in a production environment with sensitive data, feel free" " contact us to discuss your requirements and to receive guidance on how " "to best use these features." msgstr "" +"Flower 中的差异隐私处于预览阶段。如果您计划在生产环境中使用这些敏感数据功能," +"请随时联系我们,讨论您的需求,并获得如何最好地使用这些功能的指导。" #: ../../source/how-to-use-differential-privacy.rst:12 +#, fuzzy msgid "" "This approach consists of two seprate phases: clipping of the updates and" " adding noise to the aggregated model. For the clipping phase, Flower " "framework has made it possible to decide whether to perform clipping on " "the server side or the client side." 
-msgstr "" +msgstr "这种方法包括两个独立的阶段:对更新进行剪切和在聚合模型中添加噪声。在剪切阶段" +",Flower 框架可以决定是在服务器端还是在客户端执行剪切。" #: ../../source/how-to-use-differential-privacy.rst:15 +#, fuzzy msgid "" "**Server-side Clipping**: This approach has the advantage of the server " "enforcing uniform clipping across all clients' updates and reducing the " @@ -6508,14 +6824,19 @@ msgid "" "disadvantage of increasing the computational load on the server due to " "the need to perform the clipping operation for all clients." msgstr "" +"** 服务器端剪切**: 这种方法的优点是服务器可对所有客户端的更新执行统一的剪切" +",并减少剪切值的通信开销。不过,这种方法也有缺点,那就是需要为所有客户端执行" +"剪切操作,从而增加了服务器的计算负荷。" #: ../../source/how-to-use-differential-privacy.rst:16 +#, fuzzy msgid "" "**Client-side Clipping**: This approach has the advantage of reducing the" " computational overhead on the server. However, it also has the " "disadvantage of lacking centralized control, as the server has less " "control over the clipping process." -msgstr "" +msgstr "**客户端剪切**: 这种方法的优点是可以减少服务器的计算开销。不过,它也有缺乏集" +"中控制的缺点,因为服务器对剪切过程的控制较少。" #: ../../source/how-to-use-differential-privacy.rst:21 #, fuzzy @@ -6523,6 +6844,7 @@ msgid "Server-side Clipping" msgstr "服务器端逻辑" #: ../../source/how-to-use-differential-privacy.rst:22 +#, fuzzy msgid "" "For central DP with server-side clipping, there are two :code:`Strategy` " "classes that act as wrappers around the actual :code:`Strategy` instance " @@ -6531,6 +6853,11 @@ msgid "" ":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " "adaptive clipping." msgstr "" +"对于具有服务器端剪裁功能的中央 DP,有两个 :code:`Strategy` 类作为实际 " +":code:`Strategy` 实例(例如 :code:`FedAvg`)的包装器。这两个封装类分别是 " +":code:`DifferentialPrivacyServerSideFixedClipping` 和 " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` " +",用于固定剪辑和自适应剪辑。" #: ../../source/how-to-use-differential-privacy.rst:-1 #, fuzzy @@ -6538,6 +6865,7 @@ msgid "server side clipping" msgstr "服务器端逻辑" #: ../../source/how-to-use-differential-privacy.rst:31 +#, fuzzy msgid "" "The code sample below enables the :code:`FedAvg` strategy to use server-" "side fixed clipping using the " @@ -6546,6 +6874,9 @@ msgid "" ":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " "corresponding input parameters." msgstr "" +"下面的代码示例使用 :code:`DifferentialPrivacyServerSideFixedClipping` " +"封装类使 :code:`FedAvg` 策略使用服务器端固定剪辑。通过调整相应的输入参数," +"同样的方法也可用于 :code:`DifferentialPrivacyServerSideAdaptiveClipping`。" #: ../../source/how-to-use-differential-privacy.rst:52 #, fuzzy @@ -6553,6 +6884,7 @@ msgid "Client-side Clipping" msgstr "客户端逻辑" #: ../../source/how-to-use-differential-privacy.rst:53 +#, fuzzy msgid "" "For central DP with client-side clipping, the server sends the clipping " "value to selected clients on each round. Clients can use existing Flower " @@ -6562,6 +6894,11 @@ msgid "" ":code:`DifferentialPrivacyClientSideFixedClipping` and " ":code:`DifferentialPrivacyClientSideAdaptiveClipping`." 
msgstr "" +"对于带有客户端剪裁功能的中央 DP,服务器会在每一轮向选定的客户端发送剪裁值。" +"客户端可以使用现有的 Flower :code:`Mods`来执行剪裁。有两种模式可用于固定和自" +"适应客户端剪辑::code:`fixedclipping_mod` 和 :code:`adaptiveclipping_mod`," +"以及相应的服务器端封装 :code:`DifferentialPrivacyClientSideFixedClipping` 和 " +":code:`DifferentialPrivacyClientSideAdaptiveClipping`。" #: ../../source/how-to-use-differential-privacy.rst:-1 #, fuzzy @@ -6569,49 +6906,66 @@ msgid "client side clipping" msgstr "客户端逻辑" #: ../../source/how-to-use-differential-privacy.rst:63 +#, fuzzy msgid "" "The code sample below enables the :code:`FedAvg` strategy to use " "differential privacy with client-side fixed clipping using both the " ":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " "the client, :code:`fixedclipping_mod`:" msgstr "" +"下面的代码示例使用 :code:`DifferentialPrivacyClientSideFixedClipping` " +"封装类和客户端的 :code:`fixedclipping_mod` 使 :code:`FedAvg` " +"策略在客户端固定剪辑的情况下使用差分隐私:" #: ../../source/how-to-use-differential-privacy.rst:80 +#, fuzzy msgid "" "In addition to the server-side strategy wrapper, the :code:`ClientApp` " "needs to configure the matching :code:`fixedclipping_mod` to perform the " "client-side clipping:" msgstr "" +"除了服务器端策略包装器外,:code:`ClientApp` 还需要配置匹配的 " +":code:`fixedclipping_mod` 以执行客户端剪切:" #: ../../source/how-to-use-differential-privacy.rst:97 +#, fuzzy msgid "" "To utilize local differential privacy (DP) and add noise to the client " "model parameters before transmitting them to the server in Flower, you " "can use the `LocalDpMod`. The following hyperparameters need to be set: " "clipping norm value, sensitivity, epsilon, and delta." msgstr "" +"要利用本地差分隐私(DP)并在将客户端模型参数传输到 Flower " +"服务器之前为其添加噪声,可以使用 " +"`LocalDpMod`。需要设置以下超参数:剪切规范值、灵敏度、ε 和 delta。" #: ../../source/how-to-use-differential-privacy.rst:-1 +#, fuzzy msgid "local DP mod" -msgstr "" +msgstr "本地 DP 模式" #: ../../source/how-to-use-differential-privacy.rst:104 +#, fuzzy msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" -msgstr "" +msgstr "下面的代码示例展示了如何使用 :code:`LocalDpMod`:" #: ../../source/how-to-use-differential-privacy.rst:122 +#, fuzzy msgid "" "Please note that the order of mods, especially those that modify " "parameters, is important when using multiple modifiers. Typically, " "differential privacy (DP) modifiers should be the last to operate on " "parameters." -msgstr "" +msgstr "请注意,在使用多个修改器时,修改器(尤其是修改参数的修改器)的顺序非常重要。" +"通常情况下,差分隐私 (DP) 修改器应最后对参数进行操作。" #: ../../source/how-to-use-differential-privacy.rst:125 +#, fuzzy msgid "Local Training using Privacy Engines" -msgstr "" +msgstr "使用隐私引擎进行本地培训" #: ../../source/how-to-use-differential-privacy.rst:126 +#, fuzzy msgid "" "For ensuring data instance-level privacy during local model training on " "the client side, consider leveraging privacy engines such as Opacus and " @@ -6621,6 +6975,11 @@ msgid "" " Privacy `_)." msgstr "" +"要在客户端本地模型训练期间确保数据实例级隐私,可考虑利用 Opacus 和 " +"TensorFlow Privacy 等隐私引擎。有关将 Flower 与这些引擎结合使用的示例," +"请参阅 Flower 示例目录(`Opacus `_, `Tensorflow Privacy `_)。" #: ../../source/how-to-use-strategies.rst:2 msgid "Use strategies" @@ -6884,12 +7243,14 @@ msgid "Information-oriented API reference and other reference material." msgstr "以信息为导向的 API 参考资料和其他参考资料。" #: ../../source/index.rst:132::1 +#, fuzzy msgid ":py:obj:`flwr `\\" -msgstr "" +msgstr ":py:obj:`flwr `\\" #: ../../source/index.rst:132::1 flwr:1 of +#, fuzzy msgid "Flower main package." 
-msgstr "" +msgstr "Flower 主包装。" #: ../../source/index.rst:149 msgid "Contributor docs" @@ -6933,28 +7294,32 @@ msgid "flwr" msgstr "Flower" #: ../../source/ref-api/flwr.rst:25 ../../source/ref-api/flwr.server.rst:52 +#, fuzzy msgid "Modules" -msgstr "" +msgstr "模块" #: ../../source/ref-api/flwr.rst:35::1 +#, fuzzy msgid ":py:obj:`flwr.client `\\" -msgstr "" +msgstr ":py:obj:`flwr.client `\\" #: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of msgid "Flower client." msgstr "Flower 客户端。" #: ../../source/ref-api/flwr.rst:35::1 +#, fuzzy msgid ":py:obj:`flwr.common `\\" -msgstr "" +msgstr ":py:obj:`flwr.common `\\" #: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of msgid "Common components shared between server and client." msgstr "服务器和客户端共享的通用组件。" #: ../../source/ref-api/flwr.rst:35::1 +#, fuzzy msgid ":py:obj:`flwr.server `\\" -msgstr "" +msgstr ":py:obj:`flwr.server `\\" #: ../../source/ref-api/flwr.rst:35::1 #: ../../source/ref-api/flwr.server.rst:41::1 flwr.server:1 @@ -6963,8 +7328,9 @@ msgid "Flower server." msgstr "Flower 服务器。" #: ../../source/ref-api/flwr.rst:35::1 +#, fuzzy msgid ":py:obj:`flwr.simulation `\\" -msgstr "" +msgstr ":py:obj:`flwr.simulation `\\" #: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of #, fuzzy @@ -6984,8 +7350,9 @@ msgid "Functions" msgstr "四种函数:" #: ../../source/ref-api/flwr.client.rst:24::1 +#, fuzzy msgid ":py:obj:`run_client_app `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`run_client_app `\\ \\(\\)" #: ../../source/ref-api/flwr.client.rst:24::1 #: flwr.client.app.run_client_app:1 of @@ -6994,10 +7361,13 @@ msgid "Run Flower client app." msgstr "Flower 客户端。" #: ../../source/ref-api/flwr.client.rst:24::1 +#, fuzzy msgid "" ":py:obj:`start_client `\\ \\(\\*\\, " "server\\_address\\[\\, client\\_fn\\, ...\\]\\)" msgstr "" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" #: ../../source/ref-api/flwr.client.rst:24::1 #: flwr.client.app.start_client:1 of @@ -7005,10 +7375,13 @@ msgid "Start a Flower client node which connects to a Flower server." msgstr "启动一个 Flower 客户节点,连接到 Flower 服务器。" #: ../../source/ref-api/flwr.client.rst:24::1 +#, fuzzy msgid "" ":py:obj:`start_numpy_client `\\ \\(\\*\\," " server\\_address\\, client\\)" msgstr "" +":py:obj:`start_numpy_client `\\ \\(\\*\\, " +"server\\_address\\, client\\)" #: ../../source/ref-api/flwr.client.rst:24::1 #: flwr.client.app.start_numpy_client:1 of @@ -7020,12 +7393,14 @@ msgstr "启动 Flower NumPyClient,连接到 gRPC 服务器。" #: ../../source/ref-api/flwr.server.rst:29 #: ../../source/ref-api/flwr.server.strategy.rst:17 #: ../../source/ref-api/flwr.server.workflow.rst:17 +#, fuzzy msgid "Classes" -msgstr "" +msgstr "类别" #: ../../source/ref-api/flwr.client.rst:33::1 +#, fuzzy msgid ":py:obj:`Client `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`Client `\\ \\(\\)" #: ../../source/ref-api/flwr.client.rst:33::1 #: flwr.client.client.Client:1 of @@ -7033,10 +7408,12 @@ msgid "Abstract base class for Flower clients." msgstr "Flower 客户端的抽象基类。" #: ../../source/ref-api/flwr.client.rst:33::1 +#, fuzzy msgid "" ":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " "mods\\]\\)" msgstr "" +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, mods\\]\\)" #: ../../source/ref-api/flwr.client.rst:33::1 #: flwr.client.client_app.ClientApp:1 of @@ -7045,8 +7422,9 @@ msgid "Flower ClientApp." 
msgstr "Flower 客户端。" #: ../../source/ref-api/flwr.client.rst:33::1 +#, fuzzy msgid ":py:obj:`NumPyClient `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`NumPyClient `\\ \\(\\)" #: ../../source/ref-api/flwr.client.rst:33::1 #: flwr.client.numpy_client.NumPyClient:1 of @@ -7056,8 +7434,9 @@ msgstr "使用 NumPy 的 Flower 客户端的抽象基类。" #: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 #: flwr.server.client_manager.ClientManager:1 #: flwr.server.strategy.strategy.Strategy:1 of +#, fuzzy msgid "Bases: :py:class:`~abc.ABC`" -msgstr "" +msgstr "Bases: :py:class:`~abc.ABC`" #: ../../source/ref-api/flwr.client.Client.rst:15 #: ../../source/ref-api/flwr.client.ClientApp.rst:15 @@ -7122,12 +7501,14 @@ msgstr "" #: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 #: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 #: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 +#, fuzzy msgid "Methods" -msgstr "" +msgstr "方法" #: ../../source/ref-api/flwr.client.Client.rst:44::1 +#, fuzzy msgid ":py:obj:`evaluate `\\ \\(ins\\)" -msgstr "" +msgstr ":py:obj:`evaluate `\\ \\(ins\\)" #: ../../source/ref-api/flwr.client.Client.rst:44::1 #: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 @@ -7137,8 +7518,9 @@ msgid "Evaluate the provided parameters using the locally held dataset." msgstr "使用本地数据集评估所提供的参数。" #: ../../source/ref-api/flwr.client.Client.rst:44::1 +#, fuzzy msgid ":py:obj:`fit `\\ \\(ins\\)" -msgstr "" +msgstr ":py:obj:`fit `\\ \\(ins\\)" #: ../../source/ref-api/flwr.client.Client.rst:44::1 #: flwr.client.client.Client.fit:1 of @@ -7146,8 +7528,9 @@ msgid "Refine the provided parameters using the locally held dataset." msgstr "利用本地数据集完善所提供的参数。" #: ../../source/ref-api/flwr.client.Client.rst:44::1 +#, fuzzy msgid ":py:obj:`get_context `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`get_context `\\ \\(\\)" #: ../../source/ref-api/flwr.client.Client.rst:44::1 #: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 @@ -7158,8 +7541,10 @@ msgid "Get the run context from this client." msgstr "评估客户端的反应。" #: ../../source/ref-api/flwr.client.Client.rst:44::1 +#, fuzzy msgid ":py:obj:`get_parameters `\\ \\(ins\\)" msgstr "" +":py:obj:`get_parameters `\\ \\(ins\\)" #: ../../source/ref-api/flwr.client.Client.rst:44::1 #: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 @@ -7169,8 +7554,10 @@ msgid "Return the current local model parameters." msgstr "返回当前本地模型参数。" #: ../../source/ref-api/flwr.client.Client.rst:44::1 +#, fuzzy msgid ":py:obj:`get_properties `\\ \\(ins\\)" msgstr "" +":py:obj:`get_properties `\\ \\(ins\\)" #: ../../source/ref-api/flwr.client.Client.rst:44::1 #: flwr.client.client.Client.get_properties:1 of @@ -7178,19 +7565,22 @@ msgid "Return set of client's properties." msgstr "返回客户端的属性集。" #: ../../source/ref-api/flwr.client.Client.rst:44::1 +#, fuzzy msgid ":py:obj:`set_context `\\ \\(context\\)" -msgstr "" +msgstr ":py:obj:`set_context `\\ \\(context\\)" #: ../../source/ref-api/flwr.client.Client.rst:44::1 #: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 #: flwr.client.client.Client.set_context:1 #: flwr.client.numpy_client.NumPyClient.set_context:1 of +#, fuzzy msgid "Apply a run context to this client." 
-msgstr "" +msgstr "将运行上下文应用于该客户端。" #: ../../source/ref-api/flwr.client.Client.rst:44::1 +#, fuzzy msgid ":py:obj:`to_client `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`to_client `\\ \\(\\)" #: ../../source/ref-api/flwr.client.Client.rst:44::1 #: flwr.client.client.Client.to_client:1 of @@ -7225,12 +7615,14 @@ msgstr "返回客户端(本身)。" #: ../../source/ref-api/flwr.common.Status.rst:25 #: ../../source/ref-api/flwr.server.LegacyContext.rst:25 #: ../../source/ref-api/flwr.server.ServerConfig.rst:25 +#, fuzzy msgid "Attributes" -msgstr "" +msgstr "属性" #: flwr.client.client.Client.evaluate:1::1 of +#, fuzzy msgid ":py:obj:`context `\\" -msgstr "" +msgstr ":py:obj:`context `\\" #: ../../source/ref-api/flwr.common.Parameters.rst:2 #: flwr.client.app.start_client flwr.client.app.start_numpy_client @@ -7408,8 +7800,9 @@ msgstr "客户端" #: flwr.server.workflow.default_workflows.DefaultWorkflow:1 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 #: of +#, fuzzy msgid "Bases: :py:class:`object`" -msgstr "" +msgstr "Bases: :py:class:`object`" #: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 #: flwr.client.client_app.ClientApp:4 @@ -7427,41 +7820,52 @@ msgid "Examples" msgstr "实例" #: flwr.client.client_app.ClientApp:5 of +#, fuzzy msgid "" "Assuming a typical `Client` implementation named `FlowerClient`, you can " "wrap it in a `ClientApp` as follows:" -msgstr "" +msgstr "假定有一个名为 `FlowerClient` 的典型 `Client` 实现,可以将其封装在一个 " +"`ClientApp` 中,如下所示:" #: flwr.client.client_app.ClientApp:16 of +#, fuzzy msgid "" "If the above code is in a Python module called `client`, it can be " "started as follows:" -msgstr "" +msgstr "如果上述代码位于一个名为 \"客户端 \"的 Python " +"模块中,则可以按如下方式启动它:" #: flwr.client.client_app.ClientApp:21 of +#, fuzzy msgid "" "In this `client:app` example, `client` refers to the Python module " "`client.py` in which the previous code lives in and `app` refers to the " "global attribute `app` that points to an object of type `ClientApp`." msgstr "" +"在这个 `client:app` 例子中,`client` 指的是前面代码所在的 Python 模块 `client" +".py`,而 `app` 指的是指向 `ClientApp` 类型对象的全局属性 `app` 。" #: flwr.client.client_app.ClientApp.evaluate:1::1 of +#, fuzzy msgid ":py:obj:`evaluate `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`evaluate `\\ \\(\\)" #: flwr.client.client_app.ClientApp.evaluate:1 #: flwr.client.client_app.ClientApp.evaluate:1::1 of +#, fuzzy msgid "Return a decorator that registers the evaluate fn with the client app." -msgstr "" +msgstr "返回一个装饰器,用于向客户端程序注册评估 fn。" #: flwr.client.client_app.ClientApp.evaluate:1::1 of +#, fuzzy msgid ":py:obj:`query `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`query `\\ \\(\\)" #: flwr.client.client_app.ClientApp.evaluate:1::1 #: flwr.client.client_app.ClientApp.query:1 of +#, fuzzy msgid "Return a decorator that registers the query fn with the client app." -msgstr "" +msgstr "返回一个向客户端应用程序注册查询 fn 的装饰器。" #: flwr.client.client_app.ClientApp.evaluate:1::1 of #, fuzzy @@ -7470,22 +7874,28 @@ msgstr "server.strategy.Strategy" #: flwr.client.client_app.ClientApp.evaluate:1::1 #: flwr.client.client_app.ClientApp.train:1 of +#, fuzzy msgid "Return a decorator that registers the train fn with the client app." 
-msgstr "" +msgstr "返回一个装饰器,用于在客户端应用程序中注册火车 fn。" #: ../../source/ref-api/flwr.client.NumPyClient.rst:2 msgid "NumPyClient" msgstr "NumPyClient" #: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#, fuzzy msgid "" ":py:obj:`evaluate `\\ \\(parameters\\, " "config\\)" msgstr "" +":py:obj:`evaluate `\\ \\(parameters\\, " +"config\\)" #: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#, fuzzy msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" msgstr "" +":py:obj:`fit `\\ \\(parameters\\, config\\)" #: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 #: flwr.client.numpy_client.NumPyClient.fit:1 of @@ -7493,20 +7903,27 @@ msgid "Train the provided parameters using the locally held dataset." msgstr "使用本地数据集训练所提供的参数。" #: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#, fuzzy msgid ":py:obj:`get_context `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`get_context `\\ \\(\\)" #: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#, fuzzy msgid "" ":py:obj:`get_parameters `\\ " "\\(config\\)" msgstr "" +":py:obj:`get_parameters `\\ \\(" +"config\\)" #: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#, fuzzy msgid "" ":py:obj:`get_properties `\\ " "\\(config\\)" msgstr "" +":py:obj:`get_properties `\\ \\(" +"config\\)" #: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 #: flwr.client.numpy_client.NumPyClient.get_properties:1 of @@ -7514,14 +7931,17 @@ msgid "Return a client's set of properties." msgstr "返回客户端的属性集。" #: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#, fuzzy msgid "" ":py:obj:`set_context `\\ " "\\(context\\)" msgstr "" +":py:obj:`set_context `\\ \\(context\\)" #: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#, fuzzy msgid ":py:obj:`to_client `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`to_client `\\ \\(\\)" #: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 #: flwr.client.numpy_client.NumPyClient.to_client:1 of @@ -7529,8 +7949,9 @@ msgid "Convert to object to Client type and return it." msgstr "将对象转换为客户类型并返回。" #: flwr.client.numpy_client.NumPyClient.evaluate:1::1 of +#, fuzzy msgid ":py:obj:`context `\\" -msgstr "" +msgstr ":py:obj:`context `\\" #: flwr.client.numpy_client.NumPyClient.evaluate:3 #: flwr.client.numpy_client.NumPyClient.fit:3 @@ -7647,8 +8068,9 @@ msgstr "" "类型值的字典。它可用于将任意属性值传回服务器。" #: ../../source/ref-api/flwr.client.run_client_app.rst:2 +#, fuzzy msgid "run\\_client\\_app" -msgstr "" +msgstr "run\\_client\\_app" #: ../../source/ref-api/flwr.client.start_client.rst:2 #, fuzzy @@ -7695,10 +8117,13 @@ msgid "" msgstr "字节字符串或路径字符串形式的 PEM 编码根证书。如果提供,将使用这些证书与启用 SSL 的 Flower 服务器建立安全连接。" #: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +#, fuzzy msgid "" "Starts an insecure gRPC connection when True. Enables HTTPS connection " "when False, using system certificates if `root_certificates` is None." msgstr "" +"为 True 时启动不安全的 gRPC 连接。False 时启用 HTTPS 连接,如果 " +"`root_certificates` 为 None,则使用系统证书。" #: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of msgid "" @@ -7710,18 +8135,22 @@ msgstr "" "'rest': HTTP(实验性)" #: flwr.client.app.start_client:31 of +#, fuzzy msgid "" "The maximum number of times the client will try to connect to the server " "before giving up in case of a connection error. If set to None, there is " "no limit to the number of tries." 
-msgstr "" +msgstr "客户端在出现连接错误时放弃连接服务器的最大尝试次数。如果设置为 \"无\"" +",则不限制尝试次数。" #: flwr.client.app.start_client:35 of +#, fuzzy msgid "" "The maximum duration before the client stops trying to connect to the " "server in case of connection error. If set to None, there is no limit to " "the total time." -msgstr "" +msgstr "在出现连接错误时,客户端停止尝试连接服务器之前的最长持续时间。如果设置为 \"无" +"\",则总时间没有限制。" #: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of msgid "Starting a gRPC client with an insecure server connection:" @@ -7743,12 +8172,16 @@ msgid "start\\_numpy\\_client" msgstr "start_numpy_client" #: flwr.client.app.start_numpy_client:5 of +#, fuzzy msgid "" "This function is deprecated since 1.7.0. Use " ":code:`flwr.client.start_client` instead and first convert your " ":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " ":code:`to_client()` method." msgstr "" +"自 1.7.0 起该函数已被弃用。请使用 :code:`flwr.client.start_client`," +"并首先通过执行 :code:`to_client()`方法将 :code:`NumPyClient`转换为 " +":code:`flwr.client.Client`。" #: flwr.client.app.start_numpy_client:13 of msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." @@ -7759,8 +8192,10 @@ msgid "common" msgstr "常见" #: ../../source/ref-api/flwr.common.rst:30::1 +#, fuzzy msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" msgstr "" +":py:obj:`array_from_numpy `\\ \\(ndarray\\)" #: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.record.conversion_utils.array_from_numpy:1 of @@ -7769,8 +8204,10 @@ msgid "Create Array from NumPy ndarray." msgstr "将参数对象转换为 NumPy ndarrays。" #: ../../source/ref-api/flwr.common.rst:30::1 +#, fuzzy msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" msgstr "" +":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" #: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.bytes_to_ndarray:1 of @@ -7778,10 +8215,13 @@ msgid "Deserialize NumPy ndarray from bytes." msgstr "从字节反序列化 NumPy ndarray。" #: ../../source/ref-api/flwr.common.rst:30::1 +#, fuzzy msgid "" ":py:obj:`configure `\\ \\(identifier\\[\\, " "filename\\, host\\]\\)" msgstr "" +":py:obj:`configure `\\ \\(identifier\\[\\, filename\\" +", host\\]\\)" #: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.logger.configure:1 of @@ -7789,21 +8229,28 @@ msgid "Configure logging to file and/or remote log server." msgstr "配置将日志记录到文件和/或远程日志服务器。" #: ../../source/ref-api/flwr.common.rst:30::1 +#, fuzzy msgid "" ":py:obj:`event `\\ \\(event\\_type\\[\\, " "event\\_details\\]\\)" msgstr "" +":py:obj:`event `\\ \\(event\\_type\\[\\, event\\_details\\" +"]\\)" #: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.telemetry.event:1 of +#, fuzzy msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." -msgstr "" +msgstr "将 create_event 提交给 ThreadPoolExecutor 以避免阻塞。" #: ../../source/ref-api/flwr.common.rst:30::1 +#, fuzzy msgid "" ":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " "\\*\\*kwargs\\)" msgstr "" +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, \\*\\*" +"kwargs\\)" #: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 #: of @@ -7811,8 +8258,10 @@ msgid "Log 'msg % args' with the integer severity 'level'." msgstr "以整数严重性 \"级别 \"记录 \"msg % args\"。" #: ../../source/ref-api/flwr.common.rst:30::1 +#, fuzzy msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" msgstr "" +":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" #: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.ndarray_to_bytes:1 of @@ -7820,8 +8269,9 @@ msgid "Serialize NumPy ndarray to bytes." 
msgstr "将 NumPy ndarray 序列化为字节。" #: ../../source/ref-api/flwr.common.rst:30::1 +#, fuzzy msgid ":py:obj:`now `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`now `\\ \\(\\)" #: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.date.now:1 of @@ -7829,10 +8279,13 @@ msgid "Construct a datetime from time.time() with time zone set to UTC." msgstr "从 time.time() 生成日期时间,时区设置为 UTC。" #: ../../source/ref-api/flwr.common.rst:30::1 +#, fuzzy msgid "" ":py:obj:`ndarrays_to_parameters `\\ " "\\(ndarrays\\)" msgstr "" +":py:obj:`ndarrays_to_parameters `\\ \\(" +"ndarrays\\)" #: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.ndarrays_to_parameters:1 @@ -7843,10 +8296,13 @@ msgid "Convert NumPy ndarrays to parameters object." msgstr "将 NumPy ndarrays 转换为参数对象。" #: ../../source/ref-api/flwr.common.rst:30::1 +#, fuzzy msgid "" ":py:obj:`parameters_to_ndarrays `\\ " "\\(parameters\\)" msgstr "" +":py:obj:`parameters_to_ndarrays `\\ \\(" +"parameters\\)" #: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.parameters_to_ndarrays:1 of @@ -7854,10 +8310,12 @@ msgid "Convert parameters object to NumPy ndarrays." msgstr "将参数对象转换为 NumPy ndarrays。" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid "" ":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " "data\\)" msgstr "" +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, data\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.record.parametersrecord.Array:1 of @@ -7866,10 +8324,13 @@ msgid "Array type." msgstr "返回类型" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid "" ":py:obj:`ClientMessage `\\ " "\\(\\[get\\_properties\\_res\\, ...\\]\\)" msgstr "" +":py:obj:`ClientMessage `\\ \\(\\[get\\_properties" +"\\_res\\, ...\\]\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.ClientMessage:1 of @@ -7877,8 +8338,9 @@ msgid "ClientMessage is a container used to hold one result message." msgstr "ClientMessage 是用于容纳一条结果信息的容器。" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid ":py:obj:`Code `\\ \\(value\\)" -msgstr "" +msgstr ":py:obj:`Code `\\ \\(value\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.Code:1 of @@ -7902,17 +8364,20 @@ msgid "Configs record." msgstr "配置日志记录" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid ":py:obj:`Context `\\ \\(state\\)" -msgstr "" +msgstr ":py:obj:`Context `\\ \\(state\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.context.Context:1 of +#, fuzzy msgid "State of your run." -msgstr "" +msgstr "您的运行状态。" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" -msgstr "" +msgstr ":py:obj:`DisconnectRes `\\ \\(reason\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.DisconnectRes:1 of @@ -7920,10 +8385,12 @@ msgid "DisconnectRes message from client to server." msgstr "客户端向服务器发送 DisconnectRes 信息。" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid "" ":py:obj:`EvaluateIns `\\ \\(parameters\\, " "config\\)" msgstr "" +":py:obj:`EvaluateIns `\\ \\(parameters\\, config\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.EvaluateIns:1 of @@ -7931,10 +8398,13 @@ msgid "Evaluate instructions for a client." 
msgstr "评估客户端的指示。" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid "" ":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " "num\\_examples\\, metrics\\)" msgstr "" +":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " +"num\\_examples\\, metrics\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.EvaluateRes:1 of @@ -7942,8 +8412,9 @@ msgid "Evaluate response from a client." msgstr "评估客户端的反应。" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid ":py:obj:`EventType `\\ \\(value\\)" -msgstr "" +msgstr ":py:obj:`EventType `\\ \\(value\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.telemetry.EventType:1 of @@ -7951,8 +8422,9 @@ msgid "Types of telemetry events." msgstr "遥测事件类型。" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" -msgstr "" +msgstr ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.FitIns:1 of @@ -7960,10 +8432,13 @@ msgid "Fit instructions for a client." msgstr "为客户提供安装说明。" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid "" ":py:obj:`FitRes `\\ \\(status\\, parameters\\, " "num\\_examples\\, metrics\\)" msgstr "" +":py:obj:`FitRes `\\ \\(status\\, parameters\\, " +"num\\_examples\\, metrics\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.FitRes:1 of @@ -7971,17 +8446,21 @@ msgid "Fit response from a client." msgstr "来自客户端的合适回复。" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" -msgstr "" +msgstr ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.message.Error:1 of +#, fuzzy msgid "A dataclass that stores information about an error that occurred." -msgstr "" +msgstr "数据类,用于存储所发生错误的相关信息。" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" msgstr "" +":py:obj:`GetParametersIns `\\ \\(config\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetParametersIns:1 of @@ -7989,10 +8468,13 @@ msgid "Parameters request for a client." msgstr "客户端的参数请求。" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid "" ":py:obj:`GetParametersRes `\\ \\(status\\, " "parameters\\)" msgstr "" +":py:obj:`GetParametersRes `\\ \\(status\\, " +"parameters\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetParametersRes:1 of @@ -8000,8 +8482,10 @@ msgid "Response when asked to return parameters." msgstr "要求返回参数时的响应。" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" msgstr "" +":py:obj:`GetPropertiesIns `\\ \\(config\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetPropertiesIns:1 of @@ -8009,10 +8493,13 @@ msgid "Properties request for a client." msgstr "客户端的属性请求。" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid "" ":py:obj:`GetPropertiesRes `\\ \\(status\\, " "properties\\)" msgstr "" +":py:obj:`GetPropertiesRes `\\ \\(status\\, " +"properties\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.GetPropertiesRes:1 of @@ -8020,71 +8507,94 @@ msgid "Properties response from a client." 
msgstr "来自客户端的属性响应。" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid "" ":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " "error\\]\\)" msgstr "" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.message.Message:1 of +#, fuzzy msgid "State of your application from the viewpoint of the entity using it." -msgstr "" +msgstr "从使用实体的角度看应用程序的状态。" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid ":py:obj:`MessageType `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`MessageType `\\ \\(\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.constant.MessageType:1 of +#, fuzzy msgid "Message type." -msgstr "" +msgstr "信息类型。" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`MessageTypeLegacy `\\ \\(\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.constant.MessageTypeLegacy:1 of +#, fuzzy msgid "Legacy message type." -msgstr "" +msgstr "传统信息类型。" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid "" ":py:obj:`Metadata `\\ \\(run\\_id\\, " "message\\_id\\, src\\_node\\_id\\, ...\\)" msgstr "" +":py:obj:`Metadata `\\ \\(run\\_id\\, message\\_id\\, " +"src\\_node\\_id\\, ...\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.message.Metadata:1 of +#, fuzzy msgid "A dataclass holding metadata associated with the current message." -msgstr "" +msgstr "数据类型,包含与当前报文相关的元数据。" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid "" ":py:obj:`MetricsRecord `\\ " "\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" msgstr "" +":py:obj:`MetricsRecord `\\ \\(\\[metrics\\_dict\\" +", keep\\_input\\]\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.record.metricsrecord.MetricsRecord:1 of +#, fuzzy msgid "Metrics record." -msgstr "" +msgstr "指标记录。" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid ":py:obj:`NDArray `\\" -msgstr "" +msgstr ":py:obj:`NDArray `\\" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid "" "alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " ":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" msgstr "" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid "" ":py:obj:`Parameters `\\ \\(tensors\\, " "tensor\\_type\\)" msgstr "" +":py:obj:`Parameters `\\ \\(tensors\\, tensor\\_type\\" +")" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.Parameters:1 of @@ -8092,10 +8602,13 @@ msgid "Model parameters." msgstr "模型参数。" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid "" ":py:obj:`ParametersRecord `\\ " "\\(\\[array\\_dict\\, keep\\_input\\]\\)" msgstr "" +":py:obj:`ParametersRecord `\\ \\(\\[" +"array\\_dict\\, keep\\_input\\]\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.record.parametersrecord.ParametersRecord:1 of @@ -8104,8 +8617,9 @@ msgid "Parameters record." msgstr "参数" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" -msgstr "" +msgstr ":py:obj:`ReconnectIns `\\ \\(seconds\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.ReconnectIns:1 of @@ -8113,21 +8627,28 @@ msgid "ReconnectIns message from server to client." 
msgstr "服务器发送给客户端的重新连接信息。" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid "" ":py:obj:`RecordSet `\\ " "\\(\\[parameters\\_records\\, ...\\]\\)" msgstr "" +":py:obj:`RecordSet `\\ \\(\\[parameters\\_records\\, " +"...\\]\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.record.recordset.RecordSet:1 of +#, fuzzy msgid "RecordSet stores groups of parameters, metrics and configs." -msgstr "" +msgstr "RecordSet 可存储参数、指标和配置组。" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid "" ":py:obj:`ServerMessage `\\ " "\\(\\[get\\_properties\\_ins\\, ...\\]\\)" msgstr "" +":py:obj:`ServerMessage `\\ \\(\\[get\\_properties" +"\\_ins\\, ...\\]\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.ServerMessage:1 of @@ -8135,8 +8656,9 @@ msgid "ServerMessage is a container used to hold one instruction message." msgstr "ServerMessage 是用于容纳一条指令信息的容器。" #: ../../source/ref-api/flwr.common.rst:64::1 +#, fuzzy msgid ":py:obj:`Status `\\ \\(code\\, message\\)" -msgstr "" +msgstr ":py:obj:`Status `\\ \\(code\\, message\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.Status:1 of @@ -8144,37 +8666,45 @@ msgid "Client status." msgstr "客户端状态。" #: ../../source/ref-api/flwr.common.Array.rst:2 +#, fuzzy msgid "Array" -msgstr "" +msgstr "数组" #: flwr.common.record.parametersrecord.Array:3 of +#, fuzzy msgid "" "A dataclass containing serialized data from an array-like or tensor-like " "object along with some metadata about it." -msgstr "" +msgstr "数据类,包含数组类或张量类对象的序列化数据以及相关元数据。" #: flwr.common.record.parametersrecord.Array:6 of +#, fuzzy msgid "" "A string representing the data type of the serialised object (e.g. " "`np.float32`)" -msgstr "" +msgstr "表示序列化对象数据类型的字符串(例如 `np.float32`)" #: flwr.common.record.parametersrecord.Array:8 of +#, fuzzy msgid "" "A list representing the shape of the unserialized array-like object. This" " is used to deserialize the data (depending on the serialization method) " "or simply as a metadata field." -msgstr "" +msgstr "代表未序列化数组对象形状的列表。它可用于反序列化数据(取决于序列化方法),或" +"仅作为元数据字段使用。" #: flwr.common.record.parametersrecord.Array:12 of +#, fuzzy msgid "" "A string indicating the type of serialisation mechanism used to generate " "the bytes in `data` from an array-like or tensor-like object." -msgstr "" +msgstr "表示序列化机制类型的字符串,用于从类似数组或类似张量的对象中生成 `data` " +"中的字节。" #: flwr.common.record.parametersrecord.Array:15 of +#, fuzzy msgid "A buffer of bytes containing the data." -msgstr "" +msgstr "包含数据的字节缓冲区。" #: ../../source/ref-api/flwr.common.Array.rst:26::1 #, fuzzy @@ -8188,8 +8718,9 @@ msgid "Return the array as a NumPy array." 
msgstr "以 NumPy ndarrays 列表形式返回模型参数" #: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#, fuzzy msgid ":py:obj:`dtype `\\" -msgstr "" +msgstr ":py:obj:`dtype `\\" #: flwr.common.record.parametersrecord.Array.numpy:1::1 of #, fuzzy @@ -8202,8 +8733,9 @@ msgid ":py:obj:`stype `\\" msgstr "server.strategy.Strategy" #: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#, fuzzy msgid ":py:obj:`data `\\" -msgstr "" +msgstr ":py:obj:`data `\\" #: ../../source/ref-api/flwr.common.ClientMessage.rst:2 #, fuzzy @@ -8211,58 +8743,77 @@ msgid "ClientMessage" msgstr "客户端" #: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +#, fuzzy msgid ":py:obj:`evaluate_res `\\" -msgstr "" +msgstr ":py:obj:`evaluate_res `\\" #: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +#, fuzzy msgid ":py:obj:`fit_res `\\" -msgstr "" +msgstr ":py:obj:`fit_res `\\" #: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +#, fuzzy msgid "" ":py:obj:`get_parameters_res " "`\\" msgstr "" +":py:obj:`get_parameters_res `\\" #: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +#, fuzzy msgid "" ":py:obj:`get_properties_res " "`\\" msgstr "" +":py:obj:`get_properties_res `\\" #: ../../source/ref-api/flwr.common.Code.rst:2 +#, fuzzy msgid "Code" -msgstr "" +msgstr "代码" #: flwr.common.typing.Code:1 of +#, fuzzy msgid "Bases: :py:class:`~enum.Enum`" -msgstr "" +msgstr "Bases: :py:class:`~enum.Enum`" #: ../../source/ref-api/flwr.common.Code.rst:26::1 +#, fuzzy msgid ":py:obj:`OK `\\" -msgstr "" +msgstr ":py:obj:`OK `\\" #: ../../source/ref-api/flwr.common.Code.rst:26::1 +#, fuzzy msgid "" ":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " "`\\" msgstr "" +":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED `\\" #: ../../source/ref-api/flwr.common.Code.rst:26::1 +#, fuzzy msgid "" ":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " "`\\" msgstr "" +":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED `\\" #: ../../source/ref-api/flwr.common.Code.rst:26::1 +#, fuzzy msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" -msgstr "" +msgstr ":py:obj:`FIT_NOT_IMPLEMENTED `\\" #: ../../source/ref-api/flwr.common.Code.rst:26::1 +#, fuzzy msgid "" ":py:obj:`EVALUATE_NOT_IMPLEMENTED " "`\\" msgstr "" +":py:obj:`EVALUATE_NOT_IMPLEMENTED `\\" #: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 #, fuzzy @@ -8270,6 +8821,7 @@ msgid "ConfigsRecord" msgstr "配置日志记录" #: flwr.common.record.configsrecord.ConfigsRecord:1 of +#, fuzzy msgid "" "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " "[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " @@ -8279,77 +8831,101 @@ msgid "" ":py:class:`~typing.List`\\ [:py:class:`bytes`], " ":py:class:`~typing.List`\\ [:py:class:`bool`]]]" msgstr "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ [:py:class:`str`" +", :py:obj:`~typing.Union`\\ [:py:class:`int`, :py:class:`float`, " +":py:class:`str`, :py:class:`bytes`, :py:class:`bool`, :py:class:`~typing." +"List`\\ [:py:class:`int`], :py:class:`~typing.List`\\ [:py:class:`float`], " +":py:class:`~typing.List`\\ [:py:class:`str`], :py:class:`~typing.List`\\ " +"[:py:class:`bytes`], :py:class:`~typing.List`\\ [:py:class:`bool`]]]" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid ":py:obj:`clear `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`clear `\\ \\(\\)" #: flwr.common.record.typeddict.TypedDict.clear:1 #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid "Remove all items from R." 
-msgstr "" +msgstr "从 R 中删除所有项目。" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid ":py:obj:`count_bytes `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`count_bytes `\\ \\(\\)" #: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 #: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 #: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid "Return number of Bytes stored in this object." -msgstr "" +msgstr "返回存储在此对象中的字节数。" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -msgstr "" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" #: flwr.common.record.typeddict.TypedDict.clear:1::1 #: flwr.common.record.typeddict.TypedDict.get:1 of +#, fuzzy msgid "d defaults to None." -msgstr "" +msgstr "d 默认为 \"无\"。" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid ":py:obj:`items `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`items `\\ \\(\\)" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid ":py:obj:`keys `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`keys `\\ \\(\\)" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -msgstr "" +msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" #: flwr.common.record.typeddict.TypedDict.clear:1::1 #: flwr.common.record.typeddict.TypedDict.pop:1 of +#, fuzzy msgid "If key is not found, d is returned if given, otherwise KeyError is raised." -msgstr "" +msgstr "如果未找到 key,则返回 d(如果给定),否则引发 KeyError。" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid "" ":py:obj:`update `\\ \\(\\[E\\, " "\\]\\*\\*F\\)" msgstr "" +":py:obj:`update `\\ \\(\\[E\\, \\]\\*\\*F\\" +")" #: flwr.common.record.typeddict.TypedDict.clear:1::1 #: flwr.common.record.typeddict.TypedDict.update:1 of +#, fuzzy msgid "Update R from dict/iterable E and F." -msgstr "" +msgstr "根据二进制/可迭代 E 和 F 更新 R。" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid ":py:obj:`values `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`values `\\ \\(\\)" #: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of +#, fuzzy msgid "This function counts booleans as occupying 1 Byte." -msgstr "" +msgstr "该函数将布尔值计算为占用 1 个字节。" #: ../../source/ref-api/flwr.common.Context.rst:2 +#, fuzzy msgid "Context" -msgstr "" +msgstr "背景" #: flwr.common.context.Context:3 of +#, fuzzy msgid "" "Holds records added by the entity in a given run and that will stay " "local. This means that the data it holds will never leave the system it's" @@ -8358,6 +8934,9 @@ msgid "" "different points during the lifecycle of this entity (e.g. across " "multiple rounds)" msgstr "" +"保存实体在给定运行中添加的记录,这些记录将保留在本地。这意味着它保存的数据永" +"远不会离开运行的系统。在执行模式时,它可用作中间存储或抓取板。它还可以作为存" +"储器,在实体生命周期的不同阶段(如多轮)进行访问。" #: ../../source/ref-api/flwr.common.Context.rst:28::1 #, fuzzy @@ -8365,40 +8944,49 @@ msgid ":py:obj:`state `\\" msgstr "server.strategy.Strategy" #: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 +#, fuzzy msgid "DisconnectRes" -msgstr "" +msgstr "断开Res" #: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 +#, fuzzy msgid ":py:obj:`reason `\\" -msgstr "" +msgstr ":py:obj:`reason `\\" #: ../../source/ref-api/flwr.common.Error.rst:2 +#, fuzzy msgid "Error" -msgstr "" +msgstr "错误" #: flwr.common.message.Error:3 of +#, fuzzy msgid "An identifier for the error." 
-msgstr "" +msgstr "错误的标识符。" #: flwr.common.message.Error:5 of +#, fuzzy msgid "A reason for why the error arose (e.g. an exception stack-trace)" -msgstr "" +msgstr "出错原因(如异常堆栈跟踪)" #: flwr.common.Error.code:1::1 of +#, fuzzy msgid ":py:obj:`code `\\" -msgstr "" +msgstr ":py:obj:`code `\\" #: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of +#, fuzzy msgid "Error code." -msgstr "" +msgstr "错误代码。" #: flwr.common.Error.code:1::1 of +#, fuzzy msgid ":py:obj:`reason `\\" -msgstr "" +msgstr ":py:obj:`reason `\\" #: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of +#, fuzzy msgid "Reason reported about the error." -msgstr "" +msgstr "报告的错误原因。" #: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 #, fuzzy @@ -8406,32 +8994,39 @@ msgid "EvaluateIns" msgstr "说明" #: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +#, fuzzy msgid ":py:obj:`parameters `\\" -msgstr "" +msgstr ":py:obj:`parameters `\\" #: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +#, fuzzy msgid ":py:obj:`config `\\" -msgstr "" +msgstr ":py:obj:`config `\\" #: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 +#, fuzzy msgid "EvaluateRes" -msgstr "" +msgstr "评估Res" #: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +#, fuzzy msgid ":py:obj:`status `\\" -msgstr "" +msgstr ":py:obj:`status `\\" #: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +#, fuzzy msgid ":py:obj:`loss `\\" -msgstr "" +msgstr ":py:obj:`loss `\\" #: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +#, fuzzy msgid ":py:obj:`num_examples `\\" -msgstr "" +msgstr ":py:obj:`num_examples `\\" #: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +#, fuzzy msgid ":py:obj:`metrics `\\" -msgstr "" +msgstr ":py:obj:`metrics `\\" #: ../../source/ref-api/flwr.common.EventType.rst:2 #, fuzzy @@ -8439,148 +9034,198 @@ msgid "EventType" msgstr "返回类型" #: flwr.common.telemetry.EventType:1 of +#, fuzzy msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" -msgstr "" +msgstr "Bases: :py:class:`str`, :py:class:`~enum.Enum`" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 +#, fuzzy msgid ":py:obj:`PING `\\" -msgstr "" +msgstr ":py:obj:`PING `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 +#, fuzzy msgid ":py:obj:`START_CLIENT_ENTER `\\" msgstr "" +":py:obj:`START_CLIENT_ENTER `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 +#, fuzzy msgid ":py:obj:`START_CLIENT_LEAVE `\\" msgstr "" +":py:obj:`START_CLIENT_LEAVE `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 +#, fuzzy msgid ":py:obj:`START_SERVER_ENTER `\\" msgstr "" +":py:obj:`START_SERVER_ENTER `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 +#, fuzzy msgid ":py:obj:`START_SERVER_LEAVE `\\" msgstr "" +":py:obj:`START_SERVER_LEAVE `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 +#, fuzzy msgid "" ":py:obj:`RUN_DRIVER_API_ENTER " "`\\" msgstr "" +":py:obj:`RUN_DRIVER_API_ENTER `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 +#, fuzzy msgid "" ":py:obj:`RUN_DRIVER_API_LEAVE " "`\\" msgstr "" +":py:obj:`RUN_DRIVER_API_LEAVE `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 +#, fuzzy msgid "" ":py:obj:`RUN_FLEET_API_ENTER " "`\\" msgstr "" +":py:obj:`RUN_FLEET_API_ENTER `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 +#, fuzzy msgid "" ":py:obj:`RUN_FLEET_API_LEAVE " "`\\" msgstr "" +":py:obj:`RUN_FLEET_API_LEAVE `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 +#, fuzzy msgid "" ":py:obj:`RUN_SUPERLINK_ENTER " "`\\" msgstr "" +":py:obj:`RUN_SUPERLINK_ENTER 
`\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 +#, fuzzy msgid "" ":py:obj:`RUN_SUPERLINK_LEAVE " "`\\" msgstr "" +":py:obj:`RUN_SUPERLINK_LEAVE `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 +#, fuzzy msgid "" ":py:obj:`START_SIMULATION_ENTER " "`\\" msgstr "" +":py:obj:`START_SIMULATION_ENTER `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 +#, fuzzy msgid "" ":py:obj:`START_SIMULATION_LEAVE " "`\\" msgstr "" +":py:obj:`START_SIMULATION_LEAVE `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 +#, fuzzy msgid ":py:obj:`DRIVER_CONNECT `\\" -msgstr "" +msgstr ":py:obj:`DRIVER_CONNECT `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 +#, fuzzy msgid ":py:obj:`DRIVER_DISCONNECT `\\" -msgstr "" +msgstr ":py:obj:`DRIVER_DISCONNECT `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 +#, fuzzy msgid ":py:obj:`START_DRIVER_ENTER `\\" msgstr "" +":py:obj:`START_DRIVER_ENTER `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 +#, fuzzy msgid ":py:obj:`START_DRIVER_LEAVE `\\" msgstr "" +":py:obj:`START_DRIVER_LEAVE `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 +#, fuzzy msgid "" ":py:obj:`RUN_CLIENT_APP_ENTER " "`\\" msgstr "" +":py:obj:`RUN_CLIENT_APP_ENTER `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 +#, fuzzy msgid "" ":py:obj:`RUN_CLIENT_APP_LEAVE " "`\\" msgstr "" +":py:obj:`RUN_CLIENT_APP_LEAVE `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 +#, fuzzy msgid "" ":py:obj:`RUN_SERVER_APP_ENTER " "`\\" msgstr "" +":py:obj:`RUN_SERVER_APP_ENTER `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 +#, fuzzy msgid "" ":py:obj:`RUN_SERVER_APP_LEAVE " "`\\" msgstr "" +":py:obj:`RUN_SERVER_APP_LEAVE `\\" #: ../../source/ref-api/flwr.common.FitIns.rst:2 +#, fuzzy msgid "FitIns" -msgstr "" +msgstr "FitIns" #: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +#, fuzzy msgid ":py:obj:`parameters `\\" -msgstr "" +msgstr ":py:obj:`parameters `\\" #: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +#, fuzzy msgid ":py:obj:`config `\\" -msgstr "" +msgstr ":py:obj:`config `\\" #: ../../source/ref-api/flwr.common.FitRes.rst:2 +#, fuzzy msgid "FitRes" -msgstr "" +msgstr "FitRes" #: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +#, fuzzy msgid ":py:obj:`status `\\" -msgstr "" +msgstr ":py:obj:`status `\\" #: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +#, fuzzy msgid ":py:obj:`parameters `\\" -msgstr "" +msgstr ":py:obj:`parameters `\\" #: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +#, fuzzy msgid ":py:obj:`num_examples `\\" -msgstr "" +msgstr ":py:obj:`num_examples `\\" #: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +#, fuzzy msgid ":py:obj:`metrics `\\" -msgstr "" +msgstr ":py:obj:`metrics `\\" #: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 #, fuzzy @@ -8588,8 +9233,9 @@ msgid "GetParametersIns" msgstr "参数" #: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 +#, fuzzy msgid ":py:obj:`config `\\" -msgstr "" +msgstr ":py:obj:`config `\\" #: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 #, fuzzy @@ -8597,32 +9243,39 @@ msgid "GetParametersRes" msgstr "参数" #: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +#, fuzzy msgid ":py:obj:`status `\\" -msgstr "" +msgstr ":py:obj:`status `\\" #: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +#, fuzzy msgid ":py:obj:`parameters `\\" -msgstr "" +msgstr ":py:obj:`parameters `\\" #: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 +#, fuzzy msgid 
"GetPropertiesIns" -msgstr "" +msgstr "GetPropertiesIns" #: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 +#, fuzzy msgid ":py:obj:`config `\\" -msgstr "" +msgstr ":py:obj:`config `\\" #: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 +#, fuzzy msgid "GetPropertiesRes" -msgstr "" +msgstr "GetPropertiesRes" #: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +#, fuzzy msgid ":py:obj:`status `\\" -msgstr "" +msgstr ":py:obj:`status `\\" #: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +#, fuzzy msgid ":py:obj:`properties `\\" -msgstr "" +msgstr ":py:obj:`properties `\\" #: ../../source/ref-api/flwr.common.Message.rst:2 #, fuzzy @@ -8631,64 +9284,81 @@ msgstr "服务器端" #: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 #: flwr.common.message.Message:3 of +#, fuzzy msgid "A dataclass including information about the message to be executed." -msgstr "" +msgstr "数据类型,包括要执行的信息的相关信息。" #: flwr.common.message.Message:5 of +#, fuzzy msgid "" "Holds records either sent by another entity (e.g. sent by the server-side" " logic to a client, or vice-versa) or that will be sent to it." -msgstr "" +msgstr "保存由其他实体发送的记录(如由服务器端逻辑发送到客户端,反之亦然)或将发送到" +"该实体的记录。" #: flwr.common.message.Message:8 of +#, fuzzy msgid "" "A dataclass that captures information about an error that took place when" " processing another message." -msgstr "" +msgstr "数据类,用于捕捉处理其他报文时发生的错误信息。" #: ../../source/ref-api/flwr.common.Message.rst:35::1 +#, fuzzy msgid "" ":py:obj:`create_error_reply `\\ " "\\(error\\, ttl\\)" msgstr "" +":py:obj:`create_error_reply `\\ \\(" +"error\\, ttl\\)" #: ../../source/ref-api/flwr.common.Message.rst:35::1 #: flwr.common.message.Message.create_error_reply:1 of +#, fuzzy msgid "Construct a reply message indicating an error happened." -msgstr "" +msgstr "构建一条回复信息,说明发生了错误。" #: ../../source/ref-api/flwr.common.Message.rst:35::1 +#, fuzzy msgid "" ":py:obj:`create_reply `\\ \\(content\\," " ttl\\)" msgstr "" +":py:obj:`create_reply `\\ \\(content\\, " +"ttl\\)" #: ../../source/ref-api/flwr.common.Message.rst:35::1 #: flwr.common.message.Message.create_reply:1 of +#, fuzzy msgid "Create a reply to this message with specified content and TTL." -msgstr "" +msgstr "以指定的内容和 TTL 创建对该信息的回复。" #: ../../source/ref-api/flwr.common.Message.rst:35::1 +#, fuzzy msgid ":py:obj:`has_content `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`has_content `\\ \\(\\)" #: ../../source/ref-api/flwr.common.Message.rst:35::1 #: flwr.common.message.Message.has_content:1 of +#, fuzzy msgid "Return True if message has content, else False." -msgstr "" +msgstr "如果信息有内容,则返回 True,否则返回 False。" #: ../../source/ref-api/flwr.common.Message.rst:35::1 +#, fuzzy msgid ":py:obj:`has_error `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`has_error `\\ \\(\\)" #: ../../source/ref-api/flwr.common.Message.rst:35::1 #: flwr.common.message.Message.has_error:1 of +#, fuzzy msgid "Return True if message has an error, else False." -msgstr "" +msgstr "如果信息有错误,则返回 True,否则返回 False。" #: flwr.common.Message.content:1::1 of +#, fuzzy msgid ":py:obj:`content `\\" -msgstr "" +msgstr ":py:obj:`content `\\" #: flwr.common.Message.content:1 flwr.common.Message.content:1::1 #: of @@ -8697,42 +9367,53 @@ msgid "The content of this message." msgstr "评估客户端的反应。" #: flwr.common.Message.content:1::1 of +#, fuzzy msgid ":py:obj:`error `\\" -msgstr "" +msgstr ":py:obj:`error `\\" #: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of +#, fuzzy msgid "Error captured by this message." 
-msgstr "" +msgstr "该信息捕捉到的错误。" #: flwr.common.Message.content:1::1 of +#, fuzzy msgid ":py:obj:`metadata `\\" -msgstr "" +msgstr ":py:obj:`metadata `\\" #: flwr.common.message.Message.create_error_reply:3 of +#, fuzzy msgid "The error that was encountered." -msgstr "" +msgstr "遇到的错误。" #: flwr.common.Metadata.dst_node_id:1::1 #: flwr.common.Metadata.ttl:1 flwr.common.message.Message.create_error_reply:5 #: flwr.common.message.Message.create_reply:9 flwr.common.message.Metadata:16 #: of +#, fuzzy msgid "Time-to-live for this message." -msgstr "" +msgstr "该信息的有效时间。" #: flwr.common.message.Message.create_reply:3 of +#, fuzzy msgid "" "The method generates a new `Message` as a reply to this message. It " "inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " "this message and sets 'reply_to_message' to the ID of this message." msgstr "" +"该方法会生成一条新的 \"信息\",作为对该信息的回复。该方法继承了该消息的 " +"\"run_id\"、\"src_node_id\"、\"dst_node_id \"和 \"message_type\",并将 " +"\"reply_to_message \"设置为该消息的 ID。" #: flwr.common.message.Message.create_reply:7 of +#, fuzzy msgid "The content for the reply message." -msgstr "" +msgstr "回复信息的内容。" #: flwr.common.message.Message.create_reply:12 of +#, fuzzy msgid "A new `Message` instance representing the reply." -msgstr "" +msgstr "代表回复的新的 `Message` 实例。" #: ../../source/ref-api/flwr.common.MessageType.rst:2 #, fuzzy @@ -8740,176 +9421,223 @@ msgid "MessageType" msgstr "返回类型" #: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +#, fuzzy msgid ":py:obj:`EVALUATE `\\" -msgstr "" +msgstr ":py:obj:`EVALUATE `\\" #: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +#, fuzzy msgid ":py:obj:`QUERY `\\" -msgstr "" +msgstr ":py:obj:`QUERY `\\" #: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +#, fuzzy msgid ":py:obj:`TRAIN `\\" -msgstr "" +msgstr ":py:obj:`TRAIN `\\" #: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 +#, fuzzy msgid "MessageTypeLegacy" -msgstr "" +msgstr "MessageTypeLegacy" #: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +#, fuzzy msgid ":py:obj:`GET_PARAMETERS `\\" msgstr "" +":py:obj:`GET_PARAMETERS `\\" #: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +#, fuzzy msgid ":py:obj:`GET_PROPERTIES `\\" msgstr "" +":py:obj:`GET_PROPERTIES `\\" #: flwr.common.Metadata.dst_node_id:1::1 #: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of +#, fuzzy msgid "An identifier for the current run." -msgstr "" +msgstr "当前运行的标识符。" #: flwr.common.Metadata.dst_node_id:1::1 #: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of +#, fuzzy msgid "An identifier for the current message." -msgstr "" +msgstr "当前信息的标识符。" #: flwr.common.Metadata.dst_node_id:1::1 #: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of +#, fuzzy msgid "An identifier for the node sending this message." -msgstr "" +msgstr "发送此信息的节点的标识符。" #: flwr.common.Metadata.dst_node_id:1 #: flwr.common.Metadata.dst_node_id:1::1 #: flwr.common.message.Metadata:9 of +#, fuzzy msgid "An identifier for the node receiving this message." -msgstr "" +msgstr "接收此信息的节点的标识符。" #: flwr.common.Metadata.dst_node_id:1::1 #: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of +#, fuzzy msgid "An identifier for the message this message replies to." -msgstr "" +msgstr "该信息回复的信息的标识符。" #: flwr.common.message.Metadata:13 of +#, fuzzy msgid "" "An identifier for grouping messages. In some settings, this is used as " "the FL round." 
-msgstr "" +msgstr "用于分组报文的标识符。在某些设置中,它被用作 FL 轮。" #: flwr.common.Metadata.dst_node_id:1::1 #: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of +#, fuzzy msgid "A string that encodes the action to be executed on the receiving end." -msgstr "" +msgstr "编码接收端要执行的操作的字符串。" #: flwr.common.message.Metadata:21 of +#, fuzzy msgid "" "An identifier that can be used when loading a particular data partition " "for a ClientApp. Making use of this identifier is more relevant when " "conducting simulations." -msgstr "" +msgstr "为 ClientApp " +"加载特定数据分区时可使用的标识符。在进行模拟时,使用该标识符更有意义。" #: flwr.common.Metadata.dst_node_id:1::1 of +#, fuzzy msgid ":py:obj:`dst_node_id `\\" -msgstr "" +msgstr ":py:obj:`dst_node_id `\\" #: flwr.common.Metadata.dst_node_id:1::1 of +#, fuzzy msgid ":py:obj:`group_id `\\" -msgstr "" +msgstr ":py:obj:`group_id `\\" #: flwr.common.Metadata.dst_node_id:1::1 #: flwr.common.Metadata.group_id:1 of +#, fuzzy msgid "An identifier for grouping messages." -msgstr "" +msgstr "用于分组信息的标识符。" #: flwr.common.Metadata.dst_node_id:1::1 of +#, fuzzy msgid ":py:obj:`message_id `\\" -msgstr "" +msgstr ":py:obj:`message_id `\\" #: flwr.common.Metadata.dst_node_id:1::1 of +#, fuzzy msgid ":py:obj:`message_type `\\" -msgstr "" +msgstr ":py:obj:`message_type `\\" #: flwr.common.Metadata.dst_node_id:1::1 of +#, fuzzy msgid ":py:obj:`partition_id `\\" -msgstr "" +msgstr ":py:obj:`partition_id `\\" #: flwr.common.Metadata.dst_node_id:1::1 #: flwr.common.Metadata.partition_id:1 of +#, fuzzy msgid "An identifier telling which data partition a ClientApp should use." -msgstr "" +msgstr "告诉 ClientApp 应使用哪个数据分区的标识符。" #: flwr.common.Metadata.dst_node_id:1::1 of +#, fuzzy msgid ":py:obj:`reply_to_message `\\" -msgstr "" +msgstr ":py:obj:`reply_to_message `\\" #: flwr.common.Metadata.dst_node_id:1::1 of +#, fuzzy msgid ":py:obj:`run_id `\\" -msgstr "" +msgstr ":py:obj:`run_id `\\" #: flwr.common.Metadata.dst_node_id:1::1 of +#, fuzzy msgid ":py:obj:`src_node_id `\\" -msgstr "" +msgstr ":py:obj:`src_node_id `\\" #: flwr.common.Metadata.dst_node_id:1::1 of +#, fuzzy msgid ":py:obj:`ttl `\\" -msgstr "" +msgstr ":py:obj:`ttl `\\" #: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 +#, fuzzy msgid "MetricsRecord" -msgstr "" +msgstr "MetricsRecord" #: flwr.common.record.metricsrecord.MetricsRecord:1 of +#, fuzzy msgid "" "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " "[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " ":py:class:`float`, :py:class:`~typing.List`\\ [:py:class:`int`], " ":py:class:`~typing.List`\\ [:py:class:`float`]]]" msgstr "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ [:py:class:`str`" +", :py:obj:`~typing.Union`\\ [:py:class:`int`, :py:class:`float`, " +":py:class:`~typing.List`\\ [:py:class:`int`], :py:class:`~typing.List`\\ " +"[:py:class:`float`]]]" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid ":py:obj:`clear `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`clear `\\ \\(\\)" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid ":py:obj:`count_bytes `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`count_bytes `\\ \\(\\)" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -msgstr "" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid ":py:obj:`items `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`items `\\ \\(\\)" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid 
":py:obj:`keys `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`keys `\\ \\(\\)" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -msgstr "" +msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid "" ":py:obj:`update `\\ \\(\\[E\\, " "\\]\\*\\*F\\)" msgstr "" +":py:obj:`update `\\ \\(\\[E\\, \\]\\*\\*F\\" +")" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid ":py:obj:`values `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`values `\\ \\(\\)" #: ../../source/ref-api/flwr.common.NDArray.rst:2 +#, fuzzy msgid "NDArray" -msgstr "" +msgstr "NDArray" #: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +#, fuzzy msgid ":py:obj:`tensors `\\" -msgstr "" +msgstr ":py:obj:`tensors `\\" #: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +#, fuzzy msgid ":py:obj:`tensor_type `\\" -msgstr "" +msgstr ":py:obj:`tensor_type `\\" #: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 #, fuzzy @@ -8917,59 +9645,79 @@ msgid "ParametersRecord" msgstr "参数" #: flwr.common.record.parametersrecord.ParametersRecord:1 of +#, fuzzy msgid "" "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " "[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" msgstr "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ [:py:class:`str`" +", :py:class:`~flwr.common.record.parametersrecord.Array`]" #: flwr.common.record.parametersrecord.ParametersRecord:3 of +#, fuzzy msgid "" "A dataclass storing named Arrays in order. This means that it holds " "entries as an OrderedDict[str, Array]. ParametersRecord objects can be " "viewed as an equivalent to PyTorch's state_dict, but holding serialised " "tensors instead." msgstr "" +"按顺序存储命名数组的数据类。这意味着它以 OrderedDict[str, Array] " +"的形式保存条目。ParametersRecord 对象相当于 PyTorch 的 " +"state_dict,但它保存的是序列化的张量。" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid ":py:obj:`clear `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`clear `\\ \\(\\)" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid ":py:obj:`count_bytes `\\ \\(\\)" msgstr "" +":py:obj:`count_bytes `\\ \\(\\)" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -msgstr "" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid ":py:obj:`items `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`items `\\ \\(\\)" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid ":py:obj:`keys `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`keys `\\ \\(\\)" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -msgstr "" +msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid "" ":py:obj:`update `\\ \\(\\[E\\, " "\\]\\*\\*F\\)" msgstr "" +":py:obj:`update `\\ \\(\\[E\\, \\]\\*\\*" +"F\\)" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#, fuzzy msgid ":py:obj:`values `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`values `\\ \\(\\)" #: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of +#, fuzzy msgid "" "Note that a small amount of Bytes might also be included in this counting" " that correspond to metadata of the serialized object (e.g. of NumPy " "array) needed for deseralization." 
-msgstr "" +msgstr "请注意,该计数中还可能包含少量字节,这些字节与序列化对象(如 NumPy " +"数组)的元数据相对应,需要进行去eralization。" #: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 #, fuzzy @@ -8977,39 +9725,48 @@ msgid "ReconnectIns" msgstr "启用 SSL 连接" #: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 +#, fuzzy msgid ":py:obj:`seconds `\\" -msgstr "" +msgstr ":py:obj:`seconds `\\" #: ../../source/ref-api/flwr.common.RecordSet.rst:2 +#, fuzzy msgid "RecordSet" -msgstr "" +msgstr "RecordSet" #: flwr.common.RecordSet.configs_records:1::1 of +#, fuzzy msgid ":py:obj:`configs_records `\\" -msgstr "" +msgstr ":py:obj:`configs_records `\\" #: flwr.common.RecordSet.configs_records:1 #: flwr.common.RecordSet.configs_records:1::1 of +#, fuzzy msgid "Dictionary holding ConfigsRecord instances." -msgstr "" +msgstr "包含 ConfigsRecord 实例的字典。" #: flwr.common.RecordSet.configs_records:1::1 of +#, fuzzy msgid ":py:obj:`metrics_records `\\" -msgstr "" +msgstr ":py:obj:`metrics_records `\\" #: flwr.common.RecordSet.configs_records:1::1 #: flwr.common.RecordSet.metrics_records:1 of +#, fuzzy msgid "Dictionary holding MetricsRecord instances." -msgstr "" +msgstr "保存 MetricsRecord 实例的字典。" #: flwr.common.RecordSet.configs_records:1::1 of +#, fuzzy msgid ":py:obj:`parameters_records `\\" msgstr "" +":py:obj:`parameters_records `\\" #: flwr.common.RecordSet.configs_records:1::1 #: flwr.common.RecordSet.parameters_records:1 of +#, fuzzy msgid "Dictionary holding ParametersRecord instances." -msgstr "" +msgstr "存放 ParametersRecord 实例的字典。" #: ../../source/ref-api/flwr.common.ServerMessage.rst:2 #, fuzzy @@ -9017,24 +9774,30 @@ msgid "ServerMessage" msgstr "服务器端" #: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#, fuzzy msgid ":py:obj:`evaluate_ins `\\" -msgstr "" +msgstr ":py:obj:`evaluate_ins `\\" #: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#, fuzzy msgid ":py:obj:`fit_ins `\\" -msgstr "" +msgstr ":py:obj:`fit_ins `\\" #: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#, fuzzy msgid "" ":py:obj:`get_parameters_ins " "`\\" msgstr "" +":py:obj:`get_parameters_ins `\\" #: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#, fuzzy msgid "" ":py:obj:`get_properties_ins " "`\\" msgstr "" +":py:obj:`get_properties_ins `\\" #: ../../source/ref-api/flwr.common.Status.rst:2 #, fuzzy @@ -9042,20 +9805,24 @@ msgid "Status" msgstr "客户端状态。" #: ../../source/ref-api/flwr.common.Status.rst:29::1 +#, fuzzy msgid ":py:obj:`code `\\" -msgstr "" +msgstr ":py:obj:`code `\\" #: ../../source/ref-api/flwr.common.Status.rst:29::1 +#, fuzzy msgid ":py:obj:`message `\\" -msgstr "" +msgstr ":py:obj:`message `\\" #: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 +#, fuzzy msgid "array\\_from\\_numpy" -msgstr "" +msgstr "array\\_from\\_numpy" #: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 +#, fuzzy msgid "bytes\\_to\\_ndarray" -msgstr "" +msgstr "bytes\\_to\\_ndarray" #: ../../source/ref-api/flwr.common.configure.rst:2 #, fuzzy @@ -9063,12 +9830,14 @@ msgid "configure" msgstr "配置日志记录" #: ../../source/ref-api/flwr.common.event.rst:2 +#, fuzzy msgid "event" -msgstr "" +msgstr "事件" #: ../../source/ref-api/flwr.common.log.rst:2 +#, fuzzy msgid "log" -msgstr "" +msgstr "登录" #: logging.Logger.log:3 of msgid "" @@ -9082,28 +9851,33 @@ msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" msgstr "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" #: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 +#, fuzzy msgid "ndarray\\_to\\_bytes" -msgstr "" +msgstr 
"ndarray\\_to\\_bytes" #: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 +#, fuzzy msgid "ndarrays\\_to\\_parameters" -msgstr "" +msgstr "ndarrays\\_to\\_parameters" #: ../../source/ref-api/flwr.common.now.rst:2 +#, fuzzy msgid "now" -msgstr "" +msgstr "现在" #: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 +#, fuzzy msgid "parameters\\_to\\_ndarrays" -msgstr "" +msgstr "parameters\\_to\\_ndarrays" #: ../../source/ref-api/flwr.server.rst:2 msgid "server" msgstr "服务器" #: ../../source/ref-api/flwr.server.rst:27::1 +#, fuzzy msgid ":py:obj:`run_driver_api `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`run_driver_api `\\ \\(\\)" #: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.run_driver_api:1 of @@ -9112,8 +9886,9 @@ msgid "Run Flower server (Driver API)." msgstr "flower-driver-api" #: ../../source/ref-api/flwr.server.rst:27::1 +#, fuzzy msgid ":py:obj:`run_fleet_api `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`run_fleet_api `\\ \\(\\)" #: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.run_fleet_api:1 of @@ -9122,8 +9897,9 @@ msgid "Run Flower server (Fleet API)." msgstr "Flower 服务器。" #: ../../source/ref-api/flwr.server.rst:27::1 +#, fuzzy msgid ":py:obj:`run_server_app `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`run_server_app `\\ \\(\\)" #: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.run_serverapp.run_server_app:1 of @@ -9132,19 +9908,24 @@ msgid "Run Flower server app." msgstr "Flower 服务器。" #: ../../source/ref-api/flwr.server.rst:27::1 +#, fuzzy msgid ":py:obj:`run_superlink `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`run_superlink `\\ \\(\\)" #: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.run_superlink:1 of +#, fuzzy msgid "Run Flower server (Driver API and Fleet API)." -msgstr "" +msgstr "运行 Flower 服务器(Driver API 和 Fleet API)。" #: ../../source/ref-api/flwr.server.rst:27::1 +#, fuzzy msgid "" ":py:obj:`start_driver `\\ \\(\\*\\[\\, " "server\\_address\\, server\\, ...\\]\\)" msgstr "" +":py:obj:`start_driver `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.compat.app.start_driver:1 of @@ -9153,10 +9934,13 @@ msgid "Start a Flower Driver API server." msgstr "启动基于 Ray 的Flower模拟服务器。" #: ../../source/ref-api/flwr.server.rst:27::1 +#, fuzzy msgid "" ":py:obj:`start_server `\\ \\(\\*\\[\\, " "server\\_address\\, server\\, ...\\]\\)" msgstr "" +":py:obj:`start_server `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.rst:27::1 #: flwr.server.app.start_server:1 of @@ -9164,8 +9948,9 @@ msgid "Start a Flower server using the gRPC transport layer." msgstr "使用 gRPC 传输层启动 Flower 服务器。" #: ../../source/ref-api/flwr.server.rst:41::1 +#, fuzzy msgid ":py:obj:`ClientManager `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`ClientManager `\\ \\(\\)" #: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.client_manager.ClientManager:1 of @@ -9185,12 +9970,14 @@ msgstr "" #: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.driver.driver.Driver:1 of +#, fuzzy msgid "`Driver` class provides an interface to the Driver API." -msgstr "" +msgstr "`Driver` 类为驱动程序 API 提供了一个接口。" #: ../../source/ref-api/flwr.server.rst:41::1 +#, fuzzy msgid ":py:obj:`History `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`History `\\ \\(\\)" #: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.history.History:1 of @@ -9199,21 +9986,28 @@ msgid "History class for training and/or evaluation metrics collection." 
msgstr "**hist** -- 包含训练和评估指标的对象。" #: ../../source/ref-api/flwr.server.rst:41::1 +#, fuzzy msgid "" ":py:obj:`LegacyContext `\\ \\(state\\[\\, " "config\\, strategy\\, ...\\]\\)" msgstr "" +":py:obj:`LegacyContext `\\ \\(state\\[\\, config\\" +", strategy\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.compat.legacy_context.LegacyContext:1 of +#, fuzzy msgid "Legacy Context." -msgstr "" +msgstr "传承背景。" #: ../../source/ref-api/flwr.server.rst:41::1 +#, fuzzy msgid "" ":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " "strategy\\]\\)" msgstr "" +":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " +"strategy\\]\\)" #: ../../source/ref-api/flwr.server.rst:41::1 #, fuzzy @@ -9245,8 +10039,10 @@ msgid "Flower server config." msgstr "Flower 服务器。" #: ../../source/ref-api/flwr.server.rst:41::1 +#, fuzzy msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" msgstr "" +":py:obj:`SimpleClientManager `\\ \\(\\)" #: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.client_manager.SimpleClientManager:1 of @@ -9281,19 +10077,23 @@ msgid "ClientManager" msgstr "客户端" #: flwr.server.client_manager.ClientManager.all:1::1 of +#, fuzzy msgid ":py:obj:`all `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`all `\\ \\(\\)" #: flwr.server.client_manager.ClientManager.all:1 #: flwr.server.client_manager.ClientManager.all:1::1 #: flwr.server.client_manager.SimpleClientManager.all:1 #: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#, fuzzy msgid "Return all available clients." -msgstr "" +msgstr "返回所有可用客户。" #: flwr.server.client_manager.ClientManager.all:1::1 of +#, fuzzy msgid ":py:obj:`num_available `\\ \\(\\)" msgstr "" +":py:obj:`num_available `\\ \\(\\)" #: flwr.server.client_manager.ClientManager.all:1::1 #: flwr.server.client_manager.ClientManager.num_available:1 @@ -9304,70 +10104,87 @@ msgid "Return the number of available clients." msgstr "返回样本大小和所需的可用客户数量。" #: flwr.server.client_manager.ClientManager.all:1::1 of +#, fuzzy msgid ":py:obj:`register `\\ \\(client\\)" -msgstr "" +msgstr ":py:obj:`register `\\ \\(client\\)" #: flwr.server.client_manager.ClientManager.all:1::1 #: flwr.server.client_manager.ClientManager.register:1 #: flwr.server.client_manager.SimpleClientManager.all:1::1 #: flwr.server.client_manager.SimpleClientManager.register:1 of +#, fuzzy msgid "Register Flower ClientProxy instance." -msgstr "" +msgstr "注册 Flower ClientProxy 实例。" #: flwr.server.client_manager.ClientManager.all:1::1 of +#, fuzzy msgid "" ":py:obj:`sample `\\ " "\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" +":py:obj:`sample `\\ \\(num\\_clients\\[\\, " +"min\\_num\\_clients\\, criterion\\]\\)" #: flwr.server.client_manager.ClientManager.all:1::1 #: flwr.server.client_manager.ClientManager.sample:1 #: flwr.server.client_manager.SimpleClientManager.all:1::1 #: flwr.server.client_manager.SimpleClientManager.sample:1 of +#, fuzzy msgid "Sample a number of Flower ClientProxy instances." -msgstr "" +msgstr "取样若干 Flower ClientProxy 实例。" #: flwr.server.client_manager.ClientManager.all:1::1 of +#, fuzzy msgid ":py:obj:`unregister `\\ \\(client\\)" msgstr "" +":py:obj:`unregister `\\ \\(client\\)" #: flwr.server.client_manager.ClientManager.all:1::1 #: flwr.server.client_manager.ClientManager.unregister:1 #: flwr.server.client_manager.SimpleClientManager.all:1::1 #: flwr.server.client_manager.SimpleClientManager.unregister:1 of +#, fuzzy msgid "Unregister Flower ClientProxy instance." 
-msgstr "" +msgstr "取消注册 Flower ClientProxy 实例。" #: flwr.server.client_manager.ClientManager.all:1::1 of +#, fuzzy msgid "" ":py:obj:`wait_for `\\ " "\\(num\\_clients\\, timeout\\)" msgstr "" +":py:obj:`wait_for `\\ \\(num\\_clients\\" +", timeout\\)" #: flwr.server.client_manager.ClientManager.all:1::1 #: flwr.server.client_manager.ClientManager.wait_for:1 #: flwr.server.client_manager.SimpleClientManager.all:1::1 #: flwr.server.client_manager.SimpleClientManager.wait_for:1 of +#, fuzzy msgid "Wait until at least `num_clients` are available." -msgstr "" +msgstr "等待至少 `num_clients` 可用。" #: flwr.server.client_manager.ClientManager.num_available:3 #: flwr.server.client_manager.SimpleClientManager.num_available:3 of +#, fuzzy msgid "**num_available** -- The number of currently available clients." -msgstr "" +msgstr "**num_available** -- 当前可用客户端的数量。" #: flwr.server.client_manager.ClientManager.register:6 #: flwr.server.client_manager.SimpleClientManager.register:6 of +#, fuzzy msgid "" "**success** -- Indicating if registration was successful. False if " "ClientProxy is already registered or can not be registered for any " "reason." -msgstr "" +msgstr "**success** -- 表示注册是否成功。如果 ClientProxy 已注册或因故无法注册,则为 " +"False。" #: flwr.server.client_manager.ClientManager.unregister:3 #: flwr.server.client_manager.SimpleClientManager.unregister:3 of +#, fuzzy msgid "This method is idempotent." -msgstr "" +msgstr "这种方法是幂等的。" #: ../../source/ref-api/flwr.server.Driver.rst:2 #, fuzzy @@ -9417,50 +10234,64 @@ msgstr "server.strategy.Strategy" #: flwr.server.driver.driver.Driver.close:1 #: flwr.server.driver.driver.Driver.close:1::1 of +#, fuzzy msgid "Disconnect from the SuperLink if connected." -msgstr "" +msgstr "如果已连接,请断开与超级链接的连接。" #: flwr.server.driver.driver.Driver.close:1::1 of +#, fuzzy msgid "" ":py:obj:`create_message `\\ " "\\(content\\, message\\_type\\, ...\\)" msgstr "" +":py:obj:`create_message `\\ \\(content\\, " +"message\\_type\\, ...\\)" #: flwr.server.driver.driver.Driver.close:1::1 #: flwr.server.driver.driver.Driver.create_message:1 of +#, fuzzy msgid "Create a new message with specified parameters." -msgstr "" +msgstr "使用指定参数创建新信息。" #: flwr.server.driver.driver.Driver.close:1::1 of +#, fuzzy msgid ":py:obj:`get_node_ids `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`get_node_ids `\\ \\(\\)" #: flwr.server.driver.driver.Driver.close:1::1 #: flwr.server.driver.driver.Driver.get_node_ids:1 of +#, fuzzy msgid "Get node IDs." -msgstr "" +msgstr "获取节点 ID。" #: flwr.server.driver.driver.Driver.close:1::1 of +#, fuzzy msgid "" ":py:obj:`pull_messages `\\ " "\\(message\\_ids\\)" msgstr "" +":py:obj:`pull_messages `\\ \\(message\\_ids" +"\\)" #: flwr.server.driver.driver.Driver.close:1::1 #: flwr.server.driver.driver.Driver.pull_messages:1 of +#, fuzzy msgid "Pull messages based on message IDs." -msgstr "" +msgstr "根据信息 ID 提取信息。" #: flwr.server.driver.driver.Driver.close:1::1 of +#, fuzzy msgid "" ":py:obj:`push_messages `\\ " "\\(messages\\)" msgstr "" +":py:obj:`push_messages `\\ \\(messages\\)" #: flwr.server.driver.driver.Driver.close:1::1 #: flwr.server.driver.driver.Driver.push_messages:1 of +#, fuzzy msgid "Push messages to specified node IDs." -msgstr "" +msgstr "向指定的节点 ID 推送信息。" #: flwr.server.driver.driver.Driver.close:1::1 of #, fuzzy @@ -9474,98 +10305,119 @@ msgstr "" #: flwr.server.driver.driver.Driver.close:1::1 #: flwr.server.driver.driver.Driver.send_and_receive:1 of +#, fuzzy msgid "Push messages to specified node IDs and pull the reply messages." 
-msgstr "" +msgstr "向指定的节点 ID 推送信息并提取回复信息。" #: flwr.server.driver.driver.Driver.create_message:3 of +#, fuzzy msgid "" "This method constructs a new `Message` with given content and metadata. " "The `run_id` and `src_node_id` will be set automatically." -msgstr "" +msgstr "本方法使用给定的内容和元数据构建新的 `Message` 。run_id \"和 \"src_node_id " +"\"将自动设置。" #: flwr.server.driver.driver.Driver.create_message:6 of +#, fuzzy msgid "" "The content for the new message. This holds records that are to be sent " "to the destination node." -msgstr "" +msgstr "新信息的内容。其中包含要发送到目的节点的记录。" #: flwr.server.driver.driver.Driver.create_message:9 of +#, fuzzy msgid "" "The type of the message, defining the action to be executed on the " "receiving end." -msgstr "" +msgstr "信息类型,定义接收端要执行的操作。" #: flwr.server.driver.driver.Driver.create_message:12 of +#, fuzzy msgid "The ID of the destination node to which the message is being sent." -msgstr "" +msgstr "信息发送目的地节点的 ID。" #: flwr.server.driver.driver.Driver.create_message:14 of +#, fuzzy msgid "" "The ID of the group to which this message is associated. In some " "settings, this is used as the FL round." -msgstr "" +msgstr "与该信息相关联的组的 ID。在某些设置中,它被用作 FL 轮。" #: flwr.server.driver.driver.Driver.create_message:17 of +#, fuzzy msgid "" "Time-to-live for the round trip of this message, i.e., the time from " "sending this message to receiving a reply. It specifies the duration for " "which the message and its potential reply are considered valid." -msgstr "" +msgstr "此报文往返的有效时间,即从发送此报文到收到回复的时间。它规定了信息及其潜在回" +"复被视为有效的持续时间。" #: flwr.server.driver.driver.Driver.create_message:22 of +#, fuzzy msgid "" "**message** -- A new `Message` instance with the specified content and " "metadata." -msgstr "" +msgstr "**message** -- 具有指定内容和元数据的新 \"信息 \"实例。" #: flwr.server.driver.driver.Driver.pull_messages:3 of +#, fuzzy msgid "" "This method is used to collect messages from the SuperLink that " "correspond to a set of given message IDs." -msgstr "" +msgstr "该方法用于从超级链接中收集与一组给定消息 ID 相对应的消息。" #: flwr.server.driver.driver.Driver.pull_messages:6 of +#, fuzzy msgid "An iterable of message IDs for which reply messages are to be retrieved." -msgstr "" +msgstr "要检索回复信息的信息 ID 的可迭代项。" #: flwr.server.driver.driver.Driver.pull_messages:9 of +#, fuzzy msgid "**messages** -- An iterable of messages received." -msgstr "" +msgstr "**messages** -- 收到的信息迭代。" #: flwr.server.driver.driver.Driver.push_messages:3 of +#, fuzzy msgid "" "This method takes an iterable of messages and sends each message to the " "node specified in `dst_node_id`." -msgstr "" +msgstr "该方法接收一个可迭代的消息,并将每条消息发送到 `dst_node_id` 中指定的节点。" #: flwr.server.driver.driver.Driver.push_messages:6 #: flwr.server.driver.driver.Driver.send_and_receive:7 of +#, fuzzy msgid "An iterable of messages to be sent." -msgstr "" +msgstr "要发送的信息迭代。" #: flwr.server.driver.driver.Driver.push_messages:9 of +#, fuzzy msgid "" "**message_ids** -- An iterable of IDs for the messages that were sent, " "which can be used to pull replies." -msgstr "" +msgstr "**message_ids** -- 已发送信息的可迭代 ID,可用于提取回复信息。" #: flwr.server.driver.driver.Driver.send_and_receive:3 of +#, fuzzy msgid "" "This method sends a list of messages to their destination node IDs and " "then waits for the replies. It continues to pull replies until either all" " replies are received or the specified timeout duration is exceeded." -msgstr "" +msgstr "该方法会向目标节点 ID 发送信息列表,然后等待回复。它会继续提取回复,直到收到" +"所有回复或超过指定的超时时间。" #: flwr.server.driver.driver.Driver.send_and_receive:9 of +#, fuzzy msgid "" "The timeout duration in seconds. 
If specified, the method will wait for " "replies for this duration. If `None`, there is no time limit and the " "method will wait until replies for all messages are received." -msgstr "" +msgstr "超时时间(秒)。如果指定,该方法将在此期限内等待回复。如果指定为 \"无\"" +",则没有时间限制,该方法将等待直到收到所有信息的回复。" #: flwr.server.driver.driver.Driver.send_and_receive:14 of +#, fuzzy msgid "**replies** -- An iterable of reply messages received from the SuperLink." -msgstr "" +msgstr "**replies** -- 从超级链接收到的回复信息的迭代。" #: flwr.server.driver.driver.Driver.send_and_receive:18 #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:53 @@ -9576,23 +10428,31 @@ msgid "Notes" msgstr "无" #: flwr.server.driver.driver.Driver.send_and_receive:19 of +#, fuzzy msgid "" "This method uses `push_messages` to send the messages and `pull_messages`" " to collect the replies. If `timeout` is set, the method may not return " "replies for all sent messages. A message remains valid until its TTL, " "which is not affected by `timeout`." msgstr "" +"该方法使用 `push_messages` 发送信息,并使用 `pull_messages` 收集回复。" +"如果设置了 `timeout`,该方法可能不会返回所有已发送消息的回复。消息在其 TTL " +"之前一直有效,不受 `timeout` 影响。" #: ../../source/ref-api/flwr.server.History.rst:2 +#, fuzzy msgid "History" -msgstr "" +msgstr "历史" #: flwr.server.history.History.add_loss_centralized:1::1 of +#, fuzzy msgid "" ":py:obj:`add_loss_centralized " "`\\ \\(server\\_round\\, " "loss\\)" msgstr "" +":py:obj:`add_loss_centralized `\\ " +"\\(server\\_round\\, loss\\)" #: flwr.server.history.History.add_loss_centralized:1 #: flwr.server.history.History.add_loss_centralized:1::1 of @@ -9601,23 +10461,30 @@ msgid "Add one loss entry (from centralized evaluation)." msgstr "集中评估" #: flwr.server.history.History.add_loss_centralized:1::1 of +#, fuzzy msgid "" ":py:obj:`add_loss_distributed " "`\\ \\(server\\_round\\, " "loss\\)" msgstr "" +":py:obj:`add_loss_distributed `\\ " +"\\(server\\_round\\, loss\\)" #: flwr.server.history.History.add_loss_centralized:1::1 #: flwr.server.history.History.add_loss_distributed:1 of +#, fuzzy msgid "Add one loss entry (from distributed evaluation)." -msgstr "" +msgstr "增加一个损失条目(来自分布式评估)。" #: flwr.server.history.History.add_loss_centralized:1::1 of +#, fuzzy msgid "" ":py:obj:`add_metrics_centralized " "`\\ \\(server\\_round\\, " "metrics\\)" msgstr "" +":py:obj:`add_metrics_centralized `\\ \\(server\\_round\\, metrics\\)" #: flwr.server.history.History.add_loss_centralized:1::1 #: flwr.server.history.History.add_metrics_centralized:1 of @@ -9626,11 +10493,14 @@ msgid "Add metrics entries (from centralized evaluation)." msgstr "集中评估" #: flwr.server.history.History.add_loss_centralized:1::1 of +#, fuzzy msgid "" ":py:obj:`add_metrics_distributed " "`\\ \\(server\\_round\\, " "metrics\\)" msgstr "" +":py:obj:`add_metrics_distributed `\\ \\(server\\_round\\, metrics\\)" #: flwr.server.history.History.add_loss_centralized:1::1 #: flwr.server.history.History.add_metrics_distributed:1 of @@ -9639,24 +10509,30 @@ msgid "Add metrics entries (from distributed evaluation)." msgstr "定制的集中/分布式评估" #: flwr.server.history.History.add_loss_centralized:1::1 of +#, fuzzy msgid "" ":py:obj:`add_metrics_distributed_fit " "`\\ \\(server\\_round\\," " ...\\)" msgstr "" +":py:obj:`add_metrics_distributed_fit `\\ \\(server\\_round\\, ...\\)" #: flwr.server.history.History.add_loss_centralized:1::1 #: flwr.server.history.History.add_metrics_distributed_fit:1 of +#, fuzzy msgid "Add metrics entries (from distributed fit)." 
-msgstr "" +msgstr "添加度量条目(来自分布式拟合)。" #: ../../source/ref-api/flwr.server.LegacyContext.rst:2 +#, fuzzy msgid "LegacyContext" -msgstr "" +msgstr "遗留上下文" #: flwr.server.compat.legacy_context.LegacyContext:1 of +#, fuzzy msgid "Bases: :py:class:`~flwr.common.context.Context`" -msgstr "" +msgstr "Bases: :py:class:`~flwr.common.context.Context`" #: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 #, fuzzy @@ -9669,8 +10545,9 @@ msgid ":py:obj:`strategy `\\" msgstr "server.strategy.Strategy" #: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#, fuzzy msgid ":py:obj:`client_manager `\\" -msgstr "" +msgstr ":py:obj:`client_manager `\\" #: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 #, fuzzy @@ -9683,8 +10560,9 @@ msgid ":py:obj:`state `\\" msgstr "server.strategy.Strategy" #: flwr.server.server.Server.client_manager:1::1 of +#, fuzzy msgid ":py:obj:`client_manager `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`client_manager `\\ \\(\\)" #: flwr.server.server.Server.client_manager:1 #: flwr.server.server.Server.client_manager:1::1 of @@ -9693,21 +10571,28 @@ msgid "Return ClientManager." msgstr "返回客户端(本身)。" #: flwr.server.server.Server.client_manager:1::1 of +#, fuzzy msgid "" ":py:obj:`disconnect_all_clients " "`\\ \\(timeout\\)" msgstr "" +":py:obj:`disconnect_all_clients `" +"\\ \\(timeout\\)" #: flwr.server.server.Server.client_manager:1::1 #: flwr.server.server.Server.disconnect_all_clients:1 of +#, fuzzy msgid "Send shutdown signal to all clients." -msgstr "" +msgstr "向所有客户端发送关闭信号。" #: flwr.server.server.Server.client_manager:1::1 of +#, fuzzy msgid "" ":py:obj:`evaluate_round `\\ " "\\(server\\_round\\, timeout\\)" msgstr "" +":py:obj:`evaluate_round `\\ \\(" +"server\\_round\\, timeout\\)" #: flwr.server.server.Server.client_manager:1::1 #: flwr.server.server.Server.evaluate_round:1 of @@ -9716,8 +10601,9 @@ msgid "Validate current global model on a number of clients." msgstr "当前(全局)模型参数。" #: flwr.server.server.Server.client_manager:1::1 of +#, fuzzy msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" -msgstr "" +msgstr ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" #: flwr.server.server.Server.client_manager:1::1 #: flwr.server.server.Server.fit:1 of @@ -9726,10 +10612,13 @@ msgid "Run federated averaging for a number of rounds." msgstr "联邦平均动量策略。" #: flwr.server.server.Server.client_manager:1::1 of +#, fuzzy msgid "" ":py:obj:`fit_round `\\ \\(server\\_round\\," " timeout\\)" msgstr "" +":py:obj:`fit_round `\\ \\(server\\_round\\, " +"timeout\\)" #: flwr.server.server.Server.client_manager:1::1 #: flwr.server.server.Server.fit_round:1 of @@ -9738,19 +10627,25 @@ msgid "Perform a single round of federated averaging." msgstr "本轮联邦学习。" #: flwr.server.server.Server.client_manager:1::1 of +#, fuzzy msgid "" ":py:obj:`set_max_workers `\\ " "\\(max\\_workers\\)" msgstr "" +":py:obj:`set_max_workers `\\ \\(" +"max\\_workers\\)" #: flwr.server.server.Server.client_manager:1::1 #: flwr.server.server.Server.set_max_workers:1 of +#, fuzzy msgid "Set the max_workers used by ThreadPoolExecutor." 
-msgstr "" +msgstr "设置 ThreadPoolExecutor 使用的最大工作器数。" #: flwr.server.server.Server.client_manager:1::1 of +#, fuzzy msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" msgstr "" +":py:obj:`set_strategy `\\ \\(strategy\\)" #: flwr.server.server.Server.client_manager:1::1 #: flwr.server.server.Server.set_strategy:1 of @@ -9769,8 +10664,9 @@ msgid "Use the `ServerApp` with an existing `Strategy`:" msgstr "使用现有策略" #: flwr.server.server_app.ServerApp:15 of +#, fuzzy msgid "Use the `ServerApp` with a custom main function:" -msgstr "" +msgstr "使用带有自定义主函数的 `ServerApp`:" #: flwr.server.server_app.ServerApp.main:1::1 of #, fuzzy @@ -9779,8 +10675,9 @@ msgstr "server.strategy.Strategy" #: flwr.server.server_app.ServerApp.main:1 #: flwr.server.server_app.ServerApp.main:1::1 of +#, fuzzy msgid "Return a decorator that registers the main fn with the server app." -msgstr "" +msgstr "返回向服务器应用程序注册 main fn 的装饰器。" #: ../../source/ref-api/flwr.server.ServerConfig.rst:2 #, fuzzy @@ -9788,78 +10685,102 @@ msgid "ServerConfig" msgstr "服务器" #: flwr.server.server_config.ServerConfig:3 of +#, fuzzy msgid "" "All attributes have default values which allows users to configure just " "the ones they care about." -msgstr "" +msgstr "所有属性都有默认值,用户只需配置自己关心的属性即可。" #: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +#, fuzzy msgid ":py:obj:`num_rounds `\\" -msgstr "" +msgstr ":py:obj:`num_rounds `\\" #: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +#, fuzzy msgid ":py:obj:`round_timeout `\\" -msgstr "" +msgstr ":py:obj:`round_timeout `\\" #: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 +#, fuzzy msgid "SimpleClientManager" -msgstr "" +msgstr "SimpleClientManager" #: flwr.server.client_manager.SimpleClientManager:1 of +#, fuzzy msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" -msgstr "" +msgstr "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" #: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#, fuzzy msgid ":py:obj:`all `\\ \\(\\)" -msgstr "" +msgstr ":py:obj:`all `\\ \\(\\)" #: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#, fuzzy msgid "" ":py:obj:`num_available `\\" " \\(\\)" msgstr "" +":py:obj:`num_available `\\ \\(" +"\\)" #: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#, fuzzy msgid "" ":py:obj:`register `\\ " "\\(client\\)" msgstr "" +":py:obj:`register `\\ \\(client\\)" #: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#, fuzzy msgid "" ":py:obj:`sample `\\ " "\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" +":py:obj:`sample `\\ \\(num\\_clients" +"\\[\\, min\\_num\\_clients\\, criterion\\]\\)" #: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#, fuzzy msgid "" ":py:obj:`unregister `\\ " "\\(client\\)" msgstr "" +":py:obj:`unregister `\\ \\(" +"client\\)" #: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#, fuzzy msgid "" ":py:obj:`wait_for `\\ " "\\(num\\_clients\\[\\, timeout\\]\\)" msgstr "" +":py:obj:`wait_for `\\ \\(" +"num\\_clients\\[\\, timeout\\]\\)" #: flwr.server.client_manager.SimpleClientManager.wait_for:3 of +#, fuzzy msgid "" "Blocks until the requested number of clients is available or until a " "timeout is reached. Current timeout default: 1 day." -msgstr "" +msgstr "阻塞,直到请求的客户端数量可用或达到超时为止。当前超时默认值:1 天。" #: flwr.server.client_manager.SimpleClientManager.wait_for:6 of +#, fuzzy msgid "The number of clients to wait for." 
-msgstr "" +msgstr "需要等待的客户数量。" #: flwr.server.client_manager.SimpleClientManager.wait_for:8 of +#, fuzzy msgid "The time in seconds to wait for, defaults to 86400 (24h)." -msgstr "" +msgstr "以秒为单位的等待时间,默认为 86400(24 小时)。" #: flwr.server.client_manager.SimpleClientManager.wait_for:11 of +#, fuzzy msgid "**success**" -msgstr "" +msgstr "**success**" #: ../../source/ref-api/flwr.server.run_driver_api.rst:2 #, fuzzy @@ -9867,12 +10788,14 @@ msgid "run\\_driver\\_api" msgstr "flower-driver-api" #: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 +#, fuzzy msgid "run\\_fleet\\_api" -msgstr "" +msgstr "run\\_fleet\\_api" #: ../../source/ref-api/flwr.server.run_server_app.rst:2 +#, fuzzy msgid "run\\_server\\_app" -msgstr "" +msgstr "run\\_server\\_app" #: ../../source/ref-api/flwr.server.run_superlink.rst:2 #, fuzzy @@ -9925,8 +10848,9 @@ msgstr "" "`flwr.server.client_manager.SimpleClientManager`。" #: flwr.server.compat.app.start_driver:25 of +#, fuzzy msgid "The Driver object to use." -msgstr "" +msgstr "要使用的驱动程序对象。" #: flwr.server.app.start_server:37 flwr.server.compat.app.start_driver:28 of msgid "**hist** -- Object containing training and evaluation metrics." @@ -9992,10 +10916,13 @@ msgid "strategy" msgstr "Krum 策略。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" ":py:obj:`Bulyan `\\ \\(\\*\\, " "fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" msgstr "" +":py:obj:`Bulyan `\\ \\(\\*\\, fraction\\_fit\\, " +"fraction\\_evaluate\\, ...\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.bulyan.Bulyan:1 of @@ -10003,10 +10930,13 @@ msgid "Bulyan strategy." msgstr "Bulyan 策略。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" ":py:obj:`DPFedAvgAdaptive `\\ " "\\(strategy\\, num\\_sampled\\_clients\\)" msgstr "" +":py:obj:`DPFedAvgAdaptive `\\ \\(" +"strategy\\, num\\_sampled\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of @@ -10014,10 +10944,13 @@ msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" ":py:obj:`DPFedAvgFixed `\\ " "\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" msgstr "" +":py:obj:`DPFedAvgFixed `\\ \\(strategy\\" +", num\\_sampled\\_clients\\, ...\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of @@ -10025,11 +10958,14 @@ msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" ":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " "`\\ " "\\(...\\)" msgstr "" +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping `\\ \\(...\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 @@ -10039,11 +10975,14 @@ msgid "Strategy wrapper for central DP with client-side adaptive clipping." 
msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" ":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " "`\\ " "\\(...\\)" msgstr "" +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping `\\ \\(...\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 @@ -10053,11 +10992,14 @@ msgid "Strategy wrapper for central DP with server-side adaptive clipping." msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" ":py:obj:`DifferentialPrivacyClientSideFixedClipping " "`\\ " "\\(...\\)" msgstr "" +":py:obj:`DifferentialPrivacyClientSideFixedClipping `\\ \\(...\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 @@ -10067,11 +11009,14 @@ msgid "Strategy wrapper for central DP with client-side fixed clipping." msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" ":py:obj:`DifferentialPrivacyServerSideFixedClipping " "`\\ " "\\(...\\)" msgstr "" +":py:obj:`DifferentialPrivacyServerSideFixedClipping `\\ \\(...\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 @@ -10081,10 +11026,13 @@ msgid "Strategy wrapper for central DP with server-side fixed clipping." msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" ":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" +":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedadagrad.FedAdagrad:1 of @@ -10092,10 +11040,13 @@ msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." msgstr "FedAdagrad 策略 - 使用 Adagrad 进行自适应联合优化。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" ":py:obj:`FedAdam `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" +":py:obj:`FedAdam `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedadam.FedAdam:1 of @@ -10103,10 +11054,13 @@ msgid "FedAdam - Adaptive Federated Optimization using Adam." msgstr "FedAdam - 使用 Adam 进行自适应联合优化。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" ":py:obj:`FedAvg `\\ \\(\\*\\[\\, " "fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" +":py:obj:`FedAvg `\\ \\(\\*\\[\\, fraction\\_fit" +"\\, fraction\\_evaluate\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedavg.FedAvg:1 @@ -10115,16 +11069,22 @@ msgid "Federated Averaging strategy." 
msgstr "联邦平均策略。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" ":py:obj:`FedAvgAndroid `\\ " "\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" +":py:obj:`FedAvgAndroid `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" ":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" +":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedavgm.FedAvgM:1 of @@ -10132,10 +11092,13 @@ msgid "Federated Averaging with Momentum strategy." msgstr "联邦平均动量策略。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" ":py:obj:`FedMedian `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedmedian.FedMedian:1 of @@ -10144,10 +11107,13 @@ msgid "Configurable FedMedian strategy implementation." msgstr "可配置的 FedAvg 策略实施。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" ":py:obj:`FedOpt `\\ \\(\\*\\[\\, " "fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" +":py:obj:`FedOpt `\\ \\(\\*\\[\\, fraction\\_fit" +"\\, fraction\\_evaluate\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedopt.FedOpt:1 of @@ -10156,10 +11122,13 @@ msgid "Federated Optim strategy." msgstr "联邦优化策略。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" ":py:obj:`FedProx `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" +":py:obj:`FedProx `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedprox.FedProx:1 of @@ -10167,10 +11136,13 @@ msgid "Federated Optimization strategy." msgstr "联邦优化策略。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" ":py:obj:`FedTrimmedAvg `\\ " "\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" +":py:obj:`FedTrimmedAvg `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of @@ -10178,10 +11150,13 @@ msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." msgstr "带修剪均值的联邦平均法[Dong Yin 等,2021]。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" ":py:obj:`FedXgbBagging `\\ " "\\(\\[evaluate\\_function\\]\\)" msgstr "" +":py:obj:`FedXgbBagging `\\ \\(\\[" +"evaluate\\_function\\]\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of @@ -10190,10 +11165,13 @@ msgid "Configurable FedXgbBagging strategy implementation." msgstr "可配置的 FedXgbNAvg 策略实施。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" ":py:obj:`FedXgbCyclic `\\ " "\\(\\*\\*kwargs\\)" msgstr "" +":py:obj:`FedXgbCyclic `\\ \\(\\*\\*" +"kwargs\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of @@ -10202,10 +11180,13 @@ msgid "Configurable FedXgbCyclic strategy implementation." 
msgstr "可配置的 FedAvg 策略实施。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" ":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " "\\*\\*kwargs\\)" msgstr "" +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, \\*" +"\\*kwargs\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of @@ -10213,10 +11194,13 @@ msgid "Configurable FedXgbNnAvg strategy implementation." msgstr "可配置的 FedXgbNAvg 策略实施。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" ":py:obj:`FedYogi `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedyogi.FedYogi:1 of @@ -10224,11 +11208,14 @@ msgid "FedYogi [Reddi et al., 2020] strategy." msgstr "FedYogi [Reddi 等人,2020] 策略。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" ":py:obj:`FaultTolerantFedAvg " "`\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" msgstr "" +":py:obj:`FaultTolerantFedAvg `\\ \\" +"(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of @@ -10236,10 +11223,13 @@ msgid "Configurable fault-tolerant FedAvg strategy implementation." msgstr "可配置的容错 FedAvg 策略实施。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" ":py:obj:`Krum `\\ \\(\\*\\[\\, " "fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" +":py:obj:`Krum `\\ \\(\\*\\[\\, fraction\\_fit\\, " +"fraction\\_evaluate\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.krum.Krum:1 of @@ -10248,10 +11238,13 @@ msgid "Krum [Blanchard et al., 2017] strategy." msgstr "FedYogi [Reddi 等人,2020] 策略。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" ":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " "q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" msgstr "" +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, q\\_param\\, " +"qffl\\_learning\\_rate\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.qfedavg.QFedAvg:1 of @@ -10269,8 +11262,9 @@ msgid "Abstract base class for server strategy implementations." msgstr "服务器策略实现的抽象基类。" #: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 +#, fuzzy msgid "Bulyan" -msgstr "" +msgstr "Bulyan" #: flwr.server.strategy.bulyan.Bulyan:1 #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 @@ -10429,11 +11423,14 @@ msgid "arguments to the first_aggregation rule" msgstr "第一聚类规则的参数" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\ \\(server\\_round\\, " "results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate `" +"\\ \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 @@ -10447,10 +11444,13 @@ msgid "Aggregate evaluation losses using weighted average." 
msgstr "采用加权平均法计算评估损失总额。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ \\(" +"server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of @@ -10458,11 +11458,14 @@ msgid "Aggregate fit results using Bulyan." msgstr "使用 Bulyan 技术汇总拟合结果。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\ \\(server\\_round\\, " "parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `" +"\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 @@ -10488,10 +11491,13 @@ msgid "Configure the next round of evaluation." msgstr "配置下一轮评估。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ \\(" +"server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 @@ -10520,10 +11526,13 @@ msgid "Configure the next round of training." msgstr "配置下一轮训练。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(server\\_round" +"\\, parameters\\)" #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 @@ -10539,11 +11548,14 @@ msgid "Evaluate model parameters using an evaluation function." msgstr "使用评估函数评估模型参数。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\ " "\\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 @@ -10558,11 +11570,14 @@ msgid "Initialize global model parameters." msgstr "初始化全局模型参数。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_evaluation_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 @@ -10577,10 +11592,13 @@ msgid "Use a fraction of available clients for evaluation." 
msgstr "使用部分可用客户进行评估。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_fit_clients `\\" " \\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_fit_clients `\\ \\(" +"num\\_available\\_clients\\)" #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 @@ -10599,21 +11617,26 @@ msgid "DPFedAvgAdaptive" msgstr "DPFedAvgAdaptive" #: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +#, fuzzy msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" -msgstr "" +msgstr "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" #: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of +#, fuzzy msgid "This class is deprecated and will be removed in a future release." -msgstr "" +msgstr "该类已被弃用,将在以后的版本中删除。" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\ " "\\(server\\_round\\, results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 @@ -10631,11 +11654,14 @@ msgstr "使用给定的策略汇总评估损失。" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`aggregate_fit " "`\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `" +"\\ \\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 @@ -10645,11 +11671,14 @@ msgstr "汇总 DPFedAvgFixed 中的训练结果并更新片段标准。" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of @@ -10658,18 +11687,24 @@ msgstr "使用指定策略配置下一轮评估。" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`configure_fit " "`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `" +"\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(" +"server\\_round\\, parameters\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 @@ -10686,11 +11721,14 @@ msgstr "使用策略中的评估函数评估模型参数。" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\ " "\\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" #: 
flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 @@ -10747,24 +11785,31 @@ msgstr "DPFedAvgFixed" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 #: flwr.server.strategy.fedavg.FedAvg:1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +#, fuzzy msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" -msgstr "" +msgstr "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\ " "\\(server\\_round\\, results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`aggregate_fit " "`\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of @@ -10773,19 +11818,25 @@ msgstr "使用非加权汇总法汇总训练结果。" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`configure_fit " "`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of @@ -10796,18 +11847,24 @@ msgstr "配置包含差分隐私 (DP) 的下一轮训练。" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(" +"server\\_round\\, parameters\\)" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\ " "\\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of msgid "" @@ -10828,16 +11885,19 @@ msgstr "" "一个元组列表。列表中的每个元组都标识了一个`ClientProxy`和该特定`ClientProxy`的`FitIns'。如果某个特定的`ClientProxy`不在此列表中,则表示该`ClientProxy`将不参加下一轮联合学习。" #: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 +#, fuzzy msgid "DifferentialPrivacyClientSideAdaptiveClipping" -msgstr "" +msgstr "DifferentialPrivacyClientSideAdaptiveClipping" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 #: of +#, fuzzy msgid "Use `adaptiveclipping_mod` modifier at the client side." 
-msgstr "" +msgstr "在客户端使用 \"adaptiveclipping_mod \"修改器。" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 #: of +#, fuzzy msgid "" "In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " "performs clipping on the server-side, " @@ -10845,57 +11905,68 @@ msgid "" "happen on the client-side, usually by using the built-in " "`adaptiveclipping_mod`." msgstr "" +"与在服务器端执行剪切的 `DifferentialPrivacyServerSideAdaptiveClipping` " +"相比,`DifferentialPrivacyClientSideAdaptiveClipping` 希望在客户端进行剪切," +"通常使用内置的 `adaptiveclipping_mod`。" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 #: of +#, fuzzy msgid "The strategy to which DP functionalities will be added by this wrapper." -msgstr "" +msgstr "该包装器将添加 DP 功能的策略。" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 #: of +#, fuzzy msgid "The noise multiplier for the Gaussian mechanism for model updates." -msgstr "" +msgstr "用于模型更新的高斯机制的噪声乘数。" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 #: of +#, fuzzy msgid "The number of clients that are sampled on each round." -msgstr "" +msgstr "每轮取样的客户数。" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 #: of +#, fuzzy msgid "" "The initial value of clipping norm. Defaults to 0.1. Andrew et al. " "recommends to set to 0.1." -msgstr "" +msgstr "剪切规范的初始值。默认为 0.1。安德鲁等人建议设置为 0.1。" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 #: of +#, fuzzy msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." -msgstr "" +msgstr "需要剪切的更新量化值。默认为 0.5。" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 #: of +#, fuzzy msgid "" "The learning rate for the clipping norm adaptation. Defaults to 0.2. " "Andrew et al. recommends to set to 0.2." -msgstr "" +msgstr "剪切规范适应的学习率。默认为 0.2。安德鲁等人建议设置为 0.2。" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 #: of +#, fuzzy msgid "" "The stddev of the noise added to the count of updates currently below the" " estimate. Andrew et al. 
recommends to set to `expected_num_records/20`" -msgstr "" +msgstr "添加到当前低于估计值的更新计数中的噪声的 stddev。安德鲁等人建议设置为 " +"\"expected_num_records/20" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 @@ -10908,31 +11979,41 @@ msgstr "server.strategy" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 #: of +#, fuzzy msgid "" "Wrap the strategy with the " "`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" -msgstr "" +msgstr "用 \"DifferentialPrivacyClientSideAdaptiveClipping \"包装器对策略进行包装:" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 #: of +#, fuzzy msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" -msgstr "" +msgstr "在客户端,将 \"adaptiveclipping_mod \"添加到客户端模块中:" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\" " \\(server\\_round\\, results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate `\\ \\(" +"server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`aggregate_fit " "`\\" " \\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ \\(" +"server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 @@ -10945,35 +12026,51 @@ msgstr "汇总 DPFedAvgFixed 中的训练结果并更新片段标准。" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\" " \\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `\\ \\(" +"server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`configure_fit " "`\\" " \\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ \\(" +"server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`evaluate " "`\\" " \\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(server\\_round" +"\\, parameters\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\" " \\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(" +"client\\_manager\\)" #: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 #, fuzzy @@ -10982,59 +12079,76 @@ msgstr "差分隐私" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 #: of +#, fuzzy msgid "Use `fixedclipping_mod` modifier at the client side." 
-msgstr "" +msgstr "在客户端使用 `fixedclipping_mod` 修改器。" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 #: of +#, fuzzy msgid "" "In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " "performs clipping on the server-side, " "`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " "on the client-side, usually by using the built-in `fixedclipping_mod`." msgstr "" +"与在服务器端执行剪切的 \"DifferentialPrivacyServerSideFixedClipping \"相比," +"\"DifferentialPrivacyClientSideFixedClipping \"希望在客户端进行剪切," +"通常是使用内置的 \"fixedclipping_mod\"。" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 #: of +#, fuzzy msgid "" "The noise multiplier for the Gaussian mechanism for model updates. A " "value of 1.0 or higher is recommended for strong privacy." -msgstr "" +msgstr "模型更新高斯机制的噪声乘数。建议使用 1.0 或更高的值,以获得较强的隐私性。" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 #: of +#, fuzzy msgid "The value of the clipping norm." -msgstr "" +msgstr "削波法线的值。" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 #: of +#, fuzzy msgid "" "Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " "wrapper:" -msgstr "" +msgstr "用 \"DifferentialPrivacyClientSideFixedClipping \"包装器包装策略:" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 #: of +#, fuzzy msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" -msgstr "" +msgstr "在客户端,将 \"fixedclipping_mod \"添加到客户端模块中:" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\" " \\(server\\_round\\, results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate `\\ \\(" +"server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`aggregate_fit " "`\\" " \\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ \\(" +"server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 @@ -11045,102 +12159,146 @@ msgstr "然后将汇总结果序列化:" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\" " \\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `\\ \\(" +"server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`configure_fit " "`\\" " \\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ \\(" +"server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`evaluate " "`\\" " \\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(server\\_round\\, " +"parameters\\)" #: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\" " \\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(" +"client\\_manager\\)" #: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 +#, fuzzy msgid "DifferentialPrivacyServerSideAdaptiveClipping" -msgstr "" +msgstr "DifferentialPrivacyServerSideAdaptiveClipping" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 #: of +#, fuzzy msgid "" "The standard deviation of the noise added to the count of updates below " "the estimate. Andrew et al. recommends to set to " "`expected_num_records/20`" -msgstr "" +msgstr "添加到低于估计值的更新计数中的噪声标准偏差。安德鲁等人建议设置为 " +"\"expected_num_records/20" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 #: of +#, fuzzy msgid "" "Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " "wrapper" -msgstr "" +msgstr "用 DifferentialPrivacyServerSideAdaptiveClipping 封装器封装策略" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\" " \\(server\\_round\\, results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate `\\ \\(" +"server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`aggregate_fit " "`\\" " \\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ \\(" +"server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\" " \\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `\\ \\(" +"server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`configure_fit " "`\\" " \\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ \\(" +"server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`evaluate " "`\\" " \\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(server\\_round" +"\\, parameters\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\" " \\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(" +"client\\_manager\\)" #: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 #, fuzzy @@ -11149,69 +12307,96 @@ msgstr "差分隐私" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 #: of +#, fuzzy msgid "" "Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " "wrapper" -msgstr "" +msgstr "用 DifferentialPrivacyServerSideFixedClipping 封装器封装策略" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\" " \\(server\\_round\\, results\\, 
...\\)" msgstr "" +":py:obj:`aggregate_evaluate `\\ \\(" +"server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`aggregate_fit " "`\\" " \\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ \\(" +"server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 #: of +#, fuzzy msgid "Compute the updates, clip, and pass them for aggregation." -msgstr "" +msgstr "计算更新、剪辑并将其传递给聚合。" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\" " \\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `\\ \\(" +"server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`configure_fit " "`\\" " \\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ \\(" +"server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`evaluate " "`\\" " \\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(server\\_round\\, " +"parameters\\)" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\" " \\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(" +"client\\_manager\\)" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 #: of +#, fuzzy msgid "Afterward, add noise to the aggregated parameters." 
-msgstr "" +msgstr "然后,在汇总参数中添加噪声。" #: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 #, fuzzy @@ -11220,19 +12405,25 @@ msgstr "server.strategy.FaultTolerantFedAvg" #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\ " "\\(server\\_round\\, results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`aggregate_fit " "`\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ \\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 @@ -11252,50 +12443,68 @@ msgstr "使用加权平均法汇总拟合结果。" #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`configure_fit " "`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(" +"server\\_round\\, parameters\\)" #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\ " "\\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`num_evaluation_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`num_fit_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 #: ../../source/ref-changelog.md:839 @@ -11305,8 +12514,9 @@ msgstr "FedAdagrad" #: flwr.server.strategy.fedadagrad.FedAdagrad:1 #: flwr.server.strategy.fedadam.FedAdam:1 #: flwr.server.strategy.fedyogi.FedYogi:1 of +#, fuzzy msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" -msgstr "" +msgstr "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" #: flwr.server.strategy.fedadagrad.FedAdagrad:3 #: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 @@ -11346,57 +12556,81 @@ msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." 
msgstr "控制算法的适应度。默认为 1e-9。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\ " "\\(server\\_round\\, results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_fit `\\" " \\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ \\(" +"server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_fit `\\" " \\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ \\(" +"server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(" +"server\\_round\\, parameters\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\ " "\\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_evaluation_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_fit_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 #, fuzzy @@ -11414,57 +12648,81 @@ msgid "Second moment parameter. Defaults to 0.99." 
msgstr "第二动量参数。默认为 0.99。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\ \\(server\\_round\\," " results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ \\(" +"server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\ \\(server\\_round\\," " parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ \\(" +"server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(" +"server\\_round\\, parameters\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\ " "\\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_evaluation_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_fit_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_fit_clients `\\ \\" +"(num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 #, fuzzy @@ -11498,60 +12756,85 @@ msgstr "" "available_clients`,则仍会对 `min_evaluate_clients` 进行采样。默认为 1.0。" #: flwr.server.strategy.fedavg.FedAvg:33 of +#, fuzzy msgid "Enable (True) or disable (False) in-place aggregation of model updates." 
-msgstr "" +msgstr "启用(真)或禁用(假)模型更新的就地聚合。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\ \\(server\\_round\\, " "results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate `" +"\\ \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ \\(" +"server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\ \\(server\\_round\\, " "parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `" +"\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ \\(" +"server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(server\\_round" +"\\, parameters\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\ " "\\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_evaluation_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_fit_clients `\\" " \\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_fit_clients `\\ \\(" +"num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 #, fuzzy @@ -11560,26 +12843,35 @@ msgstr "DPFedAvgAdaptive" #: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\ " "\\(server\\_round\\, results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`aggregate_fit " "`\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`bytes_to_ndarray " "`\\ \\(tensor\\)" msgstr "" +":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" #: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of @@ -11589,41 +12881,56 @@ msgstr "从字节反序列化 NumPy ndarray。" #: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`configure_fit " "`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, 
parameters\\, ...\\)" #: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(" +"server\\_round\\, parameters\\)" #: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\ " "\\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" #: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`ndarray_to_bytes " "`\\ \\(ndarray\\)" msgstr "" +":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" #: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of @@ -11633,35 +12940,47 @@ msgstr "将 NumPy ndarray 序列化为字节。" #: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`ndarrays_to_parameters " "`\\ " "\\(ndarrays\\)" msgstr "" +":py:obj:`ndarrays_to_parameters `\\ \\(ndarrays\\)" #: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`num_evaluation_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" #: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`num_fit_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" #: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`parameters_to_ndarrays " "`\\ " "\\(parameters\\)" msgstr "" +":py:obj:`parameters_to_ndarrays `\\ \\(parameters\\)" #: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 @@ -11691,74 +13010,105 @@ msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." 
msgstr "用于 FedAvgM 的服务器端动量因子。默认为 0.0。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\ \\(server\\_round\\," " results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ \\(" +"server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\ \\(server\\_round\\," " parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ \\(" +"server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(" +"server\\_round\\, parameters\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\ " "\\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_evaluation_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_fit_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_fit_clients `\\ \\" +"(num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 +#, fuzzy msgid "FedMedian" -msgstr "" +msgstr "联邦医保" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\ " "\\(server\\_round\\, results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ \\(" +"server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of @@ -11766,48 +13116,67 @@ msgid "Aggregate fit results using median." 
msgstr "使用中位数汇总拟合结果。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ \\(" +"server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(" +"server\\_round\\, parameters\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\ " "\\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_evaluation_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_fit_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 +#, fuzzy msgid "FedOpt" -msgstr "" +msgstr "FedOpt" #: flwr.server.strategy.fedopt.FedOpt:33 of msgid "Momentum parameter. Defaults to 0.0." @@ -11818,60 +13187,85 @@ msgid "Second moment parameter. Defaults to 0.0." msgstr "第二动量参数。默认为 0.0。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\ \\(server\\_round\\, " "results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate `" +"\\ \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ \\(" +"server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\ \\(server\\_round\\, " "parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `" +"\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ \\(" +"server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(server\\_round" +"\\, parameters\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\ " "\\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_evaluation_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_fit_clients `\\" " 
\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_fit_clients `\\ \\(" +"num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 +#, fuzzy msgid "FedProx" -msgstr "" +msgstr "FedProx" #: flwr.server.strategy.fedprox.FedProx:3 of msgid "Implementation based on https://arxiv.org/abs/1812.06127" @@ -11923,57 +13317,81 @@ msgstr "" "FedAvg,系数越大,使用的正则化就越多(也就是说,在训练过程中,客户端参数需要更接近服务器参数)。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\ \\(server\\_round\\," " results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ \\(" +"server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\ \\(server\\_round\\," " parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ \\(" +"server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(" +"server\\_round\\, parameters\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\ " "\\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_evaluation_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_fit_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_fit_clients `\\ \\" +"(num\\_available\\_clients\\)" #: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of msgid "Sends the proximal factor mu to the clients" @@ -11994,18 +13412,24 @@ msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." msgstr "截取分布两个尾部的分数。默认为 0.2。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\ " "\\(server\\_round\\, results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_fit " "`\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of @@ -12013,57 +13437,79 @@ msgid "Aggregate fit results using trimmed average." 
msgstr "使用修剪平均值汇总拟合结果。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_fit " "`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(" +"server\\_round\\, parameters\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\ " "\\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_evaluation_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_fit_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 +#, fuzzy msgid "FedXgbBagging" -msgstr "" +msgstr "FedXgbBagging" #: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\ " "\\(server\\_round\\, results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 #: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 @@ -12076,11 +13522,14 @@ msgstr "采用加权平均法计算评估损失总额。" #: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`aggregate_fit " "`\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 #: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 @@ -12092,117 +13541,160 @@ msgstr "使用 Bulyan 技术汇总拟合结果。" #: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`configure_fit " "`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(" +"server\\_round\\, parameters\\)" #: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\ " "\\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" #: 
flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`num_evaluation_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" #: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`num_fit_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 +#, fuzzy msgid "FedXgbCyclic" -msgstr "" +msgstr "FedXgbCyclic" #: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\ " "\\(server\\_round\\, results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`aggregate_fit " "`\\ \\(server\\_round\\," " results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`configure_fit " "`\\ \\(server\\_round\\," " parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(" +"server\\_round\\, parameters\\)" #: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\ " "\\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" #: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`num_evaluation_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" #: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`num_fit_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_fit_clients `" +"\\ \\(num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 #, fuzzy @@ -12210,70 +13702,98 @@ msgid "FedXgbNnAvg" msgstr "DP-FedAvg" #: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of +#, fuzzy msgid "" "This strategy is deprecated, but a copy of it is available in Flower " "Baselines: " "https://github.com/adap/flower/tree/main/baselines/hfedxgboost." 
msgstr "" +"该策略已被弃用,但在 Flower Baselines: https://github.com/adap/flower/tree/" +"main/baselines/hfedxgboost 中有其副本。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\ " "\\(server\\_round\\, results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_fit " "`\\ \\(server\\_round\\, " "results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ \\" +"(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_fit " "`\\ \\(server\\_round\\, " "parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ \\" +"(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(" +"server\\_round\\, parameters\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\ " "\\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_evaluation_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_fit_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_fit_clients `" +"\\ \\(num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 +#, fuzzy msgid "FedYogi" -msgstr "" +msgstr "FedYogi" #: flwr.server.strategy.fedyogi.FedYogi:32 of #, fuzzy @@ -12291,61 +13811,86 @@ msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." 
msgstr "控制算法的适应度。默认为 1e-9。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\ \\(server\\_round\\," " results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ \\(" +"server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\ \\(server\\_round\\," " parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ \\(" +"server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(" +"server\\_round\\, parameters\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\ " "\\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_evaluation_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_fit_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_fit_clients `\\ \\" +"(num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 +#, fuzzy msgid "Krum" -msgstr "" +msgstr "Krum" #: flwr.server.strategy.krum.Krum:3 of #, fuzzy @@ -12359,17 +13904,23 @@ msgid "" msgstr "求平均值前保留的客户端数量(MultiKrum)。默认值为 0,在这种情况下会应用经典 Krum。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\ \\(server\\_round\\, " "results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate `\\" +" \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ \\(" +"server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.krum.Krum.aggregate_fit:1 of @@ -12377,43 +13928,61 @@ msgid "Aggregate fit results using Krum." 
msgstr "使用 Krum 汇总拟合结果。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\ \\(server\\_round\\, " "parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `\\" +" \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ \\(" +"server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(server\\_round\\" +", parameters\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\ " "\\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_evaluation_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_fit_clients `\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_fit_clients `\\ \\(" +"num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 #, fuzzy @@ -12421,57 +13990,81 @@ msgid "QFedAvg" msgstr "DP-FedAvg" #: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\ \\(server\\_round\\," " results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ \\(" +"server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\ \\(server\\_round\\," " parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ \\(" +"server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(" +"server\\_round\\, parameters\\)" #: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\ " "\\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" #: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_evaluation_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" #: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" ":py:obj:`num_fit_clients " "`\\ " "\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_fit_clients `\\ \\" +"(num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 #, fuzzy 
@@ -12480,11 +14073,14 @@ msgstr "Krum 策略。" #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`aggregate_evaluate " "`\\ " "\\(server\\_round\\, results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 @@ -12494,10 +14090,13 @@ msgstr "聚合评估结果。" #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ \\(" +"server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 #: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of @@ -12506,25 +14105,34 @@ msgstr "汇总训练结果。" #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`configure_evaluate " "`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ \\(" +"server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ \\(" +"server\\_round\\, parameters\\)" #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 #: flwr.server.strategy.strategy.Strategy.evaluate:1 of @@ -12533,11 +14141,14 @@ msgstr "评估当前的模型参数。" #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 #: of +#, fuzzy msgid "" ":py:obj:`initialize_parameters " "`\\ " "\\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 #: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of @@ -12618,38 +14229,50 @@ msgid "workflow" msgstr "工作流程" #: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#, fuzzy msgid "" ":py:obj:`DefaultWorkflow `\\ " "\\(\\[fit\\_workflow\\, ...\\]\\)" msgstr "" +":py:obj:`DefaultWorkflow `\\ \\(\\[" +"fit\\_workflow\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.workflow.rst:24::1 #: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of +#, fuzzy msgid "Default workflow in Flower." -msgstr "" +msgstr "Flower 中的默认工作流程。" #: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#, fuzzy msgid "" ":py:obj:`SecAggPlusWorkflow `\\ " "\\(num\\_shares\\, ...\\[\\, ...\\]\\)" msgstr "" +":py:obj:`SecAggPlusWorkflow `\\ \\(" +"num\\_shares\\, ...\\[\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.workflow.rst:24::1 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 #: of +#, fuzzy msgid "The workflow for the SecAgg+ protocol." -msgstr "" +msgstr "SecAgg+ 协议的工作流程。" #: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#, fuzzy msgid "" ":py:obj:`SecAggWorkflow `\\ " "\\(reconstruction\\_threshold\\, \\*\\)" msgstr "" +":py:obj:`SecAggWorkflow `\\ \\(" +"reconstruction\\_threshold\\, \\*\\)" #: ../../source/ref-api/flwr.server.workflow.rst:24::1 #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +#, fuzzy msgid "The workflow for the SecAgg protocol." 
-msgstr "" +msgstr "SecAgg 协议的工作流程。" #: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 #, fuzzy @@ -12663,6 +14286,7 @@ msgstr "工作流程" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 #: of +#, fuzzy msgid "" "The SecAgg+ protocol ensures the secure summation of integer vectors " "owned by multiple parties, without accessing any individual integer " @@ -12676,47 +14300,63 @@ msgid "" "('parameters') from the client's `FitRes`. The server then aggregates " "these contributions to compute the weighted average of model parameters." msgstr "" +"SecAgg+ 协议可确保对多方拥有的整数向量进行安全求和,而不会访问任何单个整数向" +"量。该工作流程允许服务器计算所有客户端模型参数的加权平均值,确保个人贡献保持" +"私密。这可以通过客户端同时发送加权因子和本地更新参数的加权版本来实现,为了保" +"护隐私,两者都会被屏蔽。具体来说,每个客户端都会上传带掩码的\"[w, w * params]" +"\",其中加权因子 \"w \"是示例数(\"num_examples\"),\"params \"代表客户端 " +"\"FitRes \"中的模型参数(\"parameters\"" +")。然后,服务器会汇总这些贡献,计算模型参数的加权平均值。" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 #: of +#, fuzzy msgid "" "The protocol involves four main stages: - 'setup': Send SecAgg+ " "configuration to clients and collect their public keys. - 'share keys': " "Broadcast public keys among clients and collect encrypted secret" msgstr "" +"协议包括四个主要阶段: - 设置\": 向客户端发送 SecAgg+ 配置并收集其公钥。- " +"共享密钥\": 在客户端之间广播公钥,并收集加密密钥。" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 #: of +#, fuzzy msgid "key shares." -msgstr "" +msgstr "关键股份。" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:18 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:18 #: of +#, fuzzy msgid "" "'collect masked vectors': Forward encrypted secret key shares to target " "clients and collect masked model parameters." -msgstr "" +msgstr "收集屏蔽向量\": 向目标客户端转发加密密钥共享,并收集屏蔽模型参数。" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:20 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:20 #: of +#, fuzzy msgid "" "'unmask': Collect secret key shares to decrypt and aggregate the model " "parameters." -msgstr "" +msgstr "解密\": 收集密钥共享,解密并汇总模型参数。" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:22 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:22 #: of +#, fuzzy msgid "" "Only the aggregated model parameters are exposed and passed to " "`Strategy.aggregate_fit`, ensuring individual data privacy." -msgstr "" +msgstr "只有聚合模型参数才会公开并传递给 `Strategy." +"aggregate_fit`,从而确保个人数据隐私。" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:25 #: of +#, fuzzy msgid "" "The number of shares into which each client's private key is split under " "the SecAgg+ protocol. If specified as a float, it represents the " @@ -12725,10 +14365,14 @@ msgid "" "these shares, allowing for the secure aggregation of model updates. Each " "client sends one share to each of its neighbors while retaining one." 
msgstr "" +"在 SecAgg+ 协议下,每个客户的私钥被分成的份数。如果指定为浮点数,则代表所有选" +"定客户的比例,份额数将在运行时动态设置。私钥可以从这些份额中重建,从而实现模" +"型更新的安全聚合。每个客户端向其每个邻居发送一份,同时保留一份。" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:25 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:32 #: of +#, fuzzy msgid "" "The minimum number of shares required to reconstruct a client's private " "key, or, if specified as a float, it represents the proportion of the " @@ -12736,149 +14380,194 @@ msgid "" "privacy by allowing for the recovery of contributions from dropped " "clients during aggregation, without compromising individual client data." msgstr "" +"重建客户私钥所需的最小份数,如果指定为浮动,则表示重建所需的份数占总份数的比" +"例。这个阈值允许在聚合过程中恢复掉线客户的贡献,从而确保隐私,而不会泄露单个" +"客户的数据。" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:31 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:38 #: of +#, fuzzy msgid "" "The maximum value of the weight that can be assigned to any single " "client's update during the weighted average calculation on the server " "side, e.g., in the FedAvg algorithm." -msgstr "" +msgstr "在服务器端进行加权平均计算(如 FedAvg " +"算法)时,可分配给任何单个客户端更新的权重的最大值。" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:35 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:42 #: of +#, fuzzy msgid "" "The range within which model parameters are clipped before quantization. " "This parameter ensures each model parameter is bounded within " "[-clipping_range, clipping_range], facilitating quantization." -msgstr "" +msgstr "量化前模型参数的裁剪范围。该参数可确保每个模型参数都在 [-clipping_range, " +"clipping_range] 范围内,便于量化。" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:39 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:46 #: of +#, fuzzy msgid "" "The size of the range into which floating-point model parameters are " "quantized, mapping each parameter to an integer in [0, " "quantization_range-1]. This facilitates cryptographic operations on the " "model updates." -msgstr "" +msgstr "浮点模型参数量化范围的大小,将每个参数映射为 [0, quantization_range-1] " +"中的整数。这有助于对模型更新进行加密操作。" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:43 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:50 #: of +#, fuzzy msgid "" "The range of values from which random mask entries are uniformly sampled " "([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " "Please use 2**n values for `modulus_range` to prevent overflow issues." msgstr "" +"对随机掩码条目进行均匀采样的数值范围([0, modulus_range-1])。modulus_range " +"\"必须小于 4294967296。为防止出现溢出问题,请为 `modulus_range` 使用 2**n " +"的值。" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:47 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:54 #: of +#, fuzzy msgid "" "The timeout duration in seconds. If specified, the workflow will wait for" " replies for this duration each time. If `None`, there is no time limit " "and the workflow will wait until replies for all messages are received." 
msgstr "" +"超时时间(秒)。如果指定,工作流将在每次等待回复的时间内等待回复。如果指定为 " +"\"无\",则没有时间限制,工作流程将一直等待到收到所有信息的回复。" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 #: of +#, fuzzy msgid "" "Generally, higher `num_shares` means more robust to dropouts while " "increasing the computational costs; higher `reconstruction_threshold` " "means better privacy guarantees but less tolerance to dropouts." msgstr "" +"一般来说,\"份额数 \"越高,意味着对丢弃的鲁棒性越强,同时计算成本也会增加;" +"\"重构阈值 \"越高,意味着隐私保证越好,但对丢弃的容忍度越低。" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:58 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:64 #: of +#, fuzzy msgid "Too large `max_weight` may compromise the precision of the quantization." -msgstr "" +msgstr "过大的 `max_weight` 可能会影响量化的精度。" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 #: of +#, fuzzy msgid "`modulus_range` must be 2**n and larger than `quantization_range`." -msgstr "" +msgstr "modulus_range \"必须为 2**n,且大于 \"quantization_range\"。" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 #: of +#, fuzzy msgid "" "When `num_shares` is a float, it is interpreted as the proportion of all " "selected clients, and hence the number of shares will be determined in " "the runtime. This allows for dynamic adjustment based on the total number" " of participating clients." msgstr "" +"当 `num_shares` 为浮点数时,它被解释为所有选定客户端的比例,因此份额数将在运" +"行时确定。这样就可以根据参与客户端的总数进行动态调整。" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:69 #: of +#, fuzzy msgid "" "Similarly, when `reconstruction_threshold` is a float, it is interpreted " "as the proportion of the number of shares needed for the reconstruction " "of a private key. This feature enables flexibility in setting the " "security threshold relative to the number of distributed shares." msgstr "" +"同样,当 `reconstruction_threshold` 为浮点数时,它被解释为重建私钥所需的份额" +"数比例。这一功能使我们可以根据分发的份额数灵活设置安全阈值。" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:73 #: of +#, fuzzy msgid "" "`num_shares`, `reconstruction_threshold`, and the quantization parameters" " (`clipping_range`, `quantization_range`, `modulus_range`) play critical " "roles in balancing privacy, robustness, and efficiency within the SecAgg+" " protocol." msgstr "" +"份额数\"、\"重建阈值 \"和量化参数(\"裁剪范围\"、\"量化范围\"、\"模数范围\")" +"在平衡 SecAgg+ 协议的隐私性、稳健性和效率方面发挥着关键作用。" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 #: of +#, fuzzy msgid "" ":py:obj:`collect_masked_vectors_stage " "`\\" " \\(driver\\, ...\\)" msgstr "" +":py:obj:`collect_masked_vectors_stage `\\ \\(driver\\, ...\\)" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 #: of +#, fuzzy msgid "Execute the 'collect masked vectors' stage." 
-msgstr "" +msgstr "执行 \"收集屏蔽向量 \"阶段。" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 #: of +#, fuzzy msgid "" ":py:obj:`setup_stage " "`\\ \\(driver\\, " "context\\, state\\)" msgstr "" +":py:obj:`setup_stage `\\" +" \\(driver\\, context\\, state\\)" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 #: of +#, fuzzy msgid "Execute the 'setup' stage." -msgstr "" +msgstr "执行 \"设置 \"阶段。" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 #: of +#, fuzzy msgid "" ":py:obj:`share_keys_stage " "`\\ " "\\(driver\\, context\\, state\\)" msgstr "" +":py:obj:`share_keys_stage `\\ \\(driver\\, context\\, state\\)" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 #: of +#, fuzzy msgid "Execute the 'share keys' stage." -msgstr "" +msgstr "执行 \"共享密钥 \"阶段。" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 #: of +#, fuzzy msgid "" ":py:obj:`unmask_stage " "`\\ \\(driver\\, " "context\\, state\\)" msgstr "" +":py:obj:`unmask_stage `" +"\\ \\(driver\\, context\\, state\\)" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 @@ -23111,4 +24800,3 @@ msgstr "" #~ msgid "|08cb60859b07461588fe44e55810b050|" #~ msgstr "" - From 2e1d943d88893adc7a3061316676655ea7afbe90 Mon Sep 17 00:00:00 2001 From: Javier Date: Mon, 27 May 2024 10:58:52 +0100 Subject: [PATCH 20/23] fix(framework:skip) Remove `pydantic` from list of extras in `pyproject.toml` (#3514) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 2d8c24d8e80c..a045987367f6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -81,7 +81,7 @@ starlette = { version = "^0.31.0", optional = true } uvicorn = { version = "^0.23.0", extras = ["standard"], optional = true } [tool.poetry.extras] -simulation = ["ray", "pydantic"] +simulation = ["ray"] rest = ["requests", "starlette", "uvicorn"] [tool.poetry.group.dev.dependencies] From 1647bd2bbc49246ac90f586fbbf78c47d5d2b3b3 Mon Sep 17 00:00:00 2001 From: Javier Date: Mon, 27 May 2024 11:22:15 +0100 Subject: [PATCH 21/23] break(framework) Remove option to start SuperLink for simulation (#3513) --- src/py/flwr/server/app.py | 78 --------------------------------------- 1 file changed, 78 deletions(-) diff --git a/src/py/flwr/server/app.py b/src/py/flwr/server/app.py index 84a075a3e6df..8049a864fd5d 100644 --- a/src/py/flwr/server/app.py +++ b/src/py/flwr/server/app.py @@ -15,7 +15,6 @@ """Flower server app.""" import argparse -import asyncio import csv import importlib.util import sys @@ -39,7 +38,6 @@ MISSING_EXTRA_REST, TRANSPORT_TYPE_GRPC_RERE, TRANSPORT_TYPE_REST, - TRANSPORT_TYPE_VCE, ) from flwr.common.exit_handlers import register_exit_handlers from flwr.common.logger import log, warn_deprecated_feature @@ -63,7 +61,6 @@ ) from .superlink.fleet.grpc_rere.fleet_servicer import FleetServicer from .superlink.fleet.grpc_rere.server_interceptor import 
AuthenticateServerInterceptor -from .superlink.fleet.vce import start_vce from .superlink.state import StateFactory ADDRESS_DRIVER_API = "0.0.0.0:9091" @@ -401,17 +398,6 @@ def run_superlink() -> None: interceptors=interceptors, ) grpc_servers.append(fleet_server) - elif args.fleet_api_type == TRANSPORT_TYPE_VCE: - f_stop = asyncio.Event() # Does nothing - _run_fleet_api_vce( - num_supernodes=args.num_supernodes, - client_app_attr=args.client_app, - backend_name=args.backend, - backend_config_json_stream=args.backend_config, - app_dir=args.app_dir, - state_factory=state_factory, - f_stop=f_stop, - ) else: raise ValueError(f"Unknown fleet_api_type: {args.fleet_api_type}") @@ -569,29 +555,6 @@ def _run_fleet_api_grpc_rere( return fleet_grpc_server -# pylint: disable=too-many-arguments -def _run_fleet_api_vce( - num_supernodes: int, - client_app_attr: str, - backend_name: str, - backend_config_json_stream: str, - app_dir: str, - state_factory: StateFactory, - f_stop: asyncio.Event, -) -> None: - log(INFO, "Flower VCE: Starting Fleet API (VirtualClientEngine)") - - start_vce( - num_supernodes=num_supernodes, - client_app_attr=client_app_attr, - backend_name=backend_name, - backend_config_json_stream=backend_config_json_stream, - state_factory=state_factory, - app_dir=app_dir, - f_stop=f_stop, - ) - - # pylint: disable=import-outside-toplevel,too-many-arguments def _run_fleet_api_rest( host: str, @@ -783,14 +746,6 @@ def _add_args_fleet_api(parser: argparse.ArgumentParser) -> None: help="Start a Fleet API server (REST, experimental)", ) - ex_group.add_argument( - "--vce", - action="store_const", - dest="fleet_api_type", - const=TRANSPORT_TYPE_VCE, - help="Start a Fleet API server (VirtualClientEngine)", - ) - # Fleet API gRPC-rere options grpc_rere_group = parser.add_argument_group( "Fleet API (gRPC-rere) server options", "" @@ -826,36 +781,3 @@ def _add_args_fleet_api(parser: argparse.ArgumentParser) -> None: type=int, default=1, ) - - # Fleet API VCE options - vce_group = parser.add_argument_group("Fleet API (VCE) server options", "") - vce_group.add_argument( - "--client-app", - help="For example: `client:app` or `project.package.module:wrapper.app`.", - ) - vce_group.add_argument( - "--num-supernodes", - type=int, - help="Number of simulated SuperNodes.", - ) - vce_group.add_argument( - "--backend", - default="ray", - type=str, - help="Simulation backend that executes the ClientApp.", - ) - vce_group.add_argument( - "--backend-config", - type=str, - default='{"client_resources": {"num_cpus":1, "num_gpus":0.0}, "tensorflow": 0}', - help='A JSON formatted stream, e.g \'{"":, "":}\' to ' - "configure a backend. Values supported in are those included by " - "`flwr.common.typing.ConfigsRecordValues`. ", - ) - parser.add_argument( - "--app-dir", - default="", - help="Add specified directory to the PYTHONPATH and load" - "ClientApp from there." 
- " Default: current working directory.", - ) From 17cd81ab481bc511fd8df878434b60a87c859bbf Mon Sep 17 00:00:00 2001 From: Robert Steiner Date: Mon, 27 May 2024 13:13:05 +0200 Subject: [PATCH 22/23] ci(skip:*) Use new base images is nightly CI (#3484) Signed-off-by: Robert Steiner --- .github/workflows/release-nightly.yml | 41 +++++++++++++++++++++------ 1 file changed, 32 insertions(+), 9 deletions(-) diff --git a/.github/workflows/release-nightly.yml b/.github/workflows/release-nightly.yml index f9668131b5a6..939a9581871c 100644 --- a/.github/workflows/release-nightly.yml +++ b/.github/workflows/release-nightly.yml @@ -17,9 +17,12 @@ jobs: name: ${{ steps.release.outputs.name }} version: ${{ steps.release.outputs.version }} skip: ${{ steps.release.outputs.skip }} + pip-version: ${{ steps.release.outputs.pip-version }} + setuptools-version: ${{ steps.release.outputs.setuptools-version }} steps: - uses: actions/checkout@v4 - name: Bootstrap + id: bootstrap uses: ./.github/actions/bootstrap - name: Release nightly if: github.event.schedule == '0 23 * * *' @@ -37,26 +40,46 @@ jobs: echo "name=$(poetry version | awk {'print $1'})" >> $GITHUB_OUTPUT echo "version=$(poetry version -s)" >> $GITHUB_OUTPUT + echo "pip-version=${{ steps.bootstrap.outputs.pip-version }}" >> "$GITHUB_OUTPUT" + echo "setuptools-version=${{ steps.bootstrap.outputs.setuptools-version }}" >> "$GITHUB_OUTPUT" - build-docker-images: - name: Build nightly images + build-docker-base-images: + name: Build nightly base images if: github.repository == 'adap/flower' && needs.release-nightly.outputs.skip != 'true' && github.event.schedule == '30 23 * * *' uses: ./.github/workflows/_docker-build.yml needs: release-nightly + with: + namespace-repository: flwr/base + file-dir: src/docker/base/ubuntu + build-args: | + PIP_VERSION=${{ needs.release-nightly.outputs.pip-version }} + SETUPTOOLS_VERSION=${{ needs.release-nightly.outputs.setuptools-version }} + FLWR_VERSION=${{ needs.release-nightly.outputs.version }} + FLWR_PACKAGE=${{ needs.release-nightly.outputs.name }} + tags: | + ${{ needs.release-nightly.outputs.version }} + nightly + secrets: + dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} + dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} + + build-docker-binary-images: + name: Build nightly binary images + if: github.repository == 'adap/flower' && needs.release-nightly.outputs.skip != 'true' && github.event.schedule == '30 23 * * *' + uses: ./.github/workflows/_docker-build.yml + needs: [release-nightly, build-docker-base-images] strategy: fail-fast: false matrix: images: [ - { repository: "flwr/superlink", file-dir: "src/docker/superlink" }, - { repository: "flwr/supernode", file-dir: "src/docker/supernode" }, - { repository: "flwr/serverapp", file-dir: "src/docker/serverapp" } + { repository: "flwr/superlink", file_dir: "src/docker/superlink" }, + { repository: "flwr/supernode", file_dir: "src/docker/supernode" }, + { repository: "flwr/serverapp", file_dir: "src/docker/serverapp" } ] with: namespace-repository: ${{ matrix.images.repository }} - file-dir: ${{ matrix.images.file-dir }} - build-args: | - FLWR_VERSION=${{ needs.release-nightly.outputs.version }} - FLWR_PACKAGE=${{ needs.release-nightly.outputs.name }} + file-dir: ${{ matrix.images.file_dir }} + build-args: BASE_IMAGE=${{ needs.release-nightly.outputs.version }} tags: | ${{ needs.release-nightly.outputs.version }} nightly From ca36bcf494ce37672337395321d993ca0cead814 Mon Sep 17 00:00:00 2001 From: "Weblate (bot)" Date: Mon, 27 May 2024 17:24:09 +0200 Subject: 
[PATCH 23/23] docs(framework) Add latest Hosted Weblate translation updates (#3516) Co-authored-by: Yan Gao Co-authored-by: Charles Beauville --- .../zh_Hans/LC_MESSAGES/framework-docs.po | 17671 ++++++++-------- 1 file changed, 8881 insertions(+), 8790 deletions(-) diff --git a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po index 4f4d9cfe5214..253dfe0ccc86 100644 --- a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po +++ b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po @@ -30,8 +30,8 @@ msgstr "边缘客户端引擎" #: ../../source/contributor-explanation-architecture.rst:7 msgid "" -"`Flower `_ core framework architecture with Edge " -"Client Engine" +"`Flower `_ core framework architecture with Edge Client " +"Engine" msgstr "具有边缘客户端引擎的`Flower `核心架构" #: ../../source/contributor-explanation-architecture.rst:13 @@ -50,9 +50,10 @@ msgstr "可同步进行的虚拟客户端引擎和边缘客户端引擎" #: ../../source/contributor-explanation-architecture.rst:23 msgid "" -"`Flower `_ core framework architecture with both " -"Virtual Client Engine and Edge Client Engine" -msgstr "具有虚拟客户端引擎和边缘客户端引擎的`Flower `核心架构" +"`Flower `_ core framework architecture with both Virtual " +"Client Engine and Edge Client Engine" +msgstr "" +"具有虚拟客户端引擎和边缘客户端引擎的`Flower `核心架构" #: ../../source/contributor-how-to-build-docker-images.rst:2 msgid "How to build Docker Flower images locally" @@ -61,17 +62,17 @@ msgstr "如何在本地搭建Docker Flower images" #: ../../source/contributor-how-to-build-docker-images.rst:4 #, fuzzy msgid "" -"Flower provides pre-made docker images on `Docker Hub " -"`_ that include all necessary " -"dependencies for running the server. You can also build your own custom " -"docker images from scratch with a different version of Python or Ubuntu " -"if that is what you need. In this guide, we will explain what images " -"exist and how to build them locally." +"Flower provides pre-made docker images on `Docker Hub `_ that include all necessary dependencies for " +"running the server. You can also build your own custom docker images from " +"scratch with a different version of Python or Ubuntu if that is what you " +"need. In this guide, we will explain what images exist and how to build them " +"locally." msgstr "" -"Flower 在 `Docker Hub `_ " -"上提供了预制的 docker 镜像,其中包括运行服务器所需的所有依赖项。如果你需要," -"也可以使用不同版本的 Python 或 Ubuntu 从头开始构建自己的定制 docker " -"镜像。在本指南中,我们将介绍有哪些镜像,以及如何在本地构建它们。" +"Flower 在 `Docker Hub `_ 上提供了" +"预制的 docker 镜像,其中包括运行服务器所需的所有依赖项。如果你需要,也可以使" +"用不同版本的 Python 或 Ubuntu 从头开始构建自己的定制 docker 镜像。在本指南" +"中,我们将介绍有哪些镜像,以及如何在本地构建它们。" #: ../../source/contributor-how-to-build-docker-images.rst:9 #, fuzzy @@ -93,51 +94,51 @@ msgstr "验证 Docker 守护进程是否正在运行。" #: ../../source/contributor-how-to-build-docker-images.rst:19 #, fuzzy msgid "" -"Please follow the first section on :doc:`Run Flower using Docker ` which covers this step in more detail." +"Please follow the first section on :doc:`Run Flower using Docker ` which covers this step in more detail." msgstr "" -"请阅读 :doc:`Run Flower using Docker ` " -"的第一节,其中更详细地介绍了这一步骤。" +"请阅读 :doc:`Run Flower using Docker ` 的第一" +"节,其中更详细地介绍了这一步骤。" #: ../../source/contributor-how-to-build-docker-images.rst:23 #, fuzzy msgid "" "Currently, Flower provides two images, a base image and a server image. " "There will also be a client image soon. The base image, as the name " -"suggests, contains basic dependencies that both the server and the client" -" need. This includes system dependencies, Python and Python tools. 
The " -"server image is based on the base image, but it additionally installs the" -" Flower server using ``pip``." +"suggests, contains basic dependencies that both the server and the client " +"need. This includes system dependencies, Python and Python tools. The server " +"image is based on the base image, but it additionally installs the Flower " +"server using ``pip``." msgstr "" "目前,Flower 提供两个镜像,一个基础镜像和一个服务器镜像。不久还将推出客户端镜" "像。基础镜像,顾名思义,包含服务器和客户端都需要的基本依赖项。其中包括系统依" -"赖项、Python 和 Python 工具。服务器镜像基于基础镜像,但它会使用 ``pip`` " -"额外安装 Flower 服务器。" +"赖项、Python 和 Python 工具。服务器镜像基于基础镜像,但它会使用 ``pip`` 额外" +"安装 Flower 服务器。" #: ../../source/contributor-how-to-build-docker-images.rst:28 #, fuzzy msgid "" "The build instructions that assemble the images are located in the " -"respective Dockerfiles. You can find them in the subdirectories of " -"``src/docker``." -msgstr "组装镜像的构建说明位于各自的 Dockerfile 中。你可以在 ``src/docker`` " -"的子目录中找到它们。" +"respective Dockerfiles. You can find them in the subdirectories of ``src/" +"docker``." +msgstr "" +"组装镜像的构建说明位于各自的 Dockerfile 中。你可以在 ``src/docker`` 的子目录" +"中找到它们。" #: ../../source/contributor-how-to-build-docker-images.rst:31 #, fuzzy msgid "" "Both, base and server image are configured via build arguments. Through " -"build arguments, we can make our build more flexible. For example, in the" -" base image, we can specify the version of Python to install using the " -"``PYTHON_VERSION`` build argument. Some of the build arguments have " -"default values, others must be specified when building the image. All " -"available build arguments for each image are listed in one of the tables " -"below." +"build arguments, we can make our build more flexible. For example, in the " +"base image, we can specify the version of Python to install using the " +"``PYTHON_VERSION`` build argument. Some of the build arguments have default " +"values, others must be specified when building the image. All available " +"build arguments for each image are listed in one of the tables below." msgstr "" "基础镜像和服务器镜像都是通过构建参数配置的。通过联编参数,我们可以使联编更加" -"灵活。例如,在基础镜像中,我们可以使用 ``PYTHON_VERSION`` " -"联编参数指定要安装的 Python 版本。有些联编参数有默认值,有些则必须在联编映像" -"时指定。每个映像的所有可用联编参数都列在下表中。" +"灵活。例如,在基础镜像中,我们可以使用 ``PYTHON_VERSION`` 联编参数指定要安装" +"的 Python 版本。有些联编参数有默认值,有些则必须在联编映像时指定。每个映像的" +"所有可用联编参数都列在下表中。" #: ../../source/contributor-how-to-build-docker-images.rst:38 #, fuzzy @@ -239,19 +240,21 @@ msgstr "默认为 ``22.04``。" #: ../../source/contributor-how-to-build-docker-images.rst:65 #, fuzzy msgid "" -"The following example creates a base image with Python 3.11.0, pip 23.0.1" -" and setuptools 69.0.2:" -msgstr "下面的示例使用 Python 3.11.0、pip 23.0.1 和 setuptools 69.0.2 " -"创建了基本映像:" +"The following example creates a base image with Python 3.11.0, pip 23.0.1 " +"and setuptools 69.0.2:" +msgstr "" +"下面的示例使用 Python 3.11.0、pip 23.0.1 和 setuptools 69.0.2 创建了基本映" +"像:" #: ../../source/contributor-how-to-build-docker-images.rst:76 #, fuzzy msgid "" -"The name of image is ``flwr_base`` and the tag ``0.1.0``. Remember that " -"the build arguments as well as the name and tag can be adapted to your " -"needs. These values serve as examples only." -msgstr "图像名称为 ``flwr_base``,标记为 ``0.1." -"0``。请记住,编译参数以及名称和标记都可以根据需要进行调整。这些值仅供参考。" +"The name of image is ``flwr_base`` and the tag ``0.1.0``. Remember that the " +"build arguments as well as the name and tag can be adapted to your needs. " +"These values serve as examples only." 
+msgstr "" +"图像名称为 ``flwr_base``,标记为 ``0.1.0``。请记住,编译参数以及名称和标记都" +"可以根据需要进行调整。这些值仅供参考。" #: ../../source/contributor-how-to-build-docker-images.rst:80 #, fuzzy @@ -306,28 +309,30 @@ msgstr "``1.0.0b0``" #: ../../source/contributor-how-to-build-docker-images.rst:103 #, fuzzy msgid "" -"The following example creates a server image with the official Flower " -"base image py3.11-ubuntu22.04 and Flower 1.7.0:" -msgstr "下面的示例使用官方的 Flower 基本镜像 py3.11-ubuntu22.04 和 Flower 1.7.0 " -"创建了一个服务器镜像:" +"The following example creates a server image with the official Flower base " +"image py3.11-ubuntu22.04 and Flower 1.7.0:" +msgstr "" +"下面的示例使用官方的 Flower 基本镜像 py3.11-ubuntu22.04 和 Flower 1.7.0 创建" +"了一个服务器镜像:" #: ../../source/contributor-how-to-build-docker-images.rst:114 #, fuzzy msgid "" -"The name of image is ``flwr_server`` and the tag ``0.1.0``. Remember that" -" the build arguments as well as the name and tag can be adapted to your " +"The name of image is ``flwr_server`` and the tag ``0.1.0``. Remember that " +"the build arguments as well as the name and tag can be adapted to your " "needs. These values serve as examples only." -msgstr "图像名称为 ``flwr_server``,标记为 ``0.1." -"0``。请记住,编译参数以及名称和标记都可以根据需要进行调整。这些值仅供参考。" +msgstr "" +"图像名称为 ``flwr_server``,标记为 ``0.1.0``。请记住,编译参数以及名称和标记" +"都可以根据需要进行调整。这些值仅供参考。" #: ../../source/contributor-how-to-build-docker-images.rst:117 #, fuzzy msgid "" -"If you want to use your own base image instead of the official Flower " -"base image, all you need to do is set the ``BASE_REPOSITORY`` and " -"``BASE_IMAGE_TAG`` build arguments. The value of ``BASE_REPOSITORY`` must" -" match the name of your image and the value of ``BASE_IMAGE_TAG`` must " -"match the tag of your image." +"If you want to use your own base image instead of the official Flower base " +"image, all you need to do is set the ``BASE_REPOSITORY`` and " +"``BASE_IMAGE_TAG`` build arguments. The value of ``BASE_REPOSITORY`` must " +"match the name of your image and the value of ``BASE_IMAGE_TAG`` must match " +"the tag of your image." msgstr "" "如果您想使用自己的基础图片而不是 Flower 官方的基础图片,只需设置 " "``BASE_REPOSITORY`` 和 ``BASE_IMAGE_TAG`` 联编参数即可。`BASE_REPOSITORY``的" @@ -345,25 +350,26 @@ msgstr "贡献译文" #: ../../source/contributor-how-to-contribute-translations.rst:4 #, fuzzy msgid "" -"Since `Flower 1.5 `_ we have introduced translations to " -"our doc pages, but, as you might have noticed, the translations are often" -" imperfect. If you speak languages other than English, you might be able " -"to help us in our effort to make Federated Learning accessible to as many" -" people as possible by contributing to those translations! This might " -"also be a great opportunity for those wanting to become open source " -"contributors with little prerequisites." +"Since `Flower 1.5 `_ we have introduced translations to our doc pages, " +"but, as you might have noticed, the translations are often imperfect. If you " +"speak languages other than English, you might be able to help us in our " +"effort to make Federated Learning accessible to as many people as possible " +"by contributing to those translations! This might also be a great " +"opportunity for those wanting to become open source contributors with little " +"prerequisites." 
msgstr "" -"从 `Flower 1.5 `_ " -"开始,我们在文档页面中引入了翻译,但正如你可能已经注意到的,这些翻译往往并不完美。如果您会说英语以外的语言,也许您可以帮助我们翻译这些文档,让更多的人了解" -" Federated Learning!对于那些想成为开源贡献者的人来说,这也是一个很好的机会。" +"从 `Flower 1.5 `_ 开始,我们在文档页面中引入了翻译,但正如你可能已经" +"注意到的,这些翻译往往并不完美。如果您会说英语以外的语言,也许您可以帮助我们" +"翻译这些文档,让更多的人了解 Federated Learning!对于那些想成为开源贡献者的人" +"来说,这也是一个很好的机会。" #: ../../source/contributor-how-to-contribute-translations.rst:13 msgid "" -"Our translation project is publicly available over on `Weblate " -"`_, this " -"where most of the work will happen." +"Our translation project is publicly available over on `Weblate `_, this where most of " +"the work will happen." msgstr "" "我们的翻译项目已在 \"Weblate `_\"上公开,大部分工作都将在这里进行。" @@ -374,39 +380,41 @@ msgstr "为现有语言作出贡献" #: ../../source/contributor-how-to-contribute-translations.rst:23 msgid "" -"The first thing you will need to do in order to contribute is to create a" -" free Weblate account on this `page " -"`_. More information about" -" profile settings can be found `here " +"The first thing you will need to do in order to contribute is to create a " +"free Weblate account on this `page `_. More information about profile settings can be found `here " "`_." msgstr "" -"您需要做的第一件事就是在本`网页`_上创建一个免费的Weblate帐户。有关个人资料设置的更多信息,请参阅`这里" -" `_。" +"您需要做的第一件事就是在本`网页`_上创建一个免费的Weblate帐户。有关个人资料设置的更多信息,请参阅`这里 " +"`_。" #: ../../source/contributor-how-to-contribute-translations.rst:29 msgid "" -"Once you are signed in to Weblate, you can navigate to the `Flower " -"Framework project `_. Here, you should see the different existing languages" -" that can be found on the website." +"Once you are signed in to Weblate, you can navigate to the `Flower Framework " +"project `_. " +"Here, you should see the different existing languages that can be found on " +"the website." msgstr "" -"登录到Weblate后,您可以导航到 \"Flower Framework " -"\"项目`_。在这里,您可以看到网站上现有的各种语言。" +"登录到Weblate后,您可以导航到 \"Flower Framework \"项目`_。在这里,您可以看到网站上现有" +"的各种语言。" #: ../../source/contributor-how-to-contribute-translations.rst:34 msgid "" -"Once you have selected the language you want to contribute to, you should" -" see a similar interface to this:" +"Once you have selected the language you want to contribute to, you should " +"see a similar interface to this:" msgstr "选择您要贡献的语言后,您应该会看到与此类似的界面:" #: ../../source/contributor-how-to-contribute-translations.rst:39 msgid "" "The most straight forward option here is to click on the ``Translate`` " -"button on the top right (in the ``Translation status`` section). This " -"will automatically bring you to the translation interface for " -"untranslated strings." -msgstr "最简单的方法是点击右上角(\"翻译状态 \"部分)的 \"翻译 \"按钮。这将自动带您进入未翻译字符串的翻译界面。" +"button on the top right (in the ``Translation status`` section). This will " +"automatically bring you to the translation interface for untranslated " +"strings." 
+msgstr "" +"最简单的方法是点击右上角(\"翻译状态 \"部分)的 \"翻译 \"按钮。这将自动带您进" +"入未翻译字符串的翻译界面。" #: ../../source/contributor-how-to-contribute-translations.rst:43 msgid "This is what the interface looks like:" @@ -415,43 +423,46 @@ msgstr "这就是界面的样子:" #: ../../source/contributor-how-to-contribute-translations.rst:47 #, fuzzy msgid "" -"You input your translation in the text box at the top and then, once you " -"are happy with it, you either press ``Save and continue`` (to save the " -"translation and go to the next untranslated string), ``Save and stay`` " -"(to save the translation and stay on the same page), ``Suggest`` (to add " -"your translation to suggestions for other users to view), or ``Skip`` (to" -" go to the next untranslated string without saving anything)." +"You input your translation in the text box at the top and then, once you are " +"happy with it, you either press ``Save and continue`` (to save the " +"translation and go to the next untranslated string), ``Save and stay`` (to " +"save the translation and stay on the same page), ``Suggest`` (to add your " +"translation to suggestions for other users to view), or ``Skip`` (to go to " +"the next untranslated string without saving anything)." msgstr "" -"您可以在顶部的文本框中输入翻译内容,满意后按 " -"\"保存并继续\"(保存翻译内容并转到下一个未翻译的字符串)、\"保存并停留\"(保存翻译内容并停留在同一页面)、\"建议\"(将您的翻译添加到建议中供其他用户查看)或" -" \"跳过\"(转到下一个未翻译的字符串而不保存任何内容)。" +"您可以在顶部的文本框中输入翻译内容,满意后按 \"保存并继续\"(保存翻译内容并转" +"到下一个未翻译的字符串)、\"保存并停留\"(保存翻译内容并停留在同一页" +"面)、\"建议\"(将您的翻译添加到建议中供其他用户查看)或 \"跳过\"(转到下一个" +"未翻译的字符串而不保存任何内容)。" #: ../../source/contributor-how-to-contribute-translations.rst:54 msgid "" "In order to help with the translations, you can see on the bottom the " "``Nearby strings``, the ``Comments`` (from other contributors), the " "``Automatic suggestions`` (from machine translation engines), the " -"translations in ``Other languages``, and the ``History`` of translations " -"for this string." +"translations in ``Other languages``, and the ``History`` of translations for " +"this string." msgstr "" -"为了帮助翻译,您可以在底部看到 \"邻近字符串\"、\"评论\"(来自其他贡献者)、\"自动建议\"(来自机器翻译引擎)、\"其他语言 " -"\"中的翻译以及该字符串的 \"历史翻译\"。" +"为了帮助翻译,您可以在底部看到 \"邻近字符串\"、\"评论\"(来自其他贡献" +"者)、\"自动建议\"(来自机器翻译引擎)、\"其他语言 \"中的翻译以及该字符串的 " +"\"历史翻译\"。" #: ../../source/contributor-how-to-contribute-translations.rst:59 msgid "" -"On the right, under the ``String information`` section, you can also " -"click the link under ``Source string location`` in order to view the " -"source of the doc file containing the string." -msgstr "在右侧的 \"字符串信息 \"部分,您还可以单击 \"源字符串位置 \"下的链接,以查看包含字符串的 doc 文件的源文件。" +"On the right, under the ``String information`` section, you can also click " +"the link under ``Source string location`` in order to view the source of the " +"doc file containing the string." +msgstr "" +"在右侧的 \"字符串信息 \"部分,您还可以单击 \"源字符串位置 \"下的链接,以查看" +"包含字符串的 doc 文件的源文件。" #: ../../source/contributor-how-to-contribute-translations.rst:63 msgid "" -"For more information about translating using Weblate, you can check out " -"this `in-depth guide " -"`_." +"For more information about translating using Weblate, you can check out this " +"`in-depth guide `_." 
msgstr "" -"有关使用 Weblate 进行翻译的更多信息,您可以查看本 \"深入指南 " -"`_\"。" +"有关使用 Weblate 进行翻译的更多信息,您可以查看本 \"深入指南 `_\"。" #: ../../source/contributor-how-to-contribute-translations.rst:67 msgid "Add new languages" @@ -459,12 +470,13 @@ msgstr "添加新语言" #: ../../source/contributor-how-to-contribute-translations.rst:69 msgid "" -"If you want to add a new language, you will first have to contact us, " -"either on `Slack `_, or by opening an issue" -" on our `GitHub repo `_." +"If you want to add a new language, you will first have to contact us, either " +"on `Slack `_, or by opening an issue on our " +"`GitHub repo `_." msgstr "" -"如果您想添加新语言,请先联系我们,可以在 `Slack `_ 上联系,也可以在我们的 " -"`GitHub repo `_ 上提交问题。" +"如果您想添加新语言,请先联系我们,可以在 `Slack `_ 上联系,也可以在我们的 `GitHub repo `_ 上提交问题。" #: ../../source/contributor-how-to-create-new-messages.rst:2 msgid "Creating New Messages" @@ -472,15 +484,16 @@ msgstr "创建新信息" #: ../../source/contributor-how-to-create-new-messages.rst:4 msgid "" -"This is a simple guide for creating a new type of message between the " -"server and clients in Flower." +"This is a simple guide for creating a new type of message between the server " +"and clients in Flower." msgstr "这是一个如何用Flower在服务器和客户端之间创建新类型的信息的简要指导。" #: ../../source/contributor-how-to-create-new-messages.rst:6 msgid "" -"Let's suppose we have the following example functions in " -":code:`server.py` and :code:`numpy_client.py`..." -msgstr "假设我们在脚本code:`server.py`和code:`numpy_client.py`中有以下的示例函数..." +"Let's suppose we have the following example functions in :code:`server.py` " +"and :code:`numpy_client.py`..." +msgstr "" +"假设我们在脚本code:`server.py`和code:`numpy_client.py`中有以下的示例函数..." #: ../../source/contributor-how-to-create-new-messages.rst:8 msgid "Server's side:" @@ -492,9 +505,11 @@ msgstr "在客户端:" #: ../../source/contributor-how-to-create-new-messages.rst:26 msgid "" -"Let's now see what we need to implement in order to get this simple " -"function between the server and client to work!" -msgstr "现在让我们来看看,为了让服务器和客户端之间的这个简单的函数正常工作,我们需要实现哪些功能!" +"Let's now see what we need to implement in order to get this simple function " +"between the server and client to work!" +msgstr "" +"现在让我们来看看,为了让服务器和客户端之间的这个简单的函数正常工作,我们需要" +"实现哪些功能!" #: ../../source/contributor-how-to-create-new-messages.rst:30 msgid "Message Types for Protocol Buffers" @@ -503,15 +518,15 @@ msgstr "协议缓冲区的信息类型" #: ../../source/contributor-how-to-create-new-messages.rst:32 #, fuzzy msgid "" -"The first thing we need to do is to define a message type for the RPC " -"system in :code:`transport.proto`. Note that we have to do it for both " -"the request and response messages. For more details on the syntax of " -"proto3, please see the `official documentation `_." +"The first thing we need to do is to define a message type for the RPC system " +"in :code:`transport.proto`. Note that we have to do it for both the request " +"and response messages. For more details on the syntax of proto3, please see " +"the `official documentation `_." 
msgstr "" -"我们需要做的第一件事是在脚本code:`transport.proto`中定义 RPC " -"系统的消息类型。请注意,我们必须对请求信息和响应信息都这样做。有关 proto3 语法的更多详情,请参阅官方文档 " -"`_。" +"我们需要做的第一件事是在脚本code:`transport.proto`中定义 RPC 系统的消息类型。" +"请注意,我们必须对请求信息和响应信息都这样做。有关 proto3 语法的更多详情,请" +"参阅官方文档 `_。" #: ../../source/contributor-how-to-create-new-messages.rst:35 msgid "Within the :code:`ServerMessage` block:" @@ -523,8 +538,8 @@ msgstr "在 ClientMessage 代码块中:" #: ../../source/contributor-how-to-create-new-messages.rst:70 msgid "" -"Make sure to also add a field of the newly created message type in " -":code:`oneof msg`." +"Make sure to also add a field of the newly created message type in :code:" +"`oneof msg`." msgstr "确保在 :code:`oneof msg` 中也添加一个新创建的消息类型字段。" #: ../../source/contributor-how-to-create-new-messages.rst:72 @@ -545,8 +560,9 @@ msgid "" "datatypes to or from our defined RPC message types. You should add these " "functions in :code:`serde.py`." msgstr "" -"下一步是添加函数,以便将 Python 数据类型序列化和反序列化为我们定义的 RPC 消息类型或从我们定义的 RPC 消息类型反序列化和反序列化 " -"Python 数据类型。您应该在 :code:`serde.py` 中添加这些函数。" +"下一步是添加函数,以便将 Python 数据类型序列化和反序列化为我们定义的 RPC 消息" +"类型或从我们定义的 RPC 消息类型反序列化和反序列化 Python 数据类型。您应该在 :" +"code:`serde.py` 中添加这些函数。" #: ../../source/contributor-how-to-create-new-messages.rst:91 msgid "The four functions:" @@ -558,9 +574,11 @@ msgstr "从服务器发送信息" #: ../../source/contributor-how-to-create-new-messages.rst:114 msgid "" -"Now write the request function in your Client Proxy class (e.g., " -":code:`grpc_client_proxy.py`) using the serde functions you just created:" -msgstr "现在,在客户端代理类(例如 :code:`grpc_client_proxy.py`)中使用刚才创建的 serde 函数编写请求函数:" +"Now write the request function in your Client Proxy class (e.g., :code:" +"`grpc_client_proxy.py`) using the serde functions you just created:" +msgstr "" +"现在,在客户端代理类(例如 :code:`grpc_client_proxy.py`)中使用刚才创建的 " +"serde 函数编写请求函数:" #: ../../source/contributor-how-to-create-new-messages.rst:128 msgid "Receiving the Message by the Client" @@ -568,12 +586,12 @@ msgstr "由客户端接收信息" #: ../../source/contributor-how-to-create-new-messages.rst:130 msgid "" -"Last step! Modify the code in :code:`message_handler.py` to check the " -"field of your message and call the :code:`example_response` function. " -"Remember to use the serde functions!" +"Last step! Modify the code in :code:`message_handler.py` to check the field " +"of your message and call the :code:`example_response` function. Remember to " +"use the serde functions!" msgstr "" -"最后一步 修改 :code:`message_handler.py` 中的代码,检查信息的字段并调用 " -":code:`example_response` 函数。记住使用 serde 函数!" +"最后一步 修改 :code:`message_handler.py` 中的代码,检查信息的字段并调用 :" +"code:`example_response` 函数。记住使用 serde 函数!" #: ../../source/contributor-how-to-create-new-messages.rst:132 msgid "Within the handle function:" @@ -593,46 +611,51 @@ msgstr "使用 VSCode Dev Containers 进行开发" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:4 msgid "" -"When working on the Flower framework we want to ensure that all " -"contributors use the same developer environment to format code or run " -"tests. For this purpose we are using the VSCode Remote Containers " -"extension. What is it? Read the following quote:" +"When working on the Flower framework we want to ensure that all contributors " +"use the same developer environment to format code or run tests. For this " +"purpose we are using the VSCode Remote Containers extension. What is it? 
" +"Read the following quote:" msgstr "" -"在开发 Flower 框架时,我们希望确保所有贡献者使用相同的开发环境来格式化代码或运行测试。为此,我们使用了 VSCode " -"远程容器扩展。这是什么?请阅读下面这段话:" +"在开发 Flower 框架时,我们希望确保所有贡献者使用相同的开发环境来格式化代码或" +"运行测试。为此,我们使用了 VSCode 远程容器扩展。这是什么?请阅读下面这段话:" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:7 msgid "" -"The Visual Studio Code Remote - Containers extension lets you use a " -"Docker container as a fully-featured development environment. It allows " -"you to open any folder inside (or mounted into) a container and take " -"advantage of Visual Studio Code's full feature set. A " -":code:`devcontainer.json` file in your project tells VS Code how to " -"access (or create) a development container with a well-defined tool and " -"runtime stack. This container can be used to run an application or to " -"separate tools, libraries, or runtimes needed for working with a " -"codebase." -msgstr "" -"Visual Studio Code Remote - " -"Containers扩展可让你将Docker容器用作功能齐全的开发环境。它允许你打开容器内(或挂载到容器内)的任何文件夹,并利用 Visual " -"Studio Code 的全部功能集。项目中的 :code:`devcontainer.json` 文件会告诉 VS Code " -"如何访问(或创建)一个带有定义明确的工具和运行时栈的开发容器。该容器可用于运行应用程序,也可用于分离处理代码库所需的工具、库或运行时。" +"The Visual Studio Code Remote - Containers extension lets you use a Docker " +"container as a fully-featured development environment. It allows you to open " +"any folder inside (or mounted into) a container and take advantage of Visual " +"Studio Code's full feature set. A :code:`devcontainer.json` file in your " +"project tells VS Code how to access (or create) a development container with " +"a well-defined tool and runtime stack. This container can be used to run an " +"application or to separate tools, libraries, or runtimes needed for working " +"with a codebase." +msgstr "" +"Visual Studio Code Remote - Containers扩展可让你将Docker容器用作功能齐全的开" +"发环境。它允许你打开容器内(或挂载到容器内)的任何文件夹,并利用 Visual " +"Studio Code 的全部功能集。项目中的 :code:`devcontainer.json` 文件会告诉 VS " +"Code 如何访问(或创建)一个带有定义明确的工具和运行时栈的开发容器。该容器可用" +"于运行应用程序,也可用于分离处理代码库所需的工具、库或运行时。" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:9 msgid "" -"Workspace files are mounted from the local file system or copied or " -"cloned into the container. Extensions are installed and run inside the " -"container, where they have full access to the tools, platform, and file " -"system. This means that you can seamlessly switch your entire development" -" environment just by connecting to a different container." -msgstr "工作区文件从本地文件系统加载,或复制或克隆到容器中。扩展在容器内安装和运行,在容器内它们可以完全访问工具、平台和文件系统。这意味着,只需连接到不同的容器,就能无缝切换整个开发环境。" +"Workspace files are mounted from the local file system or copied or cloned " +"into the container. Extensions are installed and run inside the container, " +"where they have full access to the tools, platform, and file system. This " +"means that you can seamlessly switch your entire development environment " +"just by connecting to a different container." 
+msgstr "" +"工作区文件从本地文件系统加载,或复制或克隆到容器中。扩展在容器内安装和运行," +"在容器内它们可以完全访问工具、平台和文件系统。这意味着,只需连接到不同的容" +"器,就能无缝切换整个开发环境。" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:11 #, fuzzy msgid "" -"Source: `Official VSCode documentation " -"`_" -msgstr "来源:`VSCode 官方文档 `_" +"Source: `Official VSCode documentation `_" +msgstr "" +"来源:`VSCode 官方文档 `_" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:15 msgid "Getting started" @@ -641,42 +664,43 @@ msgstr "开始" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:17 #, fuzzy msgid "" -"Configuring and setting up the :code:`Dockerfile` as well the " -"configuration for the devcontainer can be a bit more involved. The good " -"thing is you don't have to do it. Usually it should be enough to install " -"`Docker `_ on your system and " -"ensure its available on your command line. Additionally, install the " -"`VSCode Containers Extension `_." +"Configuring and setting up the :code:`Dockerfile` as well the configuration " +"for the devcontainer can be a bit more involved. The good thing is you don't " +"have to do it. Usually it should be enough to install `Docker `_ on your system and ensure its available on " +"your command line. Additionally, install the `VSCode Containers Extension " +"`_." msgstr "" -"配置和设置 :code:`Dockerfile` 以及 devcontainer 的配置可能比较复杂。好在你想做就得做。通常只需在系统中安装 " -"Docker 并确保其在命令行中可用即可。此外,请安装 `VSCode Containers Extension " -"`_。" +"配置和设置 :code:`Dockerfile` 以及 devcontainer 的配置可能比较复杂。好在你想" +"做就得做。通常只需在系统中安装 Docker 并确保其在命令行中可用即可。此外,请安" +"装 `VSCode Containers Extension `_。" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:19 msgid "" -"Now you should be good to go. When starting VSCode, it will ask you to " -"run in the container environment and - if you confirm - automatically " -"build the container and use it. To manually instruct VSCode to use the " -"devcontainer, you can, after installing the extension, click the green " -"area in the bottom left corner of your VSCode window and select the " -"option *(Re)Open Folder in Container*." +"Now you should be good to go. When starting VSCode, it will ask you to run " +"in the container environment and - if you confirm - automatically build the " +"container and use it. To manually instruct VSCode to use the devcontainer, " +"you can, after installing the extension, click the green area in the bottom " +"left corner of your VSCode window and select the option *(Re)Open Folder in " +"Container*." msgstr "" -"现在你应该可以开始了。启动 VSCode 时,它会要求你在容器环境中运行,如果你确认,它会自动构建容器并使用它。要手动指示 VSCode 使用 " -"devcontainer,可以在安装扩展后,点击 VSCode 窗口左下角的绿色区域,然后选择 \"*(重新)在容器中打开文件夹*\"选项。" +"现在你应该可以开始了。启动 VSCode 时,它会要求你在容器环境中运行,如果你确" +"认,它会自动构建容器并使用它。要手动指示 VSCode 使用 devcontainer,可以在安装" +"扩展后,点击 VSCode 窗口左下角的绿色区域,然后选择 \"*(重新)在容器中打开文" +"件夹*\"选项。" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:21 msgid "" -"In some cases your setup might be more involved. For those cases consult " -"the following sources:" +"In some cases your setup might be more involved. 
For those cases consult the " +"following sources:" msgstr "在某些情况下,您的设置可能更复杂。有关这些情况,请参考以下资料:" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:23 #, fuzzy msgid "" -"`Developing inside a Container " -"`_" +"`Developing inside a Container `_" msgstr "" "在容器内开发 `_" @@ -684,9 +708,11 @@ msgstr "" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:24 #, fuzzy msgid "" -"`Remote development in Containers " -"`_" -msgstr "容器中的远程开发 `_" +"`Remote development in Containers `_" +msgstr "" +"容器中的远程开发 `_" #: ../../source/contributor-how-to-install-development-versions.rst:2 msgid "Install development versions" @@ -702,19 +728,21 @@ msgstr "使用诗歌(推荐)" #: ../../source/contributor-how-to-install-development-versions.rst:10 msgid "" -"Install a ``flwr`` pre-release from PyPI: update the ``flwr`` dependency " -"in ``pyproject.toml`` and then reinstall (don't forget to delete " -"``poetry.lock`` (``rm poetry.lock``) before running ``poetry install``)." +"Install a ``flwr`` pre-release from PyPI: update the ``flwr`` dependency in " +"``pyproject.toml`` and then reinstall (don't forget to delete ``poetry." +"lock`` (``rm poetry.lock``) before running ``poetry install``)." msgstr "" "安装来自 PyPI 的 ``flwr`` 预发布版本:更新 ``pyproject.toml`` 中的 ``flwr`` " -"依赖关系,然后重新安装(运行 ``poetry install` 前,别忘了删除 ``poetry.lock` (``rm " -"poetry.lock`))。" +"依赖关系,然后重新安装(运行 ``poetry install` 前,别忘了删除 ``poetry.lock` " +"(``rm poetry.lock`))。" #: ../../source/contributor-how-to-install-development-versions.rst:12 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (without " "extras)" -msgstr "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (不含额外内容)" +msgstr "" +"``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (不含额外内" +"容)" #: ../../source/contributor-how-to-install-development-versions.rst:13 msgid "" @@ -726,8 +754,8 @@ msgstr "" #: ../../source/contributor-how-to-install-development-versions.rst:15 msgid "" -"Install ``flwr`` from a local copy of the Flower source code via " -"``pyproject.toml``:" +"Install ``flwr`` from a local copy of the Flower source code via ``pyproject." 
+"toml``:" msgstr "通过 ``pyproject.toml`` 从 Flower 源代码的本地副本安装 ``flwr``:" #: ../../source/contributor-how-to-install-development-versions.rst:17 @@ -736,11 +764,11 @@ msgstr "``flwr = { path = \"../../\", develop = true }`` (不含额外内容 #: ../../source/contributor-how-to-install-development-versions.rst:18 msgid "" -"``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " -"}`` (with extras)" +"``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] }`` " +"(with extras)" msgstr "" -"``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " -"}`` (包含额外内容)" +"``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] }`` " +"(包含额外内容)" #: ../../source/contributor-how-to-install-development-versions.rst:20 msgid "Install ``flwr`` from a local wheel file via ``pyproject.toml``:" @@ -749,9 +777,11 @@ msgstr "通过 ``pyproject.toml`` 从本地轮子文件安装 ``flwr``:" #: ../../source/contributor-how-to-install-development-versions.rst:22 #, fuzzy msgid "" -"``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (without" -" extras)" -msgstr "``flwr = { path = \"../../dist/flwr-1.0.0-py3-none-any.whl\" }``(无额外内容)" +"``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (without " +"extras)" +msgstr "" +"``flwr = { path = \"../../dist/flwr-1.0.0-py3-none-any.whl\" }``(无额外内" +"容)" #: ../../source/contributor-how-to-install-development-versions.rst:23 #, fuzzy @@ -768,8 +798,8 @@ msgid "" "Dependency Specification `_" msgstr "" -"有关详细信息,请参阅 Poetry 文档: 诗歌依赖性规范 `_" +"有关详细信息,请参阅 Poetry 文档: 诗歌依赖性规范 `_" #: ../../source/contributor-how-to-install-development-versions.rst:28 msgid "Using pip (recommended on Colab)" @@ -791,7 +821,8 @@ msgstr "`pip install -U -pre flwr[simulation]``(包含额外功能)" msgid "" "Python packages can be installed from git repositories. Use one of the " "following commands to install the Flower directly from GitHub." 
-msgstr "Python 软件包可以从 git 仓库安装。使用以下命令之一直接从 GitHub 安装 Flower。" +msgstr "" +"Python 软件包可以从 git 仓库安装。使用以下命令之一直接从 GitHub 安装 Flower。" #: ../../source/contributor-how-to-install-development-versions.rst:37 msgid "Install ``flwr`` from the default GitHub branch (``main``):" @@ -799,17 +830,17 @@ msgstr "从 GitHub 的默认分支 (``main`) 安装 ``flwr``:" #: ../../source/contributor-how-to-install-development-versions.rst:39 msgid "" -"``pip install flwr@git+https://github.com/adap/flower.git`` (without " -"extras)" -msgstr "`pip install flwr@git+https://github.com/adap/flower.git`` (不含额外功能)" +"``pip install flwr@git+https://github.com/adap/flower.git`` (without extras)" +msgstr "" +"`pip install flwr@git+https://github.com/adap/flower.git`` (不含额外功能)" #: ../../source/contributor-how-to-install-development-versions.rst:40 msgid "" "``pip install flwr[simulation]@git+https://github.com/adap/flower.git`` " "(with extras)" msgstr "" -"`pip install " -"flwr[simulation]@git+https://github.com/adap/flower.git``(带附加功能)" +"`pip install flwr[simulation]@git+https://github.com/adap/flower.git``(带附" +"加功能)" #: ../../source/contributor-how-to-install-development-versions.rst:42 msgid "Install ``flwr`` from a specific GitHub branch (``branch-name``):" @@ -820,14 +851,16 @@ msgid "" "``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " "(without extras)" msgstr "" -"`pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " -"(不含附加功能)" +"`pip install flwr@git+https://github.com/adap/flower.git@branch-name`` (不含" +"附加功能)" #: ../../source/contributor-how-to-install-development-versions.rst:45 msgid "" -"``pip install flwr[simulation]@git+https://github.com/adap/flower.git" -"@branch-name`` (with extras)" -msgstr "`pip安装flwr[模拟]@git+https://github.com/adap/flower.git@分支名``(带附加功能)" +"``pip install flwr[simulation]@git+https://github.com/adap/flower.git@branch-" +"name`` (with extras)" +msgstr "" +"`pip安装flwr[模拟]@git+https://github.com/adap/flower.git@分支名``(带附加功" +"能)" #: ../../source/contributor-how-to-install-development-versions.rst:49 msgid "Open Jupyter Notebooks on Google Colab" @@ -835,33 +868,34 @@ msgstr "在谷歌 Colab 上打开 Jupyter 笔记本" #: ../../source/contributor-how-to-install-development-versions.rst:51 msgid "" -"Open the notebook ``doc/source/tutorial-get-started-with-flower-" -"pytorch.ipynb``:" -msgstr "打开笔记本 ``doc/source/tutorial-get-started-with-flower-pytorch.ipynb``:" +"Open the notebook ``doc/source/tutorial-get-started-with-flower-pytorch." 
+"ipynb``:" +msgstr "" +"打开笔记本 ``doc/source/tutorial-get-started-with-flower-pytorch.ipynb``:" #: ../../source/contributor-how-to-install-development-versions.rst:53 msgid "" -"https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-get-started-with-flower-pytorch.ipynb" +"https://colab.research.google.com/github/adap/flower/blob/main/doc/source/" +"tutorial-get-started-with-flower-pytorch.ipynb" msgstr "" -"https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-get-started-with-flower-pytorch.ipynb" +"https://colab.research.google.com/github/adap/flower/blob/main/doc/source/" +"tutorial-get-started-with-flower-pytorch.ipynb" #: ../../source/contributor-how-to-install-development-versions.rst:55 msgid "" -"Open a development version of the same notebook from branch `branch-name`" -" by changing ``main`` to ``branch-name`` (right after ``blob``):" +"Open a development version of the same notebook from branch `branch-name` by " +"changing ``main`` to ``branch-name`` (right after ``blob``):" msgstr "" -"将 ``main`` 改为 ``branch-name``(紧跟在 ``blob``之后),从分支 `branch-name` " -"打开同一笔记本的开发版本:" +"将 ``main`` 改为 ``branch-name``(紧跟在 ``blob``之后),从分支 `branch-" +"name` 打开同一笔记本的开发版本:" #: ../../source/contributor-how-to-install-development-versions.rst:57 msgid "" -"https://colab.research.google.com/github/adap/flower/blob/branch-" -"name/doc/source/tutorial-get-started-with-flower-pytorch.ipynb" +"https://colab.research.google.com/github/adap/flower/blob/branch-name/doc/" +"source/tutorial-get-started-with-flower-pytorch.ipynb" msgstr "" -"https://colab.research.google.com/github/adap/flower/blob/branch-" -"name/doc/source/tutorial-get-started-with-flower-pytorch.ipynb" +"https://colab.research.google.com/github/adap/flower/blob/branch-name/doc/" +"source/tutorial-get-started-with-flower-pytorch.ipynb" #: ../../source/contributor-how-to-install-development-versions.rst:59 msgid "Install a `whl` on Google Colab:" @@ -869,8 +903,8 @@ msgstr "在 Google Colab 上安装 `whl`:" #: ../../source/contributor-how-to-install-development-versions.rst:61 msgid "" -"In the vertical icon grid on the left hand side, select ``Files`` > " -"``Upload to session storage``" +"In the vertical icon grid on the left hand side, select ``Files`` > ``Upload " +"to session storage``" msgstr "在左侧的垂直图标网格中,选择 \"文件\">\"上传到会话存储\"" #: ../../source/contributor-how-to-install-development-versions.rst:62 @@ -881,13 +915,13 @@ msgstr "更新 whl (e.g., ``flwr-1.7.0-py3-none-any.whl``)" #: ../../source/contributor-how-to-install-development-versions.rst:63 #, fuzzy msgid "" -"Change ``!pip install -q 'flwr[simulation]' torch torchvision " -"matplotlib`` to ``!pip install -q 'flwr-1.8.0-py3-none-" -"any.whl[simulation]' torch torchvision matplotlib``" +"Change ``!pip install -q 'flwr[simulation]' torch torchvision matplotlib`` " +"to ``!pip install -q 'flwr-1.8.0-py3-none-any.whl[simulation]' torch " +"torchvision matplotlib``" msgstr "" -"把``!pip install -q 'flwr[simulation]' torch torchvision " -"matplotlib``变为``!pip install -q 'flwr-1.7.0-py3-none-any.whl[simulation]'" -" torch torchvision matplotlib``" +"把``!pip install -q 'flwr[simulation]' torch torchvision matplotlib``变为``!" 
+"pip install -q 'flwr-1.7.0-py3-none-any.whl[simulation]' torch torchvision " +"matplotlib``" #: ../../source/contributor-how-to-release-flower.rst:2 msgid "Release Flower" @@ -905,10 +939,11 @@ msgstr "在发布期间" #: ../../source/contributor-how-to-release-flower.rst:9 msgid "" -"The version number of a release is stated in ``pyproject.toml``. To " -"release a new version of Flower, the following things need to happen (in " -"that order):" -msgstr "版本号在 ``pyproject.toml`` 中说明。要发布 Flower 的新版本,需要完成以下工作(按顺序排列):" +"The version number of a release is stated in ``pyproject.toml``. To release " +"a new version of Flower, the following things need to happen (in that order):" +msgstr "" +"版本号在 ``pyproject.toml`` 中说明。要发布 Flower 的新版本,需要完成以下工作" +"(按顺序排列):" #: ../../source/contributor-how-to-release-flower.rst:11 #, fuzzy @@ -918,39 +953,39 @@ msgid "" "changes to the changelog afterwards until it looks good)." msgstr "" "运行 ``python3 src/py/flwr_tool/update_changelog.py `` 以将每" -"项新更改添加到更新日志中(之后可对更新日志进行手动更改,直到看起来不错为止)" -"。" +"项新更改添加到更新日志中(之后可对更新日志进行手动更改,直到看起来不错为" +"止)。" #: ../../source/contributor-how-to-release-flower.rst:12 #, fuzzy msgid "" -"Once the changelog has been updated with all the changes, run ``./dev" -"/prepare-release-changelog.sh v``, where ```` " -"is the version stated in ``pyproject.toml`` (notice the ``v`` added " -"before it). This will replace the ``Unreleased`` header of the changelog " -"by the version and current date, and it will add a thanking message for " -"the contributors. Open a pull request with those changes." +"Once the changelog has been updated with all the changes, run ``./dev/" +"prepare-release-changelog.sh v``, where ```` is " +"the version stated in ``pyproject.toml`` (notice the ``v`` added before it). " +"This will replace the ``Unreleased`` header of the changelog by the version " +"and current date, and it will add a thanking message for the contributors. " +"Open a pull request with those changes." msgstr "" -"更新更新日志后,运行``./dev/prepare-release-changelog.sh " -"v``,其中````是``pyproject." -"toml``中的版本(注意前面的``v``)。这将用版本和当前日期替换更新日志中的 " -"``Unreleased`` " -"标头,并为贡献者添加一条感谢信息。打开一个包含这些更改的拉取请求。" +"更新更新日志后,运行``./dev/prepare-release-changelog.sh v``,其" +"中````是``pyproject.toml``中的版本(注意前面的``v``)。这将用版" +"本和当前日期替换更新日志中的 ``Unreleased`` 标头,并为贡献者添加一条感谢信" +"息。打开一个包含这些更改的拉取请求。" #: ../../source/contributor-how-to-release-flower.rst:13 #, fuzzy msgid "" "Once the pull request is merged, tag the release commit with the version " -"number as soon as the PR is merged: ``git tag v`` (notice " -"the ``v`` added before the version number), then ``git push --tags``. " -"This will create a draft release on GitHub containing the correct " -"artifacts and the relevant part of the changelog." +"number as soon as the PR is merged: ``git tag v`` (notice the " +"``v`` added before the version number), then ``git push --tags``. This will " +"create a draft release on GitHub containing the correct artifacts and the " +"relevant part of the changelog." msgstr "" -"在 PR 合并后立即用版本号标记发布提交:``git tag v0.12.3``,然后``git push --tags``。这将在 GitHub" -" 上创建一个包含正确工件和更新日志相关部分的发布草案。" +"在 PR 合并后立即用版本号标记发布提交:``git tag v0.12.3``,然后``git push --" +"tags``。这将在 GitHub 上创建一个包含正确工件和更新日志相关部分的发布草案。" #: ../../source/contributor-how-to-release-flower.rst:14 -msgid "Check the draft release on GitHub, and if everything is good, publish it." +msgid "" +"Check the draft release on GitHub, and if everything is good, publish it." 
msgstr "检查 GitHub 上的发布稿,如果一切正常,就发布它。" #: ../../source/contributor-how-to-release-flower.rst:17 @@ -975,8 +1010,8 @@ msgstr "在 ``changelog.md`` 中添加新的 ``Unreleased`` 部分。" #: ../../source/contributor-how-to-release-flower.rst:25 msgid "" -"Merge the pull request on the same day (i.e., before a new nightly " -"release gets published to PyPI)." +"Merge the pull request on the same day (i.e., before a new nightly release " +"gets published to PyPI)." msgstr "在同一天合并拉取请求(即在新版本发布到 PyPI 之前)。" #: ../../source/contributor-how-to-release-flower.rst:28 @@ -989,9 +1024,11 @@ msgstr "释放前命名" #: ../../source/contributor-how-to-release-flower.rst:33 msgid "" -"PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases" -" MUST use one of the following naming patterns:" -msgstr "PyPI 支持预发布版本(alpha、beta、release candidate)。预发布版本必须使用以下命名模式之一:" +"PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases " +"MUST use one of the following naming patterns:" +msgstr "" +"PyPI 支持预发布版本(alpha、beta、release candidate)。预发布版本必须使用以下" +"命名模式之一:" #: ../../source/contributor-how-to-release-flower.rst:35 msgid "Alpha: ``MAJOR.MINOR.PATCHaN``" @@ -1037,38 +1074,41 @@ msgstr "`PEP-440 `_" #: ../../source/contributor-how-to-release-flower.rst:50 msgid "" -"`PyPA Choosing a versioning scheme " -"`_" +"`PyPA Choosing a versioning scheme `_" msgstr "" -"`PyPA 选择版本控制方案 `_" +"`PyPA 选择版本控制方案 `_" #: ../../source/contributor-how-to-release-flower.rst:52 msgid "" -"Note that the approach defined by PyPA is not compatible with SemVer " -"2.0.0 spec, for details consult the `Semantic Versioning Specification " -"`_ (specifically item " -"11 on precedence)." +"Note that the approach defined by PyPA is not compatible with SemVer 2.0.0 " +"spec, for details consult the `Semantic Versioning Specification `_ (specifically item 11 on " +"precedence)." msgstr "" -"请注意,PyPA 所定义的方法与 SemVer 2.0.0 " -"规范不兼容,详情请查阅《语义版本规范》`_(特别是关于优先级的第 11 项)。" +"请注意,PyPA 所定义的方法与 SemVer 2.0.0 规范不兼容,详情请查阅《语义版本规" +"范》`_(特别是关于优先级的" +"第 11 项)。" #: ../../source/contributor-how-to-release-flower.rst:55 msgid "Pre-release classification" msgstr "发布前分类" #: ../../source/contributor-how-to-release-flower.rst:57 -msgid "Should the next pre-release be called alpha, beta, or release candidate?" +msgid "" +"Should the next pre-release be called alpha, beta, or release candidate?" msgstr "下一个预发布版应该叫阿尔法版、贝塔版还是候选发布版?" #: ../../source/contributor-how-to-release-flower.rst:59 msgid "" -"RC: feature complete, no known issues (apart from issues that are " -"classified as \"won't fix\" for the next stable release) - if no issues " -"surface this will become the next stable release" -msgstr "RC:功能完整,无已知问题(除了下一个稳定版中被列为 \"不会修复 \"的问题)--如果没有问题出现,这将成为下一个稳定版" +"RC: feature complete, no known issues (apart from issues that are classified " +"as \"won't fix\" for the next stable release) - if no issues surface this " +"will become the next stable release" +msgstr "" +"RC:功能完整,无已知问题(除了下一个稳定版中被列为 \"不会修复 \"的问题)--如" +"果没有问题出现,这将成为下一个稳定版" #: ../../source/contributor-how-to-release-flower.rst:60 msgid "Beta: feature complete, allowed to have known issues" @@ -1086,11 +1126,12 @@ msgstr "建立虚拟环境" msgid "" "It is recommended to run your Python setup within a virtual environment. " "This guide shows three different examples how to create a virtual " -"environment with pyenv virtualenv, poetry, or Anaconda. You can follow " -"the instructions or choose your preferred setup." +"environment with pyenv virtualenv, poetry, or Anaconda. 
You can follow the " +"instructions or choose your preferred setup." msgstr "" -"建议在虚拟环境中运行 Python 设置。本指南展示了如何使用 pyenv virtualenv、poes 或 Anaconda " -"创建虚拟环境的三个不同示例。您可以按照说明或选择您喜欢的设置。" +"建议在虚拟环境中运行 Python 设置。本指南展示了如何使用 pyenv virtualenv、" +"poes 或 Anaconda 创建虚拟环境的三个不同示例。您可以按照说明或选择您喜欢的设" +"置。" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:9 msgid "Python Version" @@ -1099,12 +1140,11 @@ msgstr "Python 版本" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:11 #: ../../source/how-to-install-flower.rst:8 msgid "" -"Flower requires at least `Python 3.8 `_, " -"but `Python 3.10 `_ or above is " -"recommended." +"Flower requires at least `Python 3.8 `_, but " +"`Python 3.10 `_ or above is recommended." msgstr "" -"Flower 至少需要 `Python 3.8 `_,但建议使用 `Python " -"3.10 `_或更高版本。" +"Flower 至少需要 `Python 3.8 `_,但建议使用 " +"`Python 3.10 `_或更高版本。" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:14 msgid "Virutualenv with Pyenv/Virtualenv" @@ -1112,20 +1152,22 @@ msgstr "Virutualenv 和 Pyenv/Virtualenv" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:16 msgid "" -"One of the recommended virtual environment is `pyenv " -"`_/`virtualenv `_. Please see `Flower examples " -"`_ for details." +"One of the recommended virtual environment is `pyenv `_/`virtualenv `_. " +"Please see `Flower examples `_ for details." msgstr "" -"其中一个推荐的虚拟环境是 `pyenv `_/`virtualenv " -"`_。详情请参见 `Flower 示例 " -"`_。" +"其中一个推荐的虚拟环境是 `pyenv `_/" +"`virtualenv `_。详情请参见 " +"`Flower 示例 `_。" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:18 msgid "" "Once Pyenv is set up, you can use it to install `Python Version 3.10 " "`_ or above:" -msgstr "一旦设置好 Pyenv,就可以用它来安装 `Python 3.10 `_ 或更高版本:" +msgstr "" +"一旦设置好 Pyenv,就可以用它来安装 `Python 3.10 `_ 或更高版本:" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:24 msgid "Create the virtualenv with:" @@ -1141,17 +1183,17 @@ msgstr "有诗意的 Virtualenv" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:41 msgid "" -"The Flower examples are based on `Poetry `_ to manage dependencies. After installing Poetry you " -"simply create a virtual environment with:" +"The Flower examples are based on `Poetry `_ " +"to manage dependencies. After installing Poetry you simply create a virtual " +"environment with:" msgstr "" -"Flower 示例基于 `Poetry `_ 来管理依赖关系。安装 Poetry" -" 后,只需创建一个虚拟环境即可:" +"Flower 示例基于 `Poetry `_ 来管理依赖关系。" +"安装 Poetry 后,只需创建一个虚拟环境即可:" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:47 msgid "" -"If you open a new terminal you can activate the previously created " -"virtual environment with the following command:" +"If you open a new terminal you can activate the previously created virtual " +"environment with the following command:" msgstr "如果打开一个新终端,可以使用以下命令激活之前创建的虚拟环境:" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:55 @@ -1160,14 +1202,14 @@ msgstr "使用 Anaconda 的 Virtualenv" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:57 msgid "" -"If you prefer to use Anaconda for your virtual environment then install " -"and setup the `conda `_ package. After setting it up you can " -"create a virtual environment with:" +"If you prefer to use Anaconda for your virtual environment then install and " +"setup the `conda `_ package. 
After setting it up you can create a virtual " +"environment with:" msgstr "" -"如果你更喜欢在虚拟环境中使用 Anaconda,那么请安装并设置 `conda " -"`_ 软件包。设置完成后,您就可以使用以下工具创建虚拟环境:" +"如果你更喜欢在虚拟环境中使用 Anaconda,那么请安装并设置 `conda `_ 软件包。设" +"置完成后,您就可以使用以下工具创建虚拟环境:" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:63 msgid "and activate the virtual environment with:" @@ -1179,11 +1221,11 @@ msgstr "然后呢?" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:73 msgid "" -"As soon as you created your virtual environment you clone one of the " -"`Flower examples `_." +"As soon as you created your virtual environment you clone one of the `Flower " +"examples `_." msgstr "" -"创建虚拟环境后,您可以克隆一个 `Flower 示例 " -"`_。" +"创建虚拟环境后,您可以克隆一个 `Flower 示例 `_。" #: ../../source/contributor-how-to-write-documentation.rst:2 msgid "Write documentation" @@ -1195,24 +1237,23 @@ msgstr "项目布局" #: ../../source/contributor-how-to-write-documentation.rst:8 msgid "" -"The Flower documentation lives in the ``doc`` directory. The Sphinx-based" -" documentation system supports both reStructuredText (``.rst`` files) and" -" Markdown (``.md`` files)." +"The Flower documentation lives in the ``doc`` directory. The Sphinx-based " +"documentation system supports both reStructuredText (``.rst`` files) and " +"Markdown (``.md`` files)." msgstr "" -"Flower 文档位于 ``doc`` 目录中。基于 Sphinx 的文档系统支持 reStructuredText(``.rst`` 文件)和 " -"Markdown(``.md`` 文件)。" +"Flower 文档位于 ``doc`` 目录中。基于 Sphinx 的文档系统支持 reStructuredText" +"(``.rst`` 文件)和 Markdown(``.md`` 文件)。" #: ../../source/contributor-how-to-write-documentation.rst:10 #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142 #, fuzzy msgid "" -"Note that, in order to build the documentation locally (with ``poetry run" -" make html``, like described below), `Pandoc " -"`_ needs to be installed on the " -"system." +"Note that, in order to build the documentation locally (with ``poetry run " +"make html``, like described below), `Pandoc `_ needs to be installed on the system." msgstr "" -"请注意,要在本地构建文档(使用 ``poetry run make html``,如下所述),系统上必须安装 ``Pandoc " -"_`。" +"请注意,要在本地构建文档(使用 ``poetry run make html``,如下所述),系统上必" +"须安装 ``Pandoc _`。" #: ../../source/contributor-how-to-write-documentation.rst:14 msgid "Edit an existing page" @@ -1254,13 +1295,13 @@ msgstr "首次代码贡献" #: ../../source/contributor-ref-good-first-contributions.rst:4 msgid "" -"We welcome contributions to Flower! However, it is not always easy to " -"know where to start. We therefore put together a few recommendations on " -"where to start to increase your chances of getting your PR accepted into " -"the Flower codebase." +"We welcome contributions to Flower! However, it is not always easy to know " +"where to start. We therefore put together a few recommendations on where to " +"start to increase your chances of getting your PR accepted into the Flower " +"codebase." msgstr "" -"我们欢迎为Flower做出代码贡献!然而,要知道从哪里开始并非易事。因此,我们提出了一些建议,告诉您从哪里开始,以增加您的 PR 被 Flower" -" 代码库接受的机会。" +"我们欢迎为Flower做出代码贡献!然而,要知道从哪里开始并非易事。因此,我们提出" +"了一些建议,告诉您从哪里开始,以增加您的 PR 被 Flower 代码库接受的机会。" #: ../../source/contributor-ref-good-first-contributions.rst:11 msgid "Where to start" @@ -1268,10 +1309,12 @@ msgstr "从哪里开始" #: ../../source/contributor-ref-good-first-contributions.rst:13 msgid "" -"Until the Flower core library matures it will be easier to get PR's " -"accepted if they only touch non-core areas of the codebase. 
Good " -"candidates to get started are:" -msgstr "在 Flower 核心库成熟之前,如果 PR 只涉及代码库中的非核心区域,则会更容易被接受。可以从以下方面入手:" +"Until the Flower core library matures it will be easier to get PR's accepted " +"if they only touch non-core areas of the codebase. Good candidates to get " +"started are:" +msgstr "" +"在 Flower 核心库成熟之前,如果 PR 只涉及代码库中的非核心区域,则会更容易被接" +"受。可以从以下方面入手:" #: ../../source/contributor-ref-good-first-contributions.rst:17 msgid "Documentation: What's missing? What could be expressed more clearly?" @@ -1292,9 +1335,9 @@ msgstr "Flower Baselines的申请" #: ../../source/contributor-ref-good-first-contributions.rst:25 #, fuzzy msgid "" -"If you are not familiar with Flower Baselines, you should probably check-" -"out our `contributing guide for baselines " -"`_." +"If you are not familiar with Flower Baselines, you should probably check-out " +"our `contributing guide for baselines `_." msgstr "" "如果您对 Flower Baselines 还不熟悉,也许可以看看我们的 `Baselines贡献指南 " "`_。" @@ -1302,21 +1345,22 @@ msgstr "" #: ../../source/contributor-ref-good-first-contributions.rst:27 #, fuzzy msgid "" -"You should then check out the open `issues " -"`_" -" for baseline requests. If you find a baseline that you'd like to work on" -" and that has no assignees, feel free to assign it to yourself and start " -"working on it!" +"You should then check out the open `issues `_ for baseline " +"requests. If you find a baseline that you'd like to work on and that has no " +"assignees, feel free to assign it to yourself and start working on it!" msgstr "" -"然后查看开放的 `issues " -"`_" -" baseline请求。如果您发现了自己想做的baseline,而它还没有被分配,请随时把它分配给自己,然后开始工作!" +"然后查看开放的 `issues `_ baseline请求。如果您发现" +"了自己想做的baseline,而它还没有被分配,请随时把它分配给自己,然后开始工作!" #: ../../source/contributor-ref-good-first-contributions.rst:31 msgid "" -"Otherwise, if you don't find a baseline you'd like to work on, be sure to" -" open a new issue with the baseline request template!" -msgstr "如果您没有找到想要做的baseline,请务必使用baseline请求模板打开一个新问题(GitHub issue)!" +"Otherwise, if you don't find a baseline you'd like to work on, be sure to " +"open a new issue with the baseline request template!" +msgstr "" +"如果您没有找到想要做的baseline,请务必使用baseline请求模板打开一个新问题" +"(GitHub issue)!" #: ../../source/contributor-ref-good-first-contributions.rst:34 msgid "Request for examples" @@ -1325,9 +1369,11 @@ msgstr "示例请求" #: ../../source/contributor-ref-good-first-contributions.rst:36 msgid "" "We wish we had more time to write usage examples because we believe they " -"help users to get started with building what they want to build. Here are" -" a few ideas where we'd be happy to accept a PR:" -msgstr "我们希望有更多的时间来撰写使用示例,因为我们相信这些示例可以帮助用户开始构建他们想要的东西。以下是我们乐意接受 PR 的几个想法:" +"help users to get started with building what they want to build. Here are a " +"few ideas where we'd be happy to accept a PR:" +msgstr "" +"我们希望有更多的时间来撰写使用示例,因为我们相信这些示例可以帮助用户开始构建" +"他们想要的东西。以下是我们乐意接受 PR 的几个想法:" #: ../../source/contributor-ref-good-first-contributions.rst:40 msgid "Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch" @@ -1347,13 +1393,13 @@ msgstr "安全聚合协议" #: ../../source/contributor-ref-secure-aggregation-protocols.rst:4 msgid "" -"Include SecAgg, SecAgg+, and LightSecAgg protocol. The LightSecAgg " -"protocol has not been implemented yet, so its diagram and abstraction may" -" not be accurate in practice. The SecAgg protocol can be considered as a " -"special case of the SecAgg+ protocol." +"Include SecAgg, SecAgg+, and LightSecAgg protocol. 
The LightSecAgg protocol " +"has not been implemented yet, so its diagram and abstraction may not be " +"accurate in practice. The SecAgg protocol can be considered as a special " +"case of the SecAgg+ protocol." msgstr "" -"包括 SecAgg、SecAgg+ 和 LightSecAgg 协议。LightSecAgg " -"协议尚未实施,因此其图表和抽象在实践中可能并不准确。SecAgg 协议可视为 SecAgg+ 协议的特例。" +"包括 SecAgg、SecAgg+ 和 LightSecAgg 协议。LightSecAgg 协议尚未实施,因此其图" +"表和抽象在实践中可能并不准确。SecAgg 协议可视为 SecAgg+ 协议的特例。" #: ../../source/contributor-ref-secure-aggregation-protocols.rst:8 msgid "The :code:`SecAgg+` abstraction" @@ -1363,17 +1409,17 @@ msgstr "代码:`SecAgg+` 抽象" #: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 msgid "" "In this implementation, each client will be assigned with a unique index " -"(int) for secure aggregation, and thus many python dictionaries used have" -" keys of int type rather than ClientProxy type." +"(int) for secure aggregation, and thus many python dictionaries used have " +"keys of int type rather than ClientProxy type." msgstr "" -"在此实现中,将为每个客户端分配一个唯一索引(int),以确保聚合的安全性,因此使用的许多 python 字典的键都是 int 类型,而不是 " -"ClientProxy 类型。" +"在此实现中,将为每个客户端分配一个唯一索引(int),以确保聚合的安全性,因此使" +"用的许多 python 字典的键都是 int 类型,而不是 ClientProxy 类型。" #: ../../source/contributor-ref-secure-aggregation-protocols.rst:65 #: ../../source/contributor-ref-secure-aggregation-protocols.rst:198 msgid "" -"The Flower server will execute and process received results in the " -"following order:" +"The Flower server will execute and process received results in the following " +"order:" msgstr "Flower 服务器将按以下顺序执行和处理收到的结果:" #: ../../source/contributor-ref-secure-aggregation-protocols.rst:159 @@ -1390,20 +1436,20 @@ msgstr "在 GitHub 上投稿" #: ../../source/contributor-tutorial-contribute-on-github.rst:4 msgid "" -"This guide is for people who want to get involved with Flower, but who " -"are not used to contributing to GitHub projects." +"This guide is for people who want to get involved with Flower, but who are " +"not used to contributing to GitHub projects." msgstr "本指南适用于想参与 Flower,但不习惯为 GitHub 项目贡献的人。" #: ../../source/contributor-tutorial-contribute-on-github.rst:6 #, fuzzy msgid "" -"If you're familiar with how contributing on GitHub works, you can " -"directly checkout our :doc:`getting started guide for contributors " -"`." +"If you're familiar with how contributing on GitHub works, you can directly " +"checkout our :doc:`getting started guide for contributors `." msgstr "" -"如果您熟悉如何在 GitHub 上贡献,可以直接查看我们的 \"贡献者入门指南\" `_ 和 \"优秀的首次贡献示例\" " -"`_。" +"如果您熟悉如何在 GitHub 上贡献,可以直接查看我们的 \"贡献者入门指南\" " +"`_ 和 \"优秀的" +"首次贡献示例\" `_。" #: ../../source/contributor-tutorial-contribute-on-github.rst:10 msgid "Setting up the repository" @@ -1419,34 +1465,38 @@ msgid "" "Git is a distributed version control tool. This allows for an entire " "codebase's history to be stored and every developer's machine. It is a " "software that will need to be installed on your local machine, you can " -"follow this `guide `_ to set it up." +"follow this `guide `_ to set it up." msgstr "" -"Git 是一种分布式版本控制工具。它可以将整个代码库的历史记录保存在每个开发人员的机器上。您需要在本地计算机上安装该软件,可以按照本指南 " -"`_ 进行设置。" +"Git 是一种分布式版本控制工具。它可以将整个代码库的历史记录保存在每个开发人员" +"的机器上。您需要在本地计算机上安装该软件,可以按照本指南 `_ 进行设置。" #: ../../source/contributor-tutorial-contribute-on-github.rst:16 msgid "" "GitHub, itself, is a code hosting platform for version control and " -"collaboration. It allows for everyone to collaborate and work from " -"anywhere on remote repositories." -msgstr "GitHub 本身是一个用于版本控制和协作的代码托管平台。它允许每个人在任何地方对远程仓库进行协作和工作。" +"collaboration. 
It allows for everyone to collaborate and work from anywhere " +"on remote repositories." +msgstr "" +"GitHub 本身是一个用于版本控制和协作的代码托管平台。它允许每个人在任何地方对远" +"程仓库进行协作和工作。" #: ../../source/contributor-tutorial-contribute-on-github.rst:18 msgid "" "If you haven't already, you will need to create an account on `GitHub " "`_." -msgstr "如果还没有,您需要在 `GitHub `_ 上创建一个账户。" +msgstr "" +"如果还没有,您需要在 `GitHub `_ 上创建一个账户。" #: ../../source/contributor-tutorial-contribute-on-github.rst:20 msgid "" -"The idea behind the generic Git and GitHub workflow boils down to this: " -"you download code from a remote repository on GitHub, make changes " -"locally and keep track of them using Git and then you upload your new " -"history back to GitHub." +"The idea behind the generic Git and GitHub workflow boils down to this: you " +"download code from a remote repository on GitHub, make changes locally and " +"keep track of them using Git and then you upload your new history back to " +"GitHub." msgstr "" -"通用的 Git 和 GitHub 工作流程背后的理念可以归结为:从 GitHub 上的远程仓库下载代码,在本地进行修改并使用 Git " -"进行跟踪,然后将新的历史记录上传回 GitHub。" +"通用的 Git 和 GitHub 工作流程背后的理念可以归结为:从 GitHub 上的远程仓库下载" +"代码,在本地进行修改并使用 Git 进行跟踪,然后将新的历史记录上传回 GitHub。" #: ../../source/contributor-tutorial-contribute-on-github.rst:32 msgid "**Forking the Flower repository**" @@ -1455,23 +1505,25 @@ msgstr "**叉花仓库**" #: ../../source/contributor-tutorial-contribute-on-github.rst:24 #, fuzzy msgid "" -"A fork is a personal copy of a GitHub repository. To create one for " -"Flower, you must navigate to ``_ (while " -"connected to your GitHub account) and click the ``Fork`` button situated " -"on the top right of the page." +"A fork is a personal copy of a GitHub repository. To create one for Flower, " +"you must navigate to ``_ (while connected to " +"your GitHub account) and click the ``Fork`` button situated on the top right " +"of the page." msgstr "" "fork 是 GitHub 仓库的个人副本。要为 Flower 创建一个 fork,您必须导航到 " -"https://github.com/adap/flower(同时连接到您的 GitHub 账户),然后点击页面右上方的 ``Fork`` 按钮。" +"https://github.com/adap/flower(同时连接到您的 GitHub 账户),然后点击页面右" +"上方的 ``Fork`` 按钮。" #: ../../source/contributor-tutorial-contribute-on-github.rst:29 msgid "" "You can change the name if you want, but this is not necessary as this " -"version of Flower will be yours and will sit inside your own account " -"(i.e., in your own list of repositories). Once created, you should see on" -" the top left corner that you are looking at your own version of Flower." +"version of Flower will be yours and will sit inside your own account (i.e., " +"in your own list of repositories). Once created, you should see on the top " +"left corner that you are looking at your own version of Flower." msgstr "" -"您可以更改名称,但没有必要,因为这个版本的 Flower " -"将是您自己的,并位于您自己的账户中(即,在您自己的版本库列表中)。创建完成后,您会在左上角看到自己的 Flower 版本。" +"您可以更改名称,但没有必要,因为这个版本的 Flower 将是您自己的,并位于您自己" +"的账户中(即,在您自己的版本库列表中)。创建完成后,您会在左上角看到自己的 " +"Flower 版本。" #: ../../source/contributor-tutorial-contribute-on-github.rst:47 msgid "**Cloning your forked repository**" @@ -1480,25 +1532,29 @@ msgstr "**克隆你的分叉仓库**" #: ../../source/contributor-tutorial-contribute-on-github.rst:35 msgid "" "The next step is to download the forked repository on your machine to be " -"able to make changes to it. On your forked repository page, you should " -"first click on the ``Code`` button on the right, this will give you the " -"ability to copy the HTTPS link of the repository." +"able to make changes to it. 
On your forked repository page, you should first " +"click on the ``Code`` button on the right, this will give you the ability to " +"copy the HTTPS link of the repository." msgstr "" -"下一步是在你的机器上下载分叉版本库,以便对其进行修改。在分叉版本库页面上,首先点击右侧的 \"代码 \"按钮,这样就能复制版本库的 HTTPS " -"链接。" +"下一步是在你的机器上下载分叉版本库,以便对其进行修改。在分叉版本库页面上,首" +"先点击右侧的 \"代码 \"按钮,这样就能复制版本库的 HTTPS 链接。" #: ../../source/contributor-tutorial-contribute-on-github.rst:41 msgid "" "Once you copied the \\, you can open a terminal on your machine, " "navigate to the place you want to download the repository to and type:" -msgstr "一旦复制了 (),你就可以在你的机器上打开一个终端,导航到你想下载软件源的地方,然后键入:" +msgstr "" +"一旦复制了 (),你就可以在你的机器上打开一个终端,导航到你想下载软件" +"源的地方,然后键入:" #: ../../source/contributor-tutorial-contribute-on-github.rst:47 #, fuzzy msgid "" -"This will create a ``flower/`` (or the name of your fork if you renamed " -"it) folder in the current working directory." -msgstr "这将在当前工作目录下创建一个 `flower/`(如果重命名了,则使用 fork 的名称)文件夹。" +"This will create a ``flower/`` (or the name of your fork if you renamed it) " +"folder in the current working directory." +msgstr "" +"这将在当前工作目录下创建一个 `flower/`(如果重命名了,则使用 fork 的名称)文" +"件夹。" #: ../../source/contributor-tutorial-contribute-on-github.rst:66 msgid "**Add origin**" @@ -1510,13 +1566,14 @@ msgstr "然后,您就可以进入存储库文件夹:" #: ../../source/contributor-tutorial-contribute-on-github.rst:56 msgid "" -"And here we will need to add an origin to our repository. The origin is " -"the \\ of the remote fork repository. To obtain it, we can do as " -"previously mentioned by going to our fork repository on our GitHub " -"account and copying the link." +"And here we will need to add an origin to our repository. The origin is the " +"\\ of the remote fork repository. To obtain it, we can do as " +"previously mentioned by going to our fork repository on our GitHub account " +"and copying the link." msgstr "" "在这里,我们需要为我们的版本库添加一个 origin。origin 是远程 fork 仓库的 " -"\\。要获得它,我们可以像前面提到的那样,访问 GitHub 账户上的分叉仓库并复制链接。" +"\\。要获得它,我们可以像前面提到的那样,访问 GitHub 账户上的分叉仓库并" +"复制链接。" #: ../../source/contributor-tutorial-contribute-on-github.rst:61 msgid "" @@ -1533,28 +1590,33 @@ msgstr "**增加上游**" msgid "" "Now we will add an upstream address to our repository. Still in the same " "directory, we must run the following command:" -msgstr "现在,我们要为版本库添加一个上游地址。还是在同一目录下,我们必须运行以下命令:" +msgstr "" +"现在,我们要为版本库添加一个上游地址。还是在同一目录下,我们必须运行以下命" +"令:" #: ../../source/contributor-tutorial-contribute-on-github.rst:76 -msgid "The following diagram visually explains what we did in the previous steps:" +msgid "" +"The following diagram visually explains what we did in the previous steps:" msgstr "下图直观地解释了我们在前面步骤中的操作:" #: ../../source/contributor-tutorial-contribute-on-github.rst:80 msgid "" -"The upstream is the GitHub remote address of the parent repository (in " -"this case Flower), i.e. the one we eventually want to contribute to and " -"therefore need an up-to-date history of. The origin is just the GitHub " -"remote address of the forked repository we created, i.e. the copy (fork) " -"in our own account." +"The upstream is the GitHub remote address of the parent repository (in this " +"case Flower), i.e. the one we eventually want to contribute to and therefore " +"need an up-to-date history of. The origin is just the GitHub remote address " +"of the forked repository we created, i.e. the copy (fork) in our own account." 
msgstr "" -"上游是父版本库(这里是 Flower)的 GitHub 远程地址,即我们最终要贡献的版本库,因此需要最新的历史记录。origin " -"只是我们创建的分叉仓库的 GitHub 远程地址,即我们自己账户中的副本(分叉)。" +"上游是父版本库(这里是 Flower)的 GitHub 远程地址,即我们最终要贡献的版本库," +"因此需要最新的历史记录。origin 只是我们创建的分叉仓库的 GitHub 远程地址,即我" +"们自己账户中的副本(分叉)。" #: ../../source/contributor-tutorial-contribute-on-github.rst:84 msgid "" "To make sure our local version of the fork is up-to-date with the latest " "changes from the Flower repository, we can execute the following command:" -msgstr "为了确保本地版本的分叉程序与 Flower 代码库的最新更改保持一致,我们可以执行以下命令:" +msgstr "" +"为了确保本地版本的分叉程序与 Flower 代码库的最新更改保持一致,我们可以执行以" +"下命令:" #: ../../source/contributor-tutorial-contribute-on-github.rst:93 msgid "Setting up the coding environment" @@ -1564,10 +1626,12 @@ msgstr "设置编码环境" #, fuzzy msgid "" "This can be achieved by following this :doc:`getting started guide for " -"contributors ` (note " -"that you won't need to clone the repository). Once you are able to write " -"code and test it, you can finally start making changes!" -msgstr "您可以按照这份 \"贡献者入门指南\"__(注意,您不需要克隆版本库)来实现这一点。一旦您能够编写代码并进行测试,您就可以开始修改了!" +"contributors ` (note that " +"you won't need to clone the repository). Once you are able to write code and " +"test it, you can finally start making changes!" +msgstr "" +"您可以按照这份 \"贡献者入门指南\"__(注意,您不需要克隆版本库)来实现这一点。" +"一旦您能够编写代码并进行测试,您就可以开始修改了!" #: ../../source/contributor-tutorial-contribute-on-github.rst:100 msgid "Making changes" @@ -1575,8 +1639,7 @@ msgstr "做出改变" #: ../../source/contributor-tutorial-contribute-on-github.rst:102 msgid "" -"Before making any changes make sure you are up-to-date with your " -"repository:" +"Before making any changes make sure you are up-to-date with your repository:" msgstr "在进行任何更改之前,请确保您的版本库是最新的:" #: ../../source/contributor-tutorial-contribute-on-github.rst:108 @@ -1589,15 +1652,15 @@ msgstr "**创建一个新分支**" #: ../../source/contributor-tutorial-contribute-on-github.rst:115 msgid "" -"To make the history cleaner and easier to work with, it is good practice " -"to create a new branch for each feature/project that needs to be " -"implemented." -msgstr "为了使历史记录更简洁、更易于操作,为每个需要实现的功能/项目创建一个新分支是个不错的做法。" +"To make the history cleaner and easier to work with, it is good practice to " +"create a new branch for each feature/project that needs to be implemented." +msgstr "" +"为了使历史记录更简洁、更易于操作,为每个需要实现的功能/项目创建一个新分支是个" +"不错的做法。" #: ../../source/contributor-tutorial-contribute-on-github.rst:118 msgid "" -"To do so, just run the following command inside the repository's " -"directory:" +"To do so, just run the following command inside the repository's directory:" msgstr "为此,只需在版本库目录下运行以下命令即可:" #: ../../source/contributor-tutorial-contribute-on-github.rst:125 @@ -1605,7 +1668,8 @@ msgid "**Make changes**" msgstr "**进行修改**" #: ../../source/contributor-tutorial-contribute-on-github.rst:125 -msgid "Write great code and create wonderful changes using your favorite editor!" +msgid "" +"Write great code and create wonderful changes using your favorite editor!" msgstr "使用您最喜欢的编辑器编写优秀的代码并创建精彩的更改!" #: ../../source/contributor-tutorial-contribute-on-github.rst:138 @@ -1614,10 +1678,12 @@ msgstr "**测试并格式化您的代码**" #: ../../source/contributor-tutorial-contribute-on-github.rst:128 msgid "" -"Don't forget to test and format your code! Otherwise your code won't be " -"able to be merged into the Flower repository. This is done so the " -"codebase stays consistent and easy to understand." -msgstr "不要忘记测试和格式化您的代码!否则您的代码将无法并入 Flower 代码库。这样做是为了使代码库保持一致并易于理解。" +"Don't forget to test and format your code! 
Otherwise your code won't be able " +"to be merged into the Flower repository. This is done so the codebase stays " +"consistent and easy to understand." +msgstr "" +"不要忘记测试和格式化您的代码!否则您的代码将无法并入 Flower 代码库。这样做是" +"为了使代码库保持一致并易于理解。" #: ../../source/contributor-tutorial-contribute-on-github.rst:131 msgid "To do so, we have written a few scripts that you can execute:" @@ -1629,8 +1695,8 @@ msgstr "**舞台变化**" #: ../../source/contributor-tutorial-contribute-on-github.rst:141 msgid "" -"Before creating a commit that will update your history, you must specify " -"to Git which files it needs to take into account." +"Before creating a commit that will update your history, you must specify to " +"Git which files it needs to take into account." msgstr "在创建更新历史记录的提交之前,必须向 Git 说明需要考虑哪些文件。" #: ../../source/contributor-tutorial-contribute-on-github.rst:143 @@ -1639,10 +1705,12 @@ msgstr "这可以通过:" #: ../../source/contributor-tutorial-contribute-on-github.rst:149 msgid "" -"To check which files have been modified compared to the last version " -"(last commit) and to see which files are staged for commit, you can use " -"the :code:`git status` command." -msgstr "要查看与上一版本(上次提交)相比哪些文件已被修改,以及哪些文件处于提交阶段,可以使用 :code:`git status` 命令。" +"To check which files have been modified compared to the last version (last " +"commit) and to see which files are staged for commit, you can use the :code:" +"`git status` command." +msgstr "" +"要查看与上一版本(上次提交)相比哪些文件已被修改,以及哪些文件处于提交阶段," +"可以使用 :code:`git status` 命令。" #: ../../source/contributor-tutorial-contribute-on-github.rst:160 msgid "**Commit changes**" @@ -1652,16 +1720,17 @@ msgstr "**提交更改**" msgid "" "Once you have added all the files you wanted to commit using :code:`git " "add`, you can finally create your commit using this command:" -msgstr "使用 :code:`git add` 添加完所有要提交的文件后,就可以使用此命令创建提交了:" +msgstr "" +"使用 :code:`git add` 添加完所有要提交的文件后,就可以使用此命令创建提交了:" #: ../../source/contributor-tutorial-contribute-on-github.rst:159 msgid "" -"The \\ is there to explain to others what the commit " -"does. It should be written in an imperative style and be concise. An " -"example would be :code:`git commit -m \"Add images to README\"`." +"The \\ is there to explain to others what the commit does. " +"It should be written in an imperative style and be concise. An example would " +"be :code:`git commit -m \"Add images to README\"`." msgstr "" -" 用于向他人解释提交的作用。它应该以命令式风格书写,并且简明扼要。例如 :code:`git commit " -"-m \"Add images to README\"`。" +" 用于向他人解释提交的作用。它应该以命令式风格书写,并且简明" +"扼要。例如 :code:`git commit -m \"Add images to README\"`。" #: ../../source/contributor-tutorial-contribute-on-github.rst:171 msgid "**Push the changes to the fork**" @@ -1669,16 +1738,19 @@ msgstr "**将更改推送到分叉**" #: ../../source/contributor-tutorial-contribute-on-github.rst:163 msgid "" -"Once we have committed our changes, we have effectively updated our local" -" history, but GitHub has no way of knowing this unless we push our " -"changes to our origin's remote address:" -msgstr "一旦提交了修改,我们就有效地更新了本地历史记录,但除非我们将修改推送到原点的远程地址,否则 GitHub 无法得知:" +"Once we have committed our changes, we have effectively updated our local " +"history, but GitHub has no way of knowing this unless we push our changes to " +"our origin's remote address:" +msgstr "" +"一旦提交了修改,我们就有效地更新了本地历史记录,但除非我们将修改推送到原点的" +"远程地址,否则 GitHub 无法得知:" #: ../../source/contributor-tutorial-contribute-on-github.rst:170 msgid "" "Once this is done, you will see on the GitHub that your forked repo was " "updated with the changes you have made." 
-msgstr "完成此操作后,您将在 GitHub 上看到您的分叉仓库已根据您所做的更改进行了更新。" +msgstr "" +"完成此操作后,您将在 GitHub 上看到您的分叉仓库已根据您所做的更改进行了更新。" #: ../../source/contributor-tutorial-contribute-on-github.rst:174 msgid "Creating and merging a pull request (PR)" @@ -1690,8 +1762,8 @@ msgstr "**创建 PR**" #: ../../source/contributor-tutorial-contribute-on-github.rst:177 msgid "" -"Once you have pushed changes, on the GitHub webpage of your repository " -"you should see the following message:" +"Once you have pushed changes, on the GitHub webpage of your repository you " +"should see the following message:" msgstr "推送更改后,在仓库的 GitHub 网页上应该会看到以下信息:" #: ../../source/contributor-tutorial-contribute-on-github.rst:181 @@ -1707,49 +1779,57 @@ msgid "" msgstr "点击 \"比较和拉取请求 \"按钮后,您应该会看到类似下面的内容:" #: ../../source/contributor-tutorial-contribute-on-github.rst:187 -msgid "At the top you have an explanation of which branch will be merged where:" +msgid "" +"At the top you have an explanation of which branch will be merged where:" msgstr "在顶部,你可以看到关于哪个分支将被合并的说明:" #: ../../source/contributor-tutorial-contribute-on-github.rst:191 msgid "" -"In this example you can see that the request is to merge the branch " -"``doc-fixes`` from my forked repository to branch ``main`` from the " -"Flower repository." -msgstr "在这个例子中,你可以看到请求将我分叉的版本库中的分支 ``doc-fixes`` 合并到 Flower 版本库中的分支 ``main``。" +"In this example you can see that the request is to merge the branch ``doc-" +"fixes`` from my forked repository to branch ``main`` from the Flower " +"repository." +msgstr "" +"在这个例子中,你可以看到请求将我分叉的版本库中的分支 ``doc-fixes`` 合并到 " +"Flower 版本库中的分支 ``main``。" #: ../../source/contributor-tutorial-contribute-on-github.rst:193 msgid "" -"The input box in the middle is there for you to describe what your PR " -"does and to link it to existing issues. We have placed comments (that " -"won't be rendered once the PR is opened) to guide you through the " -"process." -msgstr "中间的输入框供您描述 PR 的作用,并将其与现有问题联系起来。我们在此放置了注释(一旦 PR 打开,注释将不会显示),以指导您完成整个过程。" +"The input box in the middle is there for you to describe what your PR does " +"and to link it to existing issues. We have placed comments (that won't be " +"rendered once the PR is opened) to guide you through the process." +msgstr "" +"中间的输入框供您描述 PR 的作用,并将其与现有问题联系起来。我们在此放置了注释" +"(一旦 PR 打开,注释将不会显示),以指导您完成整个过程。" #: ../../source/contributor-tutorial-contribute-on-github.rst:196 #, fuzzy msgid "" "It is important to follow the instructions described in comments. For " -"instance, in order to not break how our changelog system works, you " -"should read the information above the ``Changelog entry`` section " -"carefully. You can also checkout some examples and details in the " -":ref:`changelogentry` appendix." +"instance, in order to not break how our changelog system works, you should " +"read the information above the ``Changelog entry`` section carefully. You " +"can also checkout some examples and details in the :ref:`changelogentry` " +"appendix." msgstr "" -"请务必遵守注释中的说明。例如,为了不破坏我们的更新日志系统,你应该仔细阅读\"" -"`更新日志条目``\"部分上面的信息。您还可以查看 :ref:`changelogentry` " -"附录中的一些示例和细节。" +"请务必遵守注释中的说明。例如,为了不破坏我们的更新日志系统,你应该仔细阅读\"`" +"更新日志条目``\"部分上面的信息。您还可以查看 :ref:`changelogentry` 附录中的一" +"些示例和细节。" #: ../../source/contributor-tutorial-contribute-on-github.rst:200 msgid "" "At the bottom you will find the button to open the PR. This will notify " -"reviewers that a new PR has been opened and that they should look over it" -" to merge or to request changes." 
-msgstr "在底部,您可以找到打开 PR 的按钮。这将通知审核人员新的 PR 已经打开,他们应该查看该 PR 以进行合并或要求修改。" +"reviewers that a new PR has been opened and that they should look over it to " +"merge or to request changes." +msgstr "" +"在底部,您可以找到打开 PR 的按钮。这将通知审核人员新的 PR 已经打开,他们应该" +"查看该 PR 以进行合并或要求修改。" #: ../../source/contributor-tutorial-contribute-on-github.rst:203 msgid "" -"If your PR is not yet ready for review, and you don't want to notify " -"anyone, you have the option to create a draft pull request:" -msgstr "如果您的 PR 尚未准备好接受审核,而且您不想通知任何人,您可以选择创建一个草案拉取请求:" +"If your PR is not yet ready for review, and you don't want to notify anyone, " +"you have the option to create a draft pull request:" +msgstr "" +"如果您的 PR 尚未准备好接受审核,而且您不想通知任何人,您可以选择创建一个草案" +"拉取请求:" #: ../../source/contributor-tutorial-contribute-on-github.rst:208 msgid "**Making new changes**" @@ -1758,9 +1838,11 @@ msgstr "**作出新的改变**" #: ../../source/contributor-tutorial-contribute-on-github.rst:208 msgid "" "Once the PR has been opened (as draft or not), you can still push new " -"commits to it the same way we did before, by making changes to the branch" -" associated with the PR." -msgstr "一旦 PR 被打开(无论是否作为草案),你仍然可以像以前一样,通过修改与 PR 关联的分支来推送新的提交。" +"commits to it the same way we did before, by making changes to the branch " +"associated with the PR." +msgstr "" +"一旦 PR 被打开(无论是否作为草案),你仍然可以像以前一样,通过修改与 PR 关联" +"的分支来推送新的提交。" #: ../../source/contributor-tutorial-contribute-on-github.rst:230 msgid "**Review the PR**" @@ -1768,14 +1850,14 @@ msgstr "**审查 PR**" #: ../../source/contributor-tutorial-contribute-on-github.rst:211 msgid "" -"Once the PR has been opened or once the draft PR has been marked as " -"ready, a review from code owners will be automatically requested:" +"Once the PR has been opened or once the draft PR has been marked as ready, a " +"review from code owners will be automatically requested:" msgstr "一旦 PR 被打开或 PR 草案被标记为就绪,就会自动要求代码所有者进行审核:" #: ../../source/contributor-tutorial-contribute-on-github.rst:215 msgid "" -"Code owners will then look into the code, ask questions, request changes " -"or validate the PR." +"Code owners will then look into the code, ask questions, request changes or " +"validate the PR." msgstr "然后,代码所有者会查看代码、提出问题、要求修改或验证 PR。" #: ../../source/contributor-tutorial-contribute-on-github.rst:217 @@ -1784,8 +1866,8 @@ msgstr "如果有正在进行的更改请求,合并将被阻止。" #: ../../source/contributor-tutorial-contribute-on-github.rst:221 msgid "" -"To resolve them, just push the necessary changes to the branch associated" -" with the PR:" +"To resolve them, just push the necessary changes to the branch associated " +"with the PR:" msgstr "要解决这些问题,只需将必要的更改推送到与 PR 关联的分支即可:" #: ../../source/contributor-tutorial-contribute-on-github.rst:225 @@ -1794,8 +1876,7 @@ msgstr "并解决对话:" #: ../../source/contributor-tutorial-contribute-on-github.rst:229 msgid "" -"Once all the conversations have been resolved, you can re-request a " -"review." +"Once all the conversations have been resolved, you can re-request a review." msgstr "一旦所有对话都得到解决,您就可以重新申请审核。" #: ../../source/contributor-tutorial-contribute-on-github.rst:250 @@ -1804,15 +1885,19 @@ msgstr "**一旦 PR 被合并**" #: ../../source/contributor-tutorial-contribute-on-github.rst:233 msgid "" -"If all the automatic tests have passed and reviewers have no more changes" -" to request, they can approve the PR and merge it." -msgstr "如果所有自动测试都已通过,且审核员不再需要修改,他们就可以批准 PR 并将其合并。" +"If all the automatic tests have passed and reviewers have no more changes to " +"request, they can approve the PR and merge it." 
+msgstr "" +"如果所有自动测试都已通过,且审核员不再需要修改,他们就可以批准 PR 并将其合" +"并。" #: ../../source/contributor-tutorial-contribute-on-github.rst:237 msgid "" "Once it is merged, you can delete the branch on GitHub (a button should " "appear to do so) and also delete it locally by doing:" -msgstr "合并后,您可以在 GitHub 上删除该分支(会出现一个删除按钮),也可以在本地删除该分支:" +msgstr "" +"合并后,您可以在 GitHub 上删除该分支(会出现一个删除按钮),也可以在本地删除" +"该分支:" #: ../../source/contributor-tutorial-contribute-on-github.rst:244 msgid "Then you should update your forked repository by doing:" @@ -1829,28 +1914,33 @@ msgstr "问题" #: ../../source/contributor-tutorial-contribute-on-github.rst:258 #, fuzzy msgid "" -"For our documentation, we've started to use the `Diàtaxis framework " -"`_." -msgstr "对于我们的文档,我们已经开始使用 \"Diàtaxis 框架 `_\"。" +"For our documentation, we've started to use the `Diàtaxis framework `_." +msgstr "" +"对于我们的文档,我们已经开始使用 \"Diàtaxis 框架 `_\"。" #: ../../source/contributor-tutorial-contribute-on-github.rst:260 #, fuzzy msgid "" -"Our \"How to\" guides should have titles that continue the sentence \"How" -" to …\", for example, \"How to upgrade to Flower 1.0\"." -msgstr "我们的 \"如何 \"指南的标题应延续 \"如何...... \"的句式,例如 \"如何升级到 Flower 1.0\"。" +"Our \"How to\" guides should have titles that continue the sentence \"How to " +"…\", for example, \"How to upgrade to Flower 1.0\"." +msgstr "" +"我们的 \"如何 \"指南的标题应延续 \"如何...... \"的句式,例如 \"如何升级到 " +"Flower 1.0\"。" #: ../../source/contributor-tutorial-contribute-on-github.rst:262 msgid "" "Most of our guides do not follow this new format yet, and changing their " "title is (unfortunately) more involved than one might think." -msgstr "我们的大多数指南还没有采用这种新格式,而更改其标题(不幸的是)比人们想象的要复杂得多。" +msgstr "" +"我们的大多数指南还没有采用这种新格式,而更改其标题(不幸的是)比人们想象的要" +"复杂得多。" #: ../../source/contributor-tutorial-contribute-on-github.rst:264 #, fuzzy msgid "" -"This issue is about changing the title of a doc from present continuous " -"to present simple." +"This issue is about changing the title of a doc from present continuous to " +"present simple." msgstr "这个问题是关于将文档标题从现在进行时改为现在进行时。" #: ../../source/contributor-tutorial-contribute-on-github.rst:266 @@ -1858,7 +1948,8 @@ msgstr "这个问题是关于将文档标题从现在进行时改为现在进行 msgid "" "Let's take the example of \"Saving Progress\" which we changed to \"Save " "Progress\". Does this pass our check?" -msgstr "以 \"保存进度 \"为例,我们将其改为 \"保存进度\"。这是否通过了我们的检查?" +msgstr "" +"以 \"保存进度 \"为例,我们将其改为 \"保存进度\"。这是否通过了我们的检查?" #: ../../source/contributor-tutorial-contribute-on-github.rst:268 #, fuzzy @@ -1879,7 +1970,9 @@ msgstr "解决方案" msgid "" "This is a tiny change, but it'll allow us to test your end-to-end setup. " "After cloning and setting up the Flower repo, here's what you should do:" -msgstr "这只是一个很小的改动,但可以让我们测试你的端到端设置。克隆并设置好 Flower repo 后,你应该这样做:" +msgstr "" +"这只是一个很小的改动,但可以让我们测试你的端到端设置。克隆并设置好 Flower " +"repo 后,你应该这样做:" #: ../../source/contributor-tutorial-contribute-on-github.rst:277 #, fuzzy @@ -1896,11 +1989,11 @@ msgstr "在 `.rst` 文件中进行修改(注意,标题下的破折号应与 #: ../../source/contributor-tutorial-contribute-on-github.rst:279 #, fuzzy msgid "" -"Build the docs and `check the result `_" +"Build the docs and `check the result `_" msgstr "" -"构建文档并检查结果: ``_" +"构建文档并检查结果: ``_" #: ../../source/contributor-tutorial-contribute-on-github.rst:282 msgid "Rename file" @@ -1908,13 +2001,14 @@ msgstr "重命名文件" #: ../../source/contributor-tutorial-contribute-on-github.rst:284 msgid "" -"You might have noticed that the file name still reflects the old wording." 
-" If we just change the file, then we break all existing links to it - it " -"is **very important** to avoid that, breaking links can harm our search " -"engine ranking." +"You might have noticed that the file name still reflects the old wording. If " +"we just change the file, then we break all existing links to it - it is " +"**very important** to avoid that, breaking links can harm our search engine " +"ranking." msgstr "" -"您可能已经注意到,文件名仍然反映了旧的措辞。如果我们只是更改文件,那么就会破坏与该文件的所有现有链接--" -"避免这种情况是***重要的,破坏链接会损害我们的搜索引擎排名。" +"您可能已经注意到,文件名仍然反映了旧的措辞。如果我们只是更改文件,那么就会破" +"坏与该文件的所有现有链接--避免这种情况是***重要的,破坏链接会损害我们的搜索引" +"擎排名。" #: ../../source/contributor-tutorial-contribute-on-github.rst:287 #, fuzzy @@ -1934,9 +2028,11 @@ msgstr "在 `doc/source/conf.py` 中添加重定向规则" #: ../../source/contributor-tutorial-contribute-on-github.rst:292 #, fuzzy msgid "" -"This will cause a redirect from ``saving-progress.html`` to ``save-" -"progress.html``, old links will continue to work." -msgstr "这将导致从 `saving-progress.html` 重定向到 `save-progress.html`,旧链接将继续工作。" +"This will cause a redirect from ``saving-progress.html`` to ``save-progress." +"html``, old links will continue to work." +msgstr "" +"这将导致从 `saving-progress.html` 重定向到 `save-progress.html`,旧链接将继续" +"工作。" #: ../../source/contributor-tutorial-contribute-on-github.rst:295 msgid "Apply changes in the index file" @@ -1948,7 +2044,9 @@ msgid "" "For the lateral navigation bar to work properly, it is very important to " "update the ``index.rst`` file as well. This is where we define the whole " "arborescence of the navbar." -msgstr "要使横向导航栏正常工作,更新 `index.rst` 文件也非常重要。我们就是在这里定义整个导航栏的结构。" +msgstr "" +"要使横向导航栏正常工作,更新 `index.rst` 文件也非常重要。我们就是在这里定义整" +"个导航栏的结构。" #: ../../source/contributor-tutorial-contribute-on-github.rst:300 #, fuzzy @@ -1962,8 +2060,8 @@ msgstr "开放式 PR" #: ../../source/contributor-tutorial-contribute-on-github.rst:305 #, fuzzy msgid "" -"Commit the changes (commit messages are always imperative: \"Do " -"something\", in this case \"Change …\")" +"Commit the changes (commit messages are always imperative: \"Do something\", " +"in this case \"Change …\")" msgstr "提交更改(提交信息总是命令式的:\"做某事\",这里是 \"更改......\")" #: ../../source/contributor-tutorial-contribute-on-github.rst:306 @@ -1988,28 +2086,31 @@ msgstr "如何撰写好的公关标题" #: ../../source/contributor-tutorial-contribute-on-github.rst:315 msgid "" -"A well-crafted PR title helps team members quickly understand the purpose" -" and scope of the changes being proposed. Here's a guide to help you " -"write a good GitHub PR title:" -msgstr "一个精心撰写的公关标题能帮助团队成员迅速了解所提修改的目的和范围。以下指南可帮助您撰写一个好的 GitHub PR 标题:" +"A well-crafted PR title helps team members quickly understand the purpose " +"and scope of the changes being proposed. Here's a guide to help you write a " +"good GitHub PR title:" +msgstr "" +"一个精心撰写的公关标题能帮助团队成员迅速了解所提修改的目的和范围。以下指南可" +"帮助您撰写一个好的 GitHub PR 标题:" #: ../../source/contributor-tutorial-contribute-on-github.rst:317 msgid "" -"1. Be Clear and Concise: Provide a clear summary of the changes in a " -"concise manner. 1. Use Actionable Verbs: Start with verbs like \"Add,\" " -"\"Update,\" or \"Fix\" to indicate the purpose. 1. Include Relevant " -"Information: Mention the affected feature or module for context. 1. Keep " -"it Short: Avoid lengthy titles for easy readability. 1. Use Proper " -"Capitalization and Punctuation: Follow grammar rules for clarity." +"1. Be Clear and Concise: Provide a clear summary of the changes in a concise " +"manner. 1. 
Use Actionable Verbs: Start with verbs like \"Add,\" \"Update,\" " +"or \"Fix\" to indicate the purpose. 1. Include Relevant Information: Mention " +"the affected feature or module for context. 1. Keep it Short: Avoid lengthy " +"titles for easy readability. 1. Use Proper Capitalization and Punctuation: " +"Follow grammar rules for clarity." msgstr "" -"1. 简明扼要: 以简明扼要的方式清楚地概述变化。1. 使用可操作的动词: 使用 \"添加\"、\"更新 \"或 \"修复 " -"\"等动词来表明目的。1. 包含相关信息: 提及受影响的功能或模块以了解上下文。1. 简短:避免冗长的标题,以方便阅读。1. " -"使用正确的大小写和标点符号: 遵守语法规则,以确保清晰。" +"1. 简明扼要: 以简明扼要的方式清楚地概述变化。1. 使用可操作的动词: 使用 \"添" +"加\"、\"更新 \"或 \"修复 \"等动词来表明目的。1. 包含相关信息: 提及受影响的功" +"能或模块以了解上下文。1. 简短:避免冗长的标题,以方便阅读。1. 使用正确的大小" +"写和标点符号: 遵守语法规则,以确保清晰。" #: ../../source/contributor-tutorial-contribute-on-github.rst:323 msgid "" -"Let's start with a few examples for titles that should be avoided because" -" they do not provide meaningful information:" +"Let's start with a few examples for titles that should be avoided because " +"they do not provide meaningful information:" msgstr "让我们先举例说明几个应该避免使用的标题,因为它们不能提供有意义的信息:" #: ../../source/contributor-tutorial-contribute-on-github.rst:325 @@ -2034,10 +2135,12 @@ msgstr "更改 SomeModule" #: ../../source/contributor-tutorial-contribute-on-github.rst:331 msgid "" -"Here are a few positive examples which provide helpful information " -"without repeating how they do it, as that is already visible in the " -"\"Files changed\" section of the PR:" -msgstr "这里有几个正面的例子,提供了有用的信息,但没有重复他们是如何做的,因为在 PR 的 \"已更改文件 \"部分已经可以看到:" +"Here are a few positive examples which provide helpful information without " +"repeating how they do it, as that is already visible in the \"Files " +"changed\" section of the PR:" +msgstr "" +"这里有几个正面的例子,提供了有用的信息,但没有重复他们是如何做的,因为在 PR " +"的 \"已更改文件 \"部分已经可以看到:" #: ../../source/contributor-tutorial-contribute-on-github.rst:333 msgid "Update docs banner to mention Flower Summit 2023" @@ -2053,13 +2156,13 @@ msgstr "删除 FedAvg 子类化策略中的多余属性" #: ../../source/contributor-tutorial-contribute-on-github.rst:336 #, fuzzy -msgid "Add CI job to deploy the staging system when the ``main`` branch changes" +msgid "" +"Add CI job to deploy the staging system when the ``main`` branch changes" msgstr "添加 CI 作业,以便在 \"主 \"分支发生变化时部署暂存系统" #: ../../source/contributor-tutorial-contribute-on-github.rst:337 msgid "" -"Add new amazing library which will be used to improve the simulation " -"engine" +"Add new amazing library which will be used to improve the simulation engine" msgstr "添加新的惊人库,用于改进模拟引擎" #: ../../source/contributor-tutorial-contribute-on-github.rst:341 @@ -2073,16 +2176,15 @@ msgstr "接下来的步骤" #: ../../source/contributor-tutorial-contribute-on-github.rst:343 msgid "" -"Once you have made your first PR, and want to contribute more, be sure to" -" check out the following :" +"Once you have made your first PR, and want to contribute more, be sure to " +"check out the following :" msgstr "一旦您完成了第一份 PR,并希望做出更多贡献,请务必查看以下内容:" #: ../../source/contributor-tutorial-contribute-on-github.rst:345 #, fuzzy msgid "" -":doc:`Good first contributions `, where you should particularly look into the " -":code:`baselines` contributions." +":doc:`Good first contributions `, " +"where you should particularly look into the :code:`baselines` contributions." msgstr "" "`优秀的首次贡献 `_,在这里你应该特别看看 :code:`baselines` 的贡献。" @@ -2100,50 +2202,52 @@ msgstr "更新日志" #: ../../source/contributor-tutorial-contribute-on-github.rst:356 #, fuzzy msgid "" -"When opening a new PR, inside its description, there should be a " -"``Changelog entry`` header." 
+"When opening a new PR, inside its description, there should be a ``Changelog " +"entry`` header." msgstr "打开一个新 PR 时,在其描述中应有一个 ``Changelog entry`` 标头。" #: ../../source/contributor-tutorial-contribute-on-github.rst:358 #, fuzzy msgid "" -"Above this header you should see the following comment that explains how " -"to write your changelog entry:" +"Above this header you should see the following comment that explains how to " +"write your changelog entry:" msgstr "在页眉上方,你会看到以下注释,说明如何编写更新日志条目:" #: ../../source/contributor-tutorial-contribute-on-github.rst:360 #, fuzzy msgid "" "Inside the following 'Changelog entry' section, you should put the " -"description of your changes that will be added to the changelog alongside" -" your PR title." -msgstr "在下面的 \"更新日志条目 \"部分中,您应该在 PR " -"标题旁边写上将添加到更新日志中的更改描述。" +"description of your changes that will be added to the changelog alongside " +"your PR title." +msgstr "" +"在下面的 \"更新日志条目 \"部分中,您应该在 PR 标题旁边写上将添加到更新日志中" +"的更改描述。" #: ../../source/contributor-tutorial-contribute-on-github.rst:363 #, fuzzy msgid "" -"If the section is completely empty (without any token) or non-existent, " -"the changelog will just contain the title of the PR for the changelog " -"entry, without any description." -msgstr "如果该部分完全为空(没有任何标记)或不存在,更新日志将只包含更新日志条目的 " +"If the section is completely empty (without any token) or non-existent, the " +"changelog will just contain the title of the PR for the changelog entry, " +"without any description." +msgstr "" +"如果该部分完全为空(没有任何标记)或不存在,更新日志将只包含更新日志条目的 " "PR 标题,而不包含任何描述。" #: ../../source/contributor-tutorial-contribute-on-github.rst:366 #, fuzzy msgid "" -"If the section contains some text other than tokens, it will use it to " -"add a description to the change." +"If the section contains some text other than tokens, it will use it to add a " +"description to the change." 
msgstr "如果该部分包含标记以外的文本,它将使用这些文本为更改添加说明。" #: ../../source/contributor-tutorial-contribute-on-github.rst:368 #, fuzzy msgid "" -"If the section contains one of the following tokens it will ignore any " -"other text and put the PR under the corresponding section of the " -"changelog:" -msgstr "如果该部分包含以下标记之一,它将忽略任何其他文本,并将 PR " -"放在更新日志的相应部分下:" +"If the section contains one of the following tokens it will ignore any other " +"text and put the PR under the corresponding section of the changelog:" +msgstr "" +"如果该部分包含以下标记之一,它将忽略任何其他文本,并将 PR 放在更新日志的相应" +"部分下:" #: ../../source/contributor-tutorial-contribute-on-github.rst:370 #, fuzzy @@ -2190,70 +2294,78 @@ msgstr "其内容必须有特定的格式。我们将分析每种可能性的作 #: ../../source/contributor-tutorial-contribute-on-github.rst:386 #, fuzzy msgid "" -"If the ``### Changelog entry`` section contains nothing or doesn't exist," -" the following text will be added to the changelog::" -msgstr "如果 ``#### Changelog entry`` " -"部分不包含任何内容或不存在,则会在更新日志中添加以下文本::" +"If the ``### Changelog entry`` section contains nothing or doesn't exist, " +"the following text will be added to the changelog::" +msgstr "" +"如果 ``#### Changelog entry`` 部分不包含任何内容或不存在,则会在更新日志中添" +"加以下文本::" #: ../../source/contributor-tutorial-contribute-on-github.rst:390 #, fuzzy msgid "" "If the ``### Changelog entry`` section contains a description (and no " "token), the following text will be added to the changelog::" -msgstr "如果 ``#### Changelog entry`` " -"部分包含描述(但没有标记),则会在更新日志中添加以下文本::" +msgstr "" +"如果 ``#### Changelog entry`` 部分包含描述(但没有标记),则会在更新日志中添" +"加以下文本::" #: ../../source/contributor-tutorial-contribute-on-github.rst:396 #, fuzzy msgid "" "If the ``### Changelog entry`` section contains ````, nothing will " "change in the changelog." -msgstr "如果 ``#### Changelog entry`` 部分包含 " -"````,更新日志中将不会有任何更改。" +msgstr "" +"如果 ``#### Changelog entry`` 部分包含 ````,更新日志中将不会有任何更" +"改。" #: ../../source/contributor-tutorial-contribute-on-github.rst:398 #, fuzzy msgid "" -"If the ``### Changelog entry`` section contains ````, the " -"following text will be added to the changelog::" -msgstr "如果 ``### Changelog entry`` 部分包含 " -"````,则会在更新日志中添加以下文本::" +"If the ``### Changelog entry`` section contains ````, the following " +"text will be added to the changelog::" +msgstr "" +"如果 ``### Changelog entry`` 部分包含 ````,则会在更新日志中添加以下" +"文本::" #: ../../source/contributor-tutorial-contribute-on-github.rst:402 #, fuzzy msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" -msgstr "如果``### " -"更新日志条目``部分包含``<基准线>``,则会在更新日志中添加以下文本::" +msgstr "" +"如果``### 更新日志条目``部分包含``<基准线>``,则会在更新日志中添加以下文" +"本::" #: ../../source/contributor-tutorial-contribute-on-github.rst:406 #, fuzzy msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" -msgstr "如果``### 更新日志条目``部分包含``<示例>``,则会在更新日志中添加以下文本::" +msgstr "" +"如果``### 更新日志条目``部分包含``<示例>``,则会在更新日志中添加以下文本::" #: ../../source/contributor-tutorial-contribute-on-github.rst:410 #, fuzzy msgid "" "If the ``### Changelog entry`` section contains ````, the following " "text will be added to the changelog::" -msgstr "如果``### 更新日志条目``部分包含````,则会在更新日志中添加以下文本::" +msgstr "" +"如果``### 更新日志条目``部分包含````,则会在更新日志中添加以下文本::" #: ../../source/contributor-tutorial-contribute-on-github.rst:414 #, fuzzy msgid "" "If the ``### Changelog entry`` section contains ````, the " "following text will be added to the changelog::" -msgstr "如果 ``### Changelog entry`` 部分包含 " -"````,则会在更新日志中添加以下文本::" +msgstr "" +"如果 ``### 
Changelog entry`` 部分包含 ````,则会在更新日志中添加" +"以下文本::" #: ../../source/contributor-tutorial-contribute-on-github.rst:418 #, fuzzy msgid "" -"Note that only one token must be provided, otherwise, only the first " -"action (in the order listed above), will be performed." +"Note that only one token must be provided, otherwise, only the first action " +"(in the order listed above), will be performed." msgstr "请注意,必须只提供一个标记,否则将只执行第一个操作(按上述顺序)。" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:2 @@ -2278,18 +2390,21 @@ msgid "(Optional) `pyenv `_" msgstr "(可选) `pyenv `_" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:10 -msgid "(Optional) `pyenv-virtualenv `_" -msgstr "(可选) `pyenv-virtualenv `_" +msgid "" +"(Optional) `pyenv-virtualenv `_" +msgstr "" +"(可选) `pyenv-virtualenv `_" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:12 #, fuzzy msgid "" "Flower uses :code:`pyproject.toml` to manage dependencies and configure " -"development tools (the ones which support it). Poetry is a build tool " -"which supports `PEP 517 `_." +"development tools (the ones which support it). Poetry is a build tool which " +"supports `PEP 517 `_." msgstr "" -"Flower 使用 :code:`pyproject.toml` 来管理依赖关系和配置开发工具(支持它的)。Poetry 是一种支持 `PEP " -"517 `_ 的构建工具。" +"Flower 使用 :code:`pyproject.toml` 来管理依赖关系和配置开发工具(支持它的)。" +"Poetry 是一种支持 `PEP 517 `_ 的构" +"建工具。" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:18 msgid "Developer Machine Setup" @@ -2313,16 +2428,17 @@ msgstr "适用于 macOS" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:27 #, fuzzy msgid "" -"Install `homebrew `_. Don't forget the post-" -"installation actions to add `brew` to your PATH." -msgstr "安装 `homebrew `_。别忘了安装后的操作,将 `brew` " -"添加到你的 PATH。" +"Install `homebrew `_. Don't forget the post-installation " +"actions to add `brew` to your PATH." +msgstr "" +"安装 `homebrew `_。别忘了安装后的操作,将 `brew` 添加到你" +"的 PATH。" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:28 #, fuzzy msgid "" -"Install `xz` (to install different Python versions) and `pandoc` to build" -" the docs::" +"Install `xz` (to install different Python versions) and `pandoc` to build " +"the docs::" msgstr "安装 `xz`(用于安装不同的 Python 版本)和 `pandoc` 以构建文档::" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:34 @@ -2333,8 +2449,8 @@ msgstr "针对 Ubuntu" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:35 #, fuzzy msgid "" -"Ensure you system (Ubuntu 22.04+) is up-to-date, and you have all " -"necessary packages::" +"Ensure you system (Ubuntu 22.04+) is up-to-date, and you have all necessary " +"packages::" msgstr "确保您的系统(Ubuntu 22.04+)为最新版本,并安装了所有必要的软件包::" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:44 @@ -2347,47 +2463,50 @@ msgstr "创建/删除虚拟环境" msgid "" "1. Clone the `Flower repository `_ from " "GitHub::" -msgstr "首先,从 GitHub 克隆 \"Flower 存储库 `_\":" +msgstr "" +"首先,从 GitHub 克隆 \"Flower 存储库 `_\":" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:52 #, fuzzy msgid "" -"Let's create the Python environment for all-things Flower. If you wish to" -" use :code:`pyenv`, we provide two convenience scripts that you can use. " -"If you prefer using something else than :code:`pyenv`, create a new " +"Let's create the Python environment for all-things Flower. If you wish to " +"use :code:`pyenv`, we provide two convenience scripts that you can use. 
If " +"you prefer using something else than :code:`pyenv`, create a new " "environment, activate and skip to the last point where all packages are " "installed." msgstr "" -"让我们为 Flower 创建一个 Python 环境。如果您想使用 " -":code:`pyenv`,我们提供了两个方便的脚本供您使用。如果你不喜欢使用 " -":code:`pyenv`,请创建一个新环境,激活并跳到最后一点,即安装所有软件包。" +"让我们为 Flower 创建一个 Python 环境。如果您想使用 :code:`pyenv`,我们提供了" +"两个方便的脚本供您使用。如果你不喜欢使用 :code:`pyenv`,请创建一个新环境,激" +"活并跳到最后一点,即安装所有软件包。" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:54 #, fuzzy msgid "" -"If you don't have :code:`pyenv` installed, the following script that will" -" install it, set it up, and create the virtual environment (with " -":code:`Python 3.8.17` by default)::" +"If you don't have :code:`pyenv` installed, the following script that will " +"install it, set it up, and create the virtual environment (with :code:" +"`Python 3.8.17` by default)::" msgstr "" -"如果没有安装 :code:`pyenv`,可以使用以下脚本安装 pyenv、设置并创建虚拟环境(默认使用 " -":code:`Python3.8.17)::" +"如果没有安装 :code:`pyenv`,可以使用以下脚本安装 pyenv、设置并创建虚拟环境" +"(默认使用 :code:`Python3.8.17)::" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:58 #, fuzzy msgid "" "If you already have :code:`pyenv` installed (along with the :code:`pyenv-" -"virtualenv` plugin), you can use the following convenience script (with " -":code:`Python 3.8.17` by default)::" +"virtualenv` plugin), you can use the following convenience script (with :" +"code:`Python 3.8.17` by default)::" msgstr "" -"如果没有安装 :code:`pyenv`,可以使用以下脚本安装 pyenv、设置并创建虚拟环境(默认使用 " -":code:`Python3.8.17)::" +"如果没有安装 :code:`pyenv`,可以使用以下脚本安装 pyenv、设置并创建虚拟环境" +"(默认使用 :code:`Python3.8.17)::" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:62 #, fuzzy msgid "" -"3. Install the Flower package in development mode (think :code:`pip " -"install -e`) along with all necessary dependencies::" -msgstr "第三,在开发模式下安装 Flower 软件包(想想 :code:`pip install -e`)以及所有必要的依赖项::" +"3. Install the Flower package in development mode (think :code:`pip install -" +"e`) along with all necessary dependencies::" +msgstr "" +"第三,在开发模式下安装 Flower 软件包(想想 :code:`pip install -e`)以及所有必" +"要的依赖项::" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 msgid "Convenience Scripts" @@ -2396,10 +2515,12 @@ msgstr "便捷脚本" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:71 msgid "" "The Flower repository contains a number of convenience scripts to make " -"recurring development tasks easier and less error-prone. See the " -":code:`/dev` subdirectory for a full list. The following scripts are " -"amongst the most important ones:" -msgstr "Flower 软件仓库包含大量便捷脚本,可使重复性开发任务更轻松、更不易出错。完整列表请参见 :code:`/dev` 子目录。以下是最重要的脚本:" +"recurring development tasks easier and less error-prone. See the :code:`/" +"dev` subdirectory for a full list. The following scripts are amongst the " +"most important ones:" +msgstr "" +"Flower 软件仓库包含大量便捷脚本,可使重复性开发任务更轻松、更不易出错。完整列" +"表请参见 :code:`/dev` 子目录。以下是最重要的脚本:" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77 msgid "Create/Delete Virtual Environment" @@ -2424,13 +2545,14 @@ msgstr "在本地运行 Github 操作 (CI)" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108 #, fuzzy msgid "" -"Developers could run the full set of Github Actions workflows under their" -" local environment by using `Act `_. 
" -"Please refer to the installation instructions under the linked repository" -" and run the next command under Flower main cloned repository folder::" +"Developers could run the full set of Github Actions workflows under their " +"local environment by using `Act `_. Please " +"refer to the installation instructions under the linked repository and run " +"the next command under Flower main cloned repository folder::" msgstr "" -"开发人员可以使用 `Act _` 在本地环境下运行全套 Github Actions" -" 工作流程。请参考链接仓库下的安装说明,并在 Flower 主克隆仓库文件夹下运行下一条命令::" +"开发人员可以使用 `Act _` 在本地环境下运行全套 " +"Github Actions 工作流程。请参考链接仓库下的安装说明,并在 Flower 主克隆仓库文" +"件夹下运行下一条命令::" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:115 msgid "" @@ -2444,15 +2566,16 @@ msgstr "版本发布" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:122 msgid "" -"Flower uses Poetry to build releases. The necessary command is wrapped in" -" a simple script::" +"Flower uses Poetry to build releases. The necessary command is wrapped in a " +"simple script::" msgstr "Flower 使用 Poetry 创建发布版本。必要的命令封装在一个简单的脚本中::" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:127 msgid "" -"The resulting :code:`.whl` and :code:`.tar.gz` releases will be stored in" -" the :code:`/dist` subdirectory." -msgstr "生成的 :code:`.whl` 和 :code:`.tar.gz` 版本将存储在 :code:`/dist` 子目录中。" +"The resulting :code:`.whl` and :code:`.tar.gz` releases will be stored in " +"the :code:`/dist` subdirectory." +msgstr "" +"生成的 :code:`.whl` 和 :code:`.tar.gz` 版本将存储在 :code:`/dist` 子目录中。" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:132 msgid "Build Documentation" @@ -2460,12 +2583,12 @@ msgstr "构建文档" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:134 msgid "" -"Flower's documentation uses `Sphinx `_. " -"There's no convenience script to re-build the documentation yet, but it's" -" pretty easy::" +"Flower's documentation uses `Sphinx `_. There's " +"no convenience script to re-build the documentation yet, but it's pretty " +"easy::" msgstr "" -"Flower 的文档使用 `Sphinx `_。目前还没有很方便的脚本来重新构建文档,不过这很容易::" +"Flower 的文档使用 `Sphinx `_。目前还没有很方便的" +"脚本来重新构建文档,不过这很容易::" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:140 msgid "This will generate HTML documentation in ``doc/build/html``." @@ -2478,19 +2601,19 @@ msgstr "示例: PyTorch 中的 FedBN - 从集中式到联邦式" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 #, fuzzy msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload with `FedBN " -"`_, a federated training strategy " -"designed for non-iid data. We are using PyTorch to train a Convolutional " -"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " -"When applying FedBN, only few changes needed compared to :doc:`Example: " -"PyTorch - From Centralized To Federated `." +"This tutorial will show you how to use Flower to build a federated version " +"of an existing machine learning workload with `FedBN `_, a federated training strategy designed for non-iid data. We " +"are using PyTorch to train a Convolutional Neural Network(with Batch " +"Normalization layers) on the CIFAR-10 dataset. When applying FedBN, only few " +"changes needed compared to :doc:`Example: PyTorch - From Centralized To " +"Federated `." 
msgstr "" -"本教程将向您展示如何使用 Flower 为现有的机器学习框架构建一个联邦学习的版本,并使用 \"FedBN `_\"(一种针对非 iid 数据设计的联邦训练策略)。我们使用 PyTorch 在 CIFAR-10 " -"数据集上训练一个卷积神经网络(带有Batch Normalization层)。在应用 FedBN 时,只需对 `示例: PyTorch - " -"从集中式到联邦式 `_\"(一种针对非 iid 数据设" +"计的联邦训练策略)。我们使用 PyTorch 在 CIFAR-10 数据集上训练一个卷积神经网络" +"(带有Batch Normalization层)。在应用 FedBN 时,只需对 `示例: PyTorch - 从集" +"中式到联邦式 `_ 做少量改动。" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 @@ -2501,14 +2624,14 @@ msgstr "集中式训练" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 #, fuzzy msgid "" -"All files are revised based on :doc:`Example: PyTorch - From Centralized " -"To Federated `. The only " -"thing to do is modifying the file called :code:`cifar.py`, revised part " -"is shown below:" +"All files are revised based on :doc:`Example: PyTorch - From Centralized To " +"Federated `. The only thing " +"to do is modifying the file called :code:`cifar.py`, revised part is shown " +"below:" msgstr "" -"所有文件均根据 `示例: PyTorch -从集中式到联邦式 `_。唯一要做的就是修改名为 :code:`cifar.py` " -"的文件,修改部分如下所示:" +"所有文件均根据 `示例: PyTorch -从集中式到联邦式 `_。唯一要做的就是修改名" +"为 :code:`cifar.py` 的文件,修改部分如下所示:" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 msgid "" @@ -2524,13 +2647,14 @@ msgstr "现在,您可以运行您的机器学习工作了:" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 #, fuzzy msgid "" -"So far this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"federated learning system within FedBN, the system consists of one server" -" and two clients." +"So far this should all look fairly familiar if you've used PyTorch before. " +"Let's take the next step and use what we've built to create a federated " +"learning system within FedBN, the system consists of one server and two " +"clients." msgstr "" -"到目前为止,如果您以前使用过 PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,使用我们所构建的内容在 FedBN " -"中创建一个联邦学习系统,该系统由一个服务器和两个客户端组成。" +"到目前为止,如果您以前使用过 PyTorch,这一切看起来应该相当熟悉。让我们进行下" +"一步,使用我们所构建的内容在 FedBN 中创建一个联邦学习系统,该系统由一个服务器" +"和两个客户端组成。" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 #: ../../source/example-pytorch-from-centralized-to-federated.rst:167 @@ -2541,34 +2665,37 @@ msgstr "联邦培训" #, fuzzy msgid "" "If you have read :doc:`Example: PyTorch - From Centralized To Federated " -"`, the following parts are" -" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " -"function in :code:`client.py` needed to revise. If not, please read the " -":doc:`Example: PyTorch - From Centralized To Federated `. first." -msgstr "" -"如果你读过 `示例: PyTorch - 从集中式到联邦式 `_,下面的部分就很容易理解了,只需要修改 " -":code:`get_parameters` 和 :code:`set_parameters` 中的 :code:`client.py` " -"函数。如果没有,请阅读 `示例: PyTorch - 从集中式到联邦式 `_。" +"`, the following parts are " +"easy to follow, only :code:`get_parameters` and :code:`set_parameters` " +"function in :code:`client.py` needed to revise. If not, please read the :doc:" +"`Example: PyTorch - From Centralized To Federated `. first." +msgstr "" +"如果你读过 `示例: PyTorch - 从集中式到联邦式 `_,下面的部分就很容易理" +"解了,只需要修改 :code:`get_parameters` 和 :code:`set_parameters` 中的 :code:" +"`client.py` 函数。如果没有,请阅读 `示例: PyTorch - 从集中式到联邦式 " +"`_。" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 msgid "" -"Our example consists of one *server* and two *clients*. In FedBN, " -":code:`server.py` keeps unchanged, we can start the server directly." 
-msgstr "我们的示例包括一个*服务器*和两个*客户端*。在 FedBN 中,:code:`server.py` 保持不变,我们可以直接启动服务器。" +"Our example consists of one *server* and two *clients*. In FedBN, :code:" +"`server.py` keeps unchanged, we can start the server directly." +msgstr "" +"我们的示例包括一个*服务器*和两个*客户端*。在 FedBN 中,:code:`server.py` 保持" +"不变,我们可以直接启动服务器。" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:62 msgid "" -"Finally, we will revise our *client* logic by changing " -":code:`get_parameters` and :code:`set_parameters` in :code:`client.py`, " -"we will exclude batch normalization parameters from model parameter list " -"when sending to or receiving from the server." +"Finally, we will revise our *client* logic by changing :code:" +"`get_parameters` and :code:`set_parameters` in :code:`client.py`, we will " +"exclude batch normalization parameters from model parameter list when " +"sending to or receiving from the server." msgstr "" -"最后,我们将修改 *client* 的逻辑,修改 :code:`client.py` 中的 :code:`get_parameters` 和 " -":code:`set_parameters`,在向服务器发送或从服务器接收时,我们将从模型参数列表中排除batch " -"normalization层的参数。" +"最后,我们将修改 *client* 的逻辑,修改 :code:`client.py` 中的 :code:" +"`get_parameters` 和 :code:`set_parameters`,在向服务器发送或从服务器接收时," +"我们将从模型参数列表中排除batch normalization层的参数。" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:85 msgid "Now, you can now open two additional terminal windows and run" @@ -2576,10 +2703,12 @@ msgstr "现在,您可以打开另外两个终端窗口并运行程序" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:91 msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your (previously centralized) PyTorch project run federated " -"learning with FedBN strategy across two clients. Congratulations!" -msgstr "确保服务器仍在运行后,然后您就能看到您的 PyTorch 项目(之前是集中式的)通过 FedBN 策略在两个客户端上运行联合学习。祝贺!" +"in each window (make sure that the server is still running before you do so) " +"and see your (previously centralized) PyTorch project run federated learning " +"with FedBN strategy across two clients. Congratulations!" +msgstr "" +"确保服务器仍在运行后,然后您就能看到您的 PyTorch 项目(之前是集中式的)通过 " +"FedBN 策略在两个客户端上运行联合学习。祝贺!" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:94 #: ../../source/example-jax-from-centralized-to-federated.rst:277 @@ -2591,18 +2720,17 @@ msgstr "下一步工作" #: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:96 msgid "" -"The full source code for this example can be found `here " -"`_. Our example is of course somewhat over-" -"simplified because both clients load the exact same dataset, which isn't " -"realistic. You're now prepared to explore this topic further. How about " -"using different subsets of CIFAR-10 on each client? How about adding more" -" clients?" +"The full source code for this example can be found `here `_. Our " +"example is of course somewhat over-simplified because both clients load the " +"exact same dataset, which isn't realistic. You're now prepared to explore " +"this topic further. How about using different subsets of CIFAR-10 on each " +"client? How about adding more clients?" 
msgstr "" -"本示例的完整源代码可在 `_ " -"找到。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。让我们准备好进一步探讨这一主题。如在每个客户端使用不同的 " -"CIFAR-10 子集,或者增加客户端的数量。" +"本示例的完整源代码可在 `_ 找到。当然,我们的示例有些过于简单," +"因为两个客户端都加载了完全相同的数据集,这并不真实。让我们准备好进一步探讨这" +"一主题。如在每个客户端使用不同的 CIFAR-10 子集,或者增加客户端的数量。" #: ../../source/example-jax-from-centralized-to-federated.rst:2 msgid "Example: JAX - Run JAX Federated" @@ -2611,32 +2739,32 @@ msgstr "示例: JAX - 运行联邦式 JAX" #: ../../source/example-jax-from-centralized-to-federated.rst:4 #: ../../source/tutorial-quickstart-jax.rst:10 msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing JAX workload. We are using JAX to train a linear " -"regression model on a scikit-learn dataset. We will structure the example" -" similar to our `PyTorch - From Centralized To Federated " -"`_ walkthrough. First, we build a centralized " -"training approach based on the `Linear Regression with JAX " -"`_" -" tutorial`. Then, we build upon the centralized training code to run the " -"training in a federated fashion." -msgstr "" -"本教程将向您展示如何使用 Flower 构建现有 JAX 的联邦学习版本。我们将使用 JAX 在 scikit-learn " -"数据集上训练线性回归模型。我们将采用与 `PyTorch - 从集中式到联邦式 " -"`_ 教程中类似的示例结构。首先,我们根据 `JAX 的线性回归 " -"`_" -" 教程构建集中式训练方法。然后,我们在集中式训练代码的基础上以联邦方式运行训练。" +"This tutorial will show you how to use Flower to build a federated version " +"of an existing JAX workload. We are using JAX to train a linear regression " +"model on a scikit-learn dataset. We will structure the example similar to " +"our `PyTorch - From Centralized To Federated `_ walkthrough. " +"First, we build a centralized training approach based on the `Linear " +"Regression with JAX `_ tutorial`. Then, we build upon the centralized " +"training code to run the training in a federated fashion." +msgstr "" +"本教程将向您展示如何使用 Flower 构建现有 JAX 的联邦学习版本。我们将使用 JAX " +"在 scikit-learn 数据集上训练线性回归模型。我们将采用与 `PyTorch - 从集中式到" +"联邦式 `_ 教程中类似的示例结构。首先,我们根据 `JAX 的线性" +"回归 `_ 教程构建集中式训练方法。然后,我们在集中式训练代码的基础上以联邦方式" +"运行训练。" #: ../../source/example-jax-from-centralized-to-federated.rst:10 #: ../../source/tutorial-quickstart-jax.rst:16 msgid "" -"Before we start building our JAX example, we need install the packages " -":code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:" +"Before we start building our JAX example, we need install the packages :code:" +"`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:" msgstr "" -"在开始构建 JAX 示例之前,我们需要安装软件包 :code:`jax`、:code:`jaxlib`、:code:`scikit-learn` " -"和 :code:`flwr`:" +"在开始构建 JAX 示例之前,我们需要安装软件包 :code:`jax`、:code:`jaxlib`、:" +"code:`scikit-learn` 和 :code:`flwr`:" #: ../../source/example-jax-from-centralized-to-federated.rst:18 #: ../../source/tutorial-quickstart-jax.rst:24 @@ -2646,77 +2774,83 @@ msgstr "使用 JAX 进行线性回归" #: ../../source/example-jax-from-centralized-to-federated.rst:20 #: ../../source/tutorial-quickstart-jax.rst:26 msgid "" -"We begin with a brief description of the centralized training code based " -"on a :code:`Linear Regression` model. If you want a more in-depth " -"explanation of what's going on then have a look at the official `JAX " -"documentation `_." +"We begin with a brief description of the centralized training code based on " +"a :code:`Linear Regression` model. If you want a more in-depth explanation " +"of what's going on then have a look at the official `JAX documentation " +"`_." 
msgstr "" -"首先,我们将简要介绍基于 :code:`Linear Regression` 模型的集中式训练代码。如果您想获得更深入的解释,请参阅官方的 " -"`JAX 文档 `_。" +"首先,我们将简要介绍基于 :code:`Linear Regression` 模型的集中式训练代码。如果" +"您想获得更深入的解释,请参阅官方的 `JAX 文档 `_。" #: ../../source/example-jax-from-centralized-to-federated.rst:23 #: ../../source/tutorial-quickstart-jax.rst:29 msgid "" "Let's create a new file called :code:`jax_training.py` with all the " "components required for a traditional (centralized) linear regression " -"training. First, the JAX packages :code:`jax` and :code:`jaxlib` need to " -"be imported. In addition, we need to import :code:`sklearn` since we use " -":code:`make_regression` for the dataset and :code:`train_test_split` to " -"split the dataset into a training and test set. You can see that we do " -"not yet import the :code:`flwr` package for federated learning. This will" -" be done later." -msgstr "" -"让我们创建一个名为 :code:`jax_training.py` 的新文件,其中包含传统(集中式)线性回归训练所需的所有组件。首先,需要导入 " -"JAX 包 :code:`jax` 和 :code:`jaxlib`。此外,我们还需要导入 :code:`sklearn`,因为我们使用 " -":code:`make_regression` 创建数据集,并使用 :code:`train_test_split` " -"将数据集拆分成训练集和测试集。您可以看到,我们还没有导入用于联邦学习的 :code:`flwr` 软件包,这将在稍后完成。" +"training. First, the JAX packages :code:`jax` and :code:`jaxlib` need to be " +"imported. In addition, we need to import :code:`sklearn` since we use :code:" +"`make_regression` for the dataset and :code:`train_test_split` to split the " +"dataset into a training and test set. You can see that we do not yet import " +"the :code:`flwr` package for federated learning. This will be done later." +msgstr "" +"让我们创建一个名为 :code:`jax_training.py` 的新文件,其中包含传统(集中式)线" +"性回归训练所需的所有组件。首先,需要导入 JAX 包 :code:`jax` 和 :code:" +"`jaxlib`。此外,我们还需要导入 :code:`sklearn`,因为我们使用 :code:" +"`make_regression` 创建数据集,并使用 :code:`train_test_split` 将数据集拆分成" +"训练集和测试集。您可以看到,我们还没有导入用于联邦学习的 :code:`flwr` 软件" +"包,这将在稍后完成。" #: ../../source/example-jax-from-centralized-to-federated.rst:37 #: ../../source/tutorial-quickstart-jax.rst:43 msgid "" -"The :code:`load_data()` function loads the mentioned training and test " -"sets." +"The :code:`load_data()` function loads the mentioned training and test sets." msgstr ":code:`load_data()` 函数会加载上述训练集和测试集。" #: ../../source/example-jax-from-centralized-to-federated.rst:47 #: ../../source/tutorial-quickstart-jax.rst:53 msgid "" -"The model architecture (a very simple :code:`Linear Regression` model) is" -" defined in :code:`load_model()`." -msgstr "模型结构(一个非常简单的 :code:`Linear Regression` 线性回归模型)在 :code:`load_model()` 中定义。" +"The model architecture (a very simple :code:`Linear Regression` model) is " +"defined in :code:`load_model()`." +msgstr "" +"模型结构(一个非常简单的 :code:`Linear Regression` 线性回归模型)在 :code:" +"`load_model()` 中定义。" #: ../../source/example-jax-from-centralized-to-federated.rst:59 #: ../../source/tutorial-quickstart-jax.rst:65 msgid "" -"We now need to define the training (function :code:`train()`), which " -"loops over the training set and measures the loss (function " -":code:`loss_fn()`) for each batch of training examples. The loss function" -" is separate since JAX takes derivatives with a :code:`grad()` function " -"(defined in the :code:`main()` function and called in :code:`train()`)." +"We now need to define the training (function :code:`train()`), which loops " +"over the training set and measures the loss (function :code:`loss_fn()`) for " +"each batch of training examples. The loss function is separate since JAX " +"takes derivatives with a :code:`grad()` function (defined in the :code:" +"`main()` function and called in :code:`train()`)." 
msgstr "" -"现在,我们需要定义训练函数( :code:`train()`)。它循环遍历训练集,并计算每批训练数据的损失值(函数 " -":code:`loss_fn()`)。由于 JAX 使用 :code:`grad()` 函数提取导数(在 :code:`main()` " -"函数中定义,并在 :code:`train()` 中调用),因此损失函数是独立的。" +"现在,我们需要定义训练函数( :code:`train()`)。它循环遍历训练集,并计算每批" +"训练数据的损失值(函数 :code:`loss_fn()`)。由于 JAX 使用 :code:`grad()` 函数" +"提取导数(在 :code:`main()` 函数中定义,并在 :code:`train()` 中调用),因此损" +"失函数是独立的。" #: ../../source/example-jax-from-centralized-to-federated.rst:77 #: ../../source/tutorial-quickstart-jax.rst:83 msgid "" -"The evaluation of the model is defined in the function " -":code:`evaluation()`. The function takes all test examples and measures " -"the loss of the linear regression model." -msgstr "模型的评估在函数 :code:`evaluation()` 中定义。该函数获取所有测试数据,并计算线性回归模型的损失值。" +"The evaluation of the model is defined in the function :code:`evaluation()`. " +"The function takes all test examples and measures the loss of the linear " +"regression model." +msgstr "" +"模型的评估在函数 :code:`evaluation()` 中定义。该函数获取所有测试数据,并计算" +"线性回归模型的损失值。" #: ../../source/example-jax-from-centralized-to-federated.rst:88 #: ../../source/tutorial-quickstart-jax.rst:94 msgid "" "Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our model using JAX. " -"As already mentioned, the :code:`jax.grad()` function is defined in " -":code:`main()` and passed to :code:`train()`." +"evaluation we can put everything together and train our model using JAX. As " +"already mentioned, the :code:`jax.grad()` function is defined in :code:" +"`main()` and passed to :code:`train()`." msgstr "" -"在定义了数据加载、模型架构、训练和评估之后,我们就可以把这些放在一起,使用 JAX " -"训练我们的模型了。如前所述,:code:`jax.grad()` 函数在 :code:`main()` 中定义,并传递给 " -":code:`train()`。" +"在定义了数据加载、模型架构、训练和评估之后,我们就可以把这些放在一起,使用 " +"JAX 训练我们的模型了。如前所述,:code:`jax.grad()` 函数在 :code:`main()` 中定" +"义,并传递给 :code:`train()`。" #: ../../source/example-jax-from-centralized-to-federated.rst:105 #: ../../source/tutorial-quickstart-jax.rst:111 @@ -2726,10 +2860,12 @@ msgstr "现在您可以运行(集中式)JAX 线性回归工作了:" #: ../../source/example-jax-from-centralized-to-federated.rst:111 #: ../../source/tutorial-quickstart-jax.rst:117 msgid "" -"So far this should all look fairly familiar if you've used JAX before. " -"Let's take the next step and use what we've built to create a simple " -"federated learning system consisting of one server and two clients." -msgstr "到目前为止,如果你以前使用过 JAX,就会对这一切感到很熟悉。下一步,让我们利用已构建的代码创建一个简单的联邦学习系统(一个服务器和两个客户端)。" +"So far this should all look fairly familiar if you've used JAX before. Let's " +"take the next step and use what we've built to create a simple federated " +"learning system consisting of one server and two clients." +msgstr "" +"到目前为止,如果你以前使用过 JAX,就会对这一切感到很熟悉。下一步,让我们利用" +"已构建的代码创建一个简单的联邦学习系统(一个服务器和两个客户端)。" #: ../../source/example-jax-from-centralized-to-federated.rst:115 #: ../../source/tutorial-quickstart-jax.rst:121 @@ -2739,30 +2875,34 @@ msgstr "JAX 结合 Flower" #: ../../source/example-jax-from-centralized-to-federated.rst:117 #: ../../source/tutorial-quickstart-jax.rst:123 msgid "" -"The concept of federating an existing workload is always the same and " -"easy to understand. We have to start a *server* and then use the code in " -":code:`jax_training.py` for the *clients* that are connected to the " -"*server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server*, which averages all received " -"parameter updates. 
This describes one round of the federated learning " -"process, and we repeat this for multiple rounds." -msgstr "" -"把现有工作联邦化的概念始终是相同的,也很容易理解。我们要启动一个*服务器*,然后对连接到*服务器*的*客户端*运行 " -":code:`jax_training.py`中的代码。*服务器*向客户端发送模型参数,*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后服务器对所有收到的参数进行平均聚合。以上的描述构成了一轮联邦学习,我们将重复进行多轮学习。" +"The concept of federating an existing workload is always the same and easy " +"to understand. We have to start a *server* and then use the code in :code:" +"`jax_training.py` for the *clients* that are connected to the *server*. The " +"*server* sends model parameters to the clients. The *clients* run the " +"training and update the parameters. The updated parameters are sent back to " +"the *server*, which averages all received parameter updates. This describes " +"one round of the federated learning process, and we repeat this for multiple " +"rounds." +msgstr "" +"把现有工作联邦化的概念始终是相同的,也很容易理解。我们要启动一个*服务器*,然" +"后对连接到*服务器*的*客户端*运行 :code:`jax_training.py`中的代码。*服务器*向" +"客户端发送模型参数,*客户端*运行训练并更新参数。更新后的参数被发回*服务器*," +"然后服务器对所有收到的参数进行平均聚合。以上的描述构成了一轮联邦学习,我们将" +"重复进行多轮学习。" #: ../../source/example-jax-from-centralized-to-federated.rst:123 #: ../../source/example-mxnet-walk-through.rst:204 #: ../../source/example-pytorch-from-centralized-to-federated.rst:181 #: ../../source/tutorial-quickstart-jax.rst:129 msgid "" -"Our example consists of one *server* and two *clients*. Let's set up " -":code:`server.py` first. The *server* needs to import the Flower package " -":code:`flwr`. Next, we use the :code:`start_server` function to start a " -"server and tell it to perform three rounds of federated learning." +"Our example consists of one *server* and two *clients*. Let's set up :code:" +"`server.py` first. The *server* needs to import the Flower package :code:" +"`flwr`. Next, we use the :code:`start_server` function to start a server and " +"tell it to perform three rounds of federated learning." msgstr "" -"我们的示例包括一个*服务器*和两个*客户端*。让我们先设置 :code:`server.py`。*服务器*需要导入 Flower 软件包 " -":code:`flwr`。接下来,我们使用 :code:`start_server` 函数启动服务器,并让它执行三轮联邦学习。" +"我们的示例包括一个*服务器*和两个*客户端*。让我们先设置 :code:`server.py`。*服" +"务器*需要导入 Flower 软件包 :code:`flwr`。接下来,我们使用 :code:" +"`start_server` 函数启动服务器,并让它执行三轮联邦学习。" #: ../../source/example-jax-from-centralized-to-federated.rst:133 #: ../../source/example-mxnet-walk-through.rst:214 @@ -2774,35 +2914,34 @@ msgstr "我们已经可以启动*服务器*了:" #: ../../source/example-jax-from-centralized-to-federated.rst:139 #: ../../source/tutorial-quickstart-jax.rst:145 msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined JAX training in :code:`jax_training.py`. Our" -" *client* needs to import :code:`flwr`, but also :code:`jax` and " -":code:`jaxlib` to update the parameters on our JAX model:" +"Finally, we will define our *client* logic in :code:`client.py` and build " +"upon the previously defined JAX training in :code:`jax_training.py`. 
Our " +"*client* needs to import :code:`flwr`, but also :code:`jax` and :code:" +"`jaxlib` to update the parameters on our JAX model:" msgstr "" -"最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 " -":code:`jax_training.py` 中定义的 JAX 训练为基础。我们的 *client* 需要导入 " -":code:`flwr`,还需要导入 :code:`jax` 和 :code:`jaxlib` 以更新 JAX 模型的参数:" +"最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 :" +"code:`jax_training.py` 中定义的 JAX 训练为基础。我们的 *client* 需要导入 :" +"code:`flwr`,还需要导入 :code:`jax` 和 :code:`jaxlib` 以更新 JAX 模型的参数:" #: ../../source/example-jax-from-centralized-to-federated.rst:154 #: ../../source/tutorial-quickstart-jax.rst:160 msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`FlowerClient`. :code:`NumPyClient` is slightly " -"easier to implement than :code:`Client` if you use a framework with good " -"NumPy interoperability (like JAX) because it avoids some of the " -"boilerplate that would otherwise be necessary. :code:`FlowerClient` needs" -" to implement four methods, two methods for getting/setting model " -"parameters, one method for training the model, and one method for testing" -" the model:" -msgstr "" -"实现一个 Flower *client*基本上意味着去实现一个 :code:`flwr.client.Client` 或 " -":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " -":code:`flwr.client.NumPyClient`,并将其命名为 :code:`FlowerClient`。如果使用具有良好 " -"NumPy 互操作性的框架(如 JAX),:code:`NumPyClient` 比 " -":code:`Client`更容易实现,因为它避免了一些不必要的操作。:code:`FlowerClient` " -"需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" +"Implementing a Flower *client* basically means implementing a subclass of " +"either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. Our " +"implementation will be based on :code:`flwr.client.NumPyClient` and we'll " +"call it :code:`FlowerClient`. :code:`NumPyClient` is slightly easier to " +"implement than :code:`Client` if you use a framework with good NumPy " +"interoperability (like JAX) because it avoids some of the boilerplate that " +"would otherwise be necessary. :code:`FlowerClient` needs to implement four " +"methods, two methods for getting/setting model parameters, one method for " +"training the model, and one method for testing the model:" +msgstr "" +"实现一个 Flower *client*基本上意味着去实现一个 :code:`flwr.client.Client` " +"或 :code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 :code:`flwr." 
+"client.NumPyClient`,并将其命名为 :code:`FlowerClient`。如果使用具有良好 " +"NumPy 互操作性的框架(如 JAX),:code:`NumPyClient` 比 :code:`Client`更容易实" +"现,因为它避免了一些不必要的操作。:code:`FlowerClient` 需要实现四个方法,两个" +"用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" #: ../../source/example-jax-from-centralized-to-federated.rst:161 #: ../../source/example-mxnet-walk-through.rst:242 @@ -2815,8 +2954,7 @@ msgstr ":code:`set_parameters (可选)`" #: ../../source/example-pytorch-from-centralized-to-federated.rst:219 #: ../../source/tutorial-quickstart-jax.rst:166 msgid "" -"set the model parameters on the local model that are received from the " -"server" +"set the model parameters on the local model that are received from the server" msgstr "在本地模型上设置从服务器接收的模型参数" #: ../../source/example-jax-from-centralized-to-federated.rst:161 @@ -2829,9 +2967,11 @@ msgstr "将参数转换为 NumPy :code:`ndarray`格式" #: ../../source/example-pytorch-from-centralized-to-federated.rst:220 #: ../../source/tutorial-quickstart-jax.rst:168 msgid "" -"loop over the list of model parameters received as NumPy " -":code:`ndarray`'s (think list of neural network layers)" -msgstr "循环遍历以 NumPy :code:`ndarray` 形式接收的模型参数列表(可以看作神经网络的列表)" +"loop over the list of model parameters received as NumPy :code:`ndarray`'s " +"(think list of neural network layers)" +msgstr "" +"循环遍历以 NumPy :code:`ndarray` 形式接收的模型参数列表(可以看作神经网络的列" +"表)" #: ../../source/example-jax-from-centralized-to-federated.rst:163 #: ../../source/example-mxnet-walk-through.rst:244 @@ -2848,11 +2988,11 @@ msgstr ":code:`get_parameters`" #: ../../source/example-pytorch-from-centralized-to-federated.rst:222 #: ../../source/tutorial-quickstart-jax.rst:170 msgid "" -"get the model parameters and return them as a list of NumPy " -":code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" +"get the model parameters and return them as a list of NumPy :code:" +"`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" msgstr "" -"获取模型参数,并以 NumPy :code:`ndarray`的列表形式返回(这正是 " -":code:`flwr.client.NumPyClient`所匹配的格式)" +"获取模型参数,并以 NumPy :code:`ndarray`的列表形式返回(这正是 :code:`flwr." +"client.NumPyClient`所匹配的格式)" #: ../../source/example-jax-from-centralized-to-federated.rst:167 #: ../../source/example-mxnet-walk-through.rst:248 @@ -2873,8 +3013,8 @@ msgstr ":code:`fit`" #: ../../source/tutorial-quickstart-jax.rst:172 #: ../../source/tutorial-quickstart-jax.rst:176 msgid "" -"update the parameters of the local model with the parameters received " -"from the server" +"update the parameters of the local model with the parameters received from " +"the server" msgstr "用从服务器接收到的参数更新本地模型的参数" #: ../../source/example-jax-from-centralized-to-federated.rst:167 @@ -2914,28 +3054,28 @@ msgstr "向服务器返回本地损失值" #: ../../source/example-jax-from-centralized-to-federated.rst:174 #: ../../source/tutorial-quickstart-jax.rst:180 msgid "" -"The challenging part is to transform the JAX model parameters from " -":code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with" -" `NumPyClient`." +"The challenging part is to transform the JAX model parameters from :code:" +"`DeviceArray` to :code:`NumPy ndarray` to make them compatible with " +"`NumPyClient`." 
msgstr "" -"具有挑战性的部分是将 JAX 模型参数从 :code:`DeviceArray` 转换为 :code:`NumPy ndarray`,使其与 " -"`NumPyClient` 兼容。" +"具有挑战性的部分是将 JAX 模型参数从 :code:`DeviceArray` 转换为 :code:`NumPy " +"ndarray`,使其与 `NumPyClient` 兼容。" #: ../../source/example-jax-from-centralized-to-federated.rst:176 #: ../../source/tutorial-quickstart-jax.rst:182 msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`evaluate()` previously " +"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make " +"use of the functions :code:`train()` and :code:`evaluate()` previously " "defined in :code:`jax_training.py`. So what we really do here is we tell " -"Flower through our :code:`NumPyClient` subclass which of our already " -"defined functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"Flower through our :code:`NumPyClient` subclass which of our already defined " +"functions to call for training and evaluation. We included type annotations " +"to give you a better understanding of the data types that get passed around." msgstr "" -"这两个 :code:`NumPyClient` 方法 :code:`fit` 和 :code:`evaluate` 使用了之前在 " -":code:`jax_training.py` 中定义的函数 :code:`train()` 和 " -":code:`evaluate()`。因此,我们在这里要做的就是通过 :code:`NumPyClient` 子类告知 Flower " -"在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让您更好地理解传递的数据类型。" +"这两个 :code:`NumPyClient` 方法 :code:`fit` 和 :code:`evaluate` 使用了之前" +"在 :code:`jax_training.py` 中定义的函数 :code:`train()` 和 :code:" +"`evaluate()`。因此,我们在这里要做的就是通过 :code:`NumPyClient` 子类告知 " +"Flower 在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让您更好" +"地理解传递的数据类型。" #: ../../source/example-jax-from-centralized-to-federated.rst:245 #: ../../source/tutorial-quickstart-jax.rst:251 @@ -2952,30 +3092,34 @@ msgstr "就是这样,现在你可以打开另外两个终端窗口,然后运 #: ../../source/example-jax-from-centralized-to-federated.rst:274 #: ../../source/tutorial-quickstart-jax.rst:280 msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your JAX project run federated learning across two clients. " +"in each window (make sure that the server is still running before you do so) " +"and see your JAX project run federated learning across two clients. " "Congratulations!" -msgstr "确保服务器仍在运行,然后在每个客户端窗口就能看到你的 JAX 项目在两个客户端上运行联邦学习了。祝贺!" +msgstr "" +"确保服务器仍在运行,然后在每个客户端窗口就能看到你的 JAX 项目在两个客户端上运" +"行联邦学习了。祝贺!" #: ../../source/example-jax-from-centralized-to-federated.rst:279 #: ../../source/tutorial-quickstart-jax.rst:285 msgid "" "The source code of this example was improved over time and can be found " -"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " +"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " "clients load the same dataset." msgstr "" -"此示例的源代码经过长期改进,可在此处找到: `Quickstart JAX " -"`_。我们的示例有些过于简单,因为两个客户端都加载了相同的数据集。" +"此示例的源代码经过长期改进,可在此处找到: `Quickstart JAX `_。我们的示例有些过于简" +"单,因为两个客户端都加载了相同的数据集。" #: ../../source/example-jax-from-centralized-to-federated.rst:282 #: ../../source/tutorial-quickstart-jax.rst:288 msgid "" -"You're now prepared to explore this topic further. How about using a more" -" sophisticated model or using a different dataset? How about adding more " +"You're now prepared to explore this topic further. How about using a more " +"sophisticated model or using a different dataset? How about adding more " "clients?" -msgstr "现在,您已准备好进行更深一步探索了。例如使用更复杂的模型或使用不同的数据集会如何?增加更多客户端会如何?" 
+msgstr "" +"现在,您已准备好进行更深一步探索了。例如使用更复杂的模型或使用不同的数据集会" +"如何?增加更多客户端会如何?" #: ../../source/example-mxnet-walk-through.rst:2 msgid "Example: MXNet - Run MXNet Federated" @@ -2983,35 +3127,36 @@ msgstr "示例: MXNet - 运行联邦式 MXNet" #: ../../source/example-mxnet-walk-through.rst:4 msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing MXNet workload. We are using MXNet to train a " -"Sequential model on the MNIST dataset. We will structure the example " -"similar to our `PyTorch - From Centralized To Federated " -"`_ walkthrough. MXNet and PyTorch are very " -"similar and a very good comparison between MXNet and PyTorch is given " -"`here `_. First, we build a centralized " -"training approach based on the `Handwritten Digit Recognition " -"`_" -" tutorial. Then, we build upon the centralized training code to run the " -"training in a federated fashion." -msgstr "" -"本教程将向您展示如何使用 Flower 构建现有 MXNet 的联学习版本。我们将使用 MXNet 在 MNIST " -"数据集上训练一个序列模型。另外,我们将采用与我们的 `PyTorch - 从集中式到联邦式 " -"`_ 教程类似的示例结构。MXNet 和 PyTorch 非常相似,参考 `此处 " -"`_对 MXNet 和 PyTorch " -"进行了详细的比较。首先,我们根据 `手写数字识别 " -"`" -" 教程 建立了集中式训练方法。然后,我们在集中式训练代码的基础上,以联邦方式运行训练。" +"This tutorial will show you how to use Flower to build a federated version " +"of an existing MXNet workload. We are using MXNet to train a Sequential " +"model on the MNIST dataset. We will structure the example similar to our " +"`PyTorch - From Centralized To Federated `_ walkthrough. " +"MXNet and PyTorch are very similar and a very good comparison between MXNet " +"and PyTorch is given `here `_. First, we " +"build a centralized training approach based on the `Handwritten Digit " +"Recognition `_ tutorial. Then, we build upon " +"the centralized training code to run the training in a federated fashion." +msgstr "" +"本教程将向您展示如何使用 Flower 构建现有 MXNet 的联学习版本。我们将使用 " +"MXNet 在 MNIST 数据集上训练一个序列模型。另外,我们将采用与我们的 `PyTorch - " +"从集中式到联邦式 `_ 教程类似的示例结构。MXNet 和 PyTorch 非常相" +"似,参考 `此处 `_对 MXNet 和 PyTorch 进行了" +"详细的比较。首先,我们根据 `手写数字识别 ` 教" +"程 建立了集中式训练方法。然后,我们在集中式训练代码的基础上,以联邦方式运行训" +"练。" #: ../../source/example-mxnet-walk-through.rst:10 msgid "" -"Before we start setting up our MXNet example, we install the " -":code:`mxnet` and :code:`flwr` packages:" -msgstr "在开始设置 MXNet 示例之前,我们先安装 :code:`mxnet` 和 :code:`flwr` 软件包:" +"Before we start setting up our MXNet example, we install the :code:`mxnet` " +"and :code:`flwr` packages:" +msgstr "" +"在开始设置 MXNet 示例之前,我们先安装 :code:`mxnet` 和 :code:`flwr` 软件包:" #: ../../source/example-mxnet-walk-through.rst:19 msgid "MNIST Training with MXNet" @@ -3019,61 +3164,69 @@ msgstr "使用 MXNet 进行 MNIST 训练" #: ../../source/example-mxnet-walk-through.rst:21 msgid "" -"We begin with a brief description of the centralized training code based " -"on a :code:`Sequential` model. If you want a more in-depth explanation of" -" what's going on then have a look at the official `MXNet tutorial " -"`_." +"We begin with a brief description of the centralized training code based on " +"a :code:`Sequential` model. If you want a more in-depth explanation of " +"what's going on then have a look at the official `MXNet tutorial `_." msgstr "" -"首先,我们将简要介绍基于 :code:`Sequential` 模型的集中式训练代码。如果您想获得更深入的解释,请参阅官方的 `MXNet教程 " -"`_。" +"首先,我们将简要介绍基于 :code:`Sequential` 模型的集中式训练代码。如果您想获" +"得更深入的解释,请参阅官方的 `MXNet教程 `_。" #: ../../source/example-mxnet-walk-through.rst:24 msgid "" -"Let's create a new file called:code:`mxnet_mnist.py` with all the " -"components required for a traditional (centralized) MNIST training. 
" -"First, the MXNet package :code:`mxnet` needs to be imported. You can see " -"that we do not yet import the :code:`flwr` package for federated " -"learning. This will be done later." +"Let's create a new file called:code:`mxnet_mnist.py` with all the components " +"required for a traditional (centralized) MNIST training. First, the MXNet " +"package :code:`mxnet` needs to be imported. You can see that we do not yet " +"import the :code:`flwr` package for federated learning. This will be done " +"later." msgstr "" -"让我们创建一个名为:code:`mxnet_mnist.py`的新文件,其中包含传统(集中式)MNIST 训练所需的所有组件。首先,需要导入 " -"MXNet 包 :code:`mxnet`。您可以看到,我们尚未导入用于联合学习的 :code:`flwr` 包,这将在稍后完成。" +"让我们创建一个名为:code:`mxnet_mnist.py`的新文件,其中包含传统(集中式)" +"MNIST 训练所需的所有组件。首先,需要导入 MXNet 包 :code:`mxnet`。您可以看到," +"我们尚未导入用于联合学习的 :code:`flwr` 包,这将在稍后完成。" #: ../../source/example-mxnet-walk-through.rst:42 -msgid "The :code:`load_data()` function loads the MNIST training and test sets." +msgid "" +"The :code:`load_data()` function loads the MNIST training and test sets." msgstr ":code:`load_data()` 函数加载 MNIST 训练集和测试集。" #: ../../source/example-mxnet-walk-through.rst:57 msgid "" "As already mentioned, we will use the MNIST dataset for this machine " -"learning workload. The model architecture (a very simple " -":code:`Sequential` model) is defined in :code:`model()`." +"learning workload. The model architecture (a very simple :code:`Sequential` " +"model) is defined in :code:`model()`." msgstr "" -"如前所述,我们将使用 MNIST 数据集进行机器学习。模型架构(一个非常简单的 :code:`Sequential` 模型)在 " -":code:`model()` 中定义。" +"如前所述,我们将使用 MNIST 数据集进行机器学习。模型架构(一个非常简单的 :" +"code:`Sequential` 模型)在 :code:`model()` 中定义。" #: ../../source/example-mxnet-walk-through.rst:70 msgid "" -"We now need to define the training (function :code:`train()`) which loops" -" over the training set and measures the loss for each batch of training " +"We now need to define the training (function :code:`train()`) which loops " +"over the training set and measures the loss for each batch of training " "examples." -msgstr "现在,我们需要定义训练函数( :code:`train()`),该函数在训练集上循环训练,并计算每批训练示例的损失值。" +msgstr "" +"现在,我们需要定义训练函数( :code:`train()`),该函数在训练集上循环训练,并" +"计算每批训练示例的损失值。" #: ../../source/example-mxnet-walk-through.rst:123 msgid "" "The evaluation of the model is defined in function :code:`test()`. The " -"function loops over all test samples and measures the loss and accuracy " -"of the model based on the test dataset." -msgstr "模型的评估在函数 :code:`test()` 中定义。该函数循环遍历所有测试样本,并根据测试数据集计算模型的损失值和准确度。" +"function loops over all test samples and measures the loss and accuracy of " +"the model based on the test dataset." +msgstr "" +"模型的评估在函数 :code:`test()` 中定义。该函数循环遍历所有测试样本,并根据测" +"试数据集计算模型的损失值和准确度。" #: ../../source/example-mxnet-walk-through.rst:158 msgid "" "Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our model on MNIST. " -"Note that the GPU/CPU device for the training and testing is defined " -"within the :code:`ctx` (context)." +"evaluation we can put everything together and train our model on MNIST. Note " +"that the GPU/CPU device for the training and testing is defined within the :" +"code:`ctx` (context)." 
msgstr "" -"在定义了数据加载、模型架构、训练和评估之后,我们就可以把所有放在一起,在 MNIST 上训练我们的模型了。请注意,用于训练和测试的 GPU/CPU" -" 设备是在 :code:`ctx`中定义的。" +"在定义了数据加载、模型架构、训练和评估之后,我们就可以把所有放在一起,在 " +"MNIST 上训练我们的模型了。请注意,用于训练和测试的 GPU/CPU 设备是在 :code:" +"`ctx`中定义的。" #: ../../source/example-mxnet-walk-through.rst:184 msgid "You can now run your (centralized) MXNet machine learning workload:" @@ -3081,13 +3234,13 @@ msgstr "现在,您可以运行(集中式)MXNet 机器学习工作:" #: ../../source/example-mxnet-walk-through.rst:190 msgid "" -"So far this should all look fairly familiar if you've used MXNet (or even" -" PyTorch) before. Let's take the next step and use what we've built to " -"create a simple federated learning system consisting of one server and " -"two clients." +"So far this should all look fairly familiar if you've used MXNet (or even " +"PyTorch) before. Let's take the next step and use what we've built to create " +"a simple federated learning system consisting of one server and two clients." msgstr "" -"到目前为止,如果你以前使用过 MXNet(甚至 " -"PyTorch),这一切看起来应该相当熟悉。下一步,让我们利用已构建的内容创建一个简单联邦学习系统(由一个服务器和两个客户端组成)。" +"到目前为止,如果你以前使用过 MXNet(甚至 PyTorch),这一切看起来应该相当熟" +"悉。下一步,让我们利用已构建的内容创建一个简单联邦学习系统(由一个服务器和两" +"个客户端组成)。" #: ../../source/example-mxnet-walk-through.rst:194 msgid "MXNet meets Flower" @@ -3096,59 +3249,61 @@ msgstr "MXNet 结合 Flower" #: ../../source/example-mxnet-walk-through.rst:196 msgid "" "So far, it was not easily possible to use MXNet workloads for federated " -"learning because federated learning is not supported in MXNet. Since " -"Flower is fully agnostic towards the underlying machine learning " -"framework, it can be used to federated arbitrary machine learning " -"workloads. This section will show you how Flower can be used to federate " -"our centralized MXNet workload." +"learning because federated learning is not supported in MXNet. Since Flower " +"is fully agnostic towards the underlying machine learning framework, it can " +"be used to federated arbitrary machine learning workloads. This section will " +"show you how Flower can be used to federate our centralized MXNet workload." msgstr "" -"由于 MXNet 目前不支持联邦学习,因此无法轻松地直接将 MXNet 用于联邦学习之中。Flower " -"与底层机器学习框架完全无关,因此它可用于任意联邦式机器学习工作。本节将向你展示如何使用 Flower 将我们的集中式 MXNet 改为联邦式训练。" +"由于 MXNet 目前不支持联邦学习,因此无法轻松地直接将 MXNet 用于联邦学习之中。" +"Flower 与底层机器学习框架完全无关,因此它可用于任意联邦式机器学习工作。本节将" +"向你展示如何使用 Flower 将我们的集中式 MXNet 改为联邦式训练。" #: ../../source/example-mxnet-walk-through.rst:198 msgid "" -"The concept to federate an existing workload is always the same and easy " -"to understand. We have to start a *server* and then use the code in " -":code:`mxnet_mnist.py` for the *clients* that are connected to the " -"*server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server* which averages all received " -"parameter updates. This describes one round of the federated learning " -"process and we repeat this for multiple rounds." -msgstr "" -"将现有模型框架联邦化的概念始终是相同的,也很容易理解。我们必须启动一个*服务器*,然后对连接到*服务器*的*客户端*使用 " -":code:`mxnet_mnist.py`中的代码。*服务器*向客户端发送模型参数,然后*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后会对所有收到的参数更新进行平均聚合。以上描述的是一轮联邦学习过程,我们将重复进行多轮学习。" +"The concept to federate an existing workload is always the same and easy to " +"understand. We have to start a *server* and then use the code in :code:" +"`mxnet_mnist.py` for the *clients* that are connected to the *server*. The " +"*server* sends model parameters to the clients. The *clients* run the " +"training and update the parameters. 
The updated parameters are sent back to " +"the *server* which averages all received parameter updates. This describes " +"one round of the federated learning process and we repeat this for multiple " +"rounds." +msgstr "" +"将现有模型框架联邦化的概念始终是相同的,也很容易理解。我们必须启动一个*服务器" +"*,然后对连接到*服务器*的*客户端*使用 :code:`mxnet_mnist.py`中的代码。*服务器" +"*向客户端发送模型参数,然后*客户端*运行训练并更新参数。更新后的参数被发回*服" +"务器*,然后会对所有收到的参数更新进行平均聚合。以上描述的是一轮联邦学习过程," +"我们将重复进行多轮学习。" #: ../../source/example-mxnet-walk-through.rst:220 msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined MXNet training in :code:`mxnet_mnist.py`. " -"Our *client* needs to import :code:`flwr`, but also :code:`mxnet` to " -"update the parameters on our MXNet model:" +"Finally, we will define our *client* logic in :code:`client.py` and build " +"upon the previously defined MXNet training in :code:`mxnet_mnist.py`. Our " +"*client* needs to import :code:`flwr`, but also :code:`mxnet` to update the " +"parameters on our MXNet model:" msgstr "" -"最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 :code:`mxnet_mnist.py`" -" 中定义的 MXNet 训练为基础。我们的 *client* 不仅需要导入 :code:`flwr`,还需要导入 " -":code:`mxnet`,以更新 MXNet 模型的参数:" +"最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 :" +"code:`mxnet_mnist.py` 中定义的 MXNet 训练为基础。我们的 *client* 不仅需要导" +"入 :code:`flwr`,还需要导入 :code:`mxnet`,以更新 MXNet 模型的参数:" #: ../../source/example-mxnet-walk-through.rst:235 msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`MNISTClient`. :code:`NumPyClient` is slightly easier" -" to implement than :code:`Client` if you use a framework with good NumPy " +"Implementing a Flower *client* basically means implementing a subclass of " +"either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. Our " +"implementation will be based on :code:`flwr.client.NumPyClient` and we'll " +"call it :code:`MNISTClient`. :code:`NumPyClient` is slightly easier to " +"implement than :code:`Client` if you use a framework with good NumPy " "interoperability (like PyTorch or MXNet) because it avoids some of the " -"boilerplate that would otherwise be necessary. :code:`MNISTClient` needs " -"to implement four methods, two methods for getting/setting model " -"parameters, one method for training the model, and one method for testing" -" the model:" -msgstr "" -"实现 Flower *client*基本上意味着要实现 :code:`flwr.client.Client` 或 " -":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " -":code:`flwr.client.NumPyClient`,并将其命名为 :code:`MNISTClient`。如果使用具有良好 NumPy" -" 互操作性的框架(如 PyTorch 或 MXNet),:code:`NumPyClient` 比 " -":code:`Client`更容易实现,因为它避免了一些不必要的操作。:code:`MNISTClient` " -"需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" +"boilerplate that would otherwise be necessary. :code:`MNISTClient` needs to " +"implement four methods, two methods for getting/setting model parameters, " +"one method for training the model, and one method for testing the model:" +msgstr "" +"实现 Flower *client*基本上意味着要实现 :code:`flwr.client.Client` 或 :code:" +"`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 :code:`flwr.client." 
+"NumPyClient`,并将其命名为 :code:`MNISTClient`。如果使用具有良好 NumPy 互操作" +"性的框架(如 PyTorch 或 MXNet),:code:`NumPyClient` 比 :code:`Client`更容易" +"实现,因为它避免了一些不必要的操作。:code:`MNISTClient` 需要实现四个方法,两" +"个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" #: ../../source/example-mxnet-walk-through.rst:242 msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" @@ -3166,58 +3321,57 @@ msgstr "向服务器返回本地损失值和精确度" #: ../../source/example-mxnet-walk-through.rst:255 msgid "" -"The challenging part is to transform the MXNet parameters from " -":code:`NDArray` to :code:`NumPy Arrays` to make it readable for Flower." +"The challenging part is to transform the MXNet parameters from :code:" +"`NDArray` to :code:`NumPy Arrays` to make it readable for Flower." msgstr "" -"具有挑战性的部分是将 MXNet 参数从 :code:`NDArray` 转换为 :code:`NumPy Arrays` 以便 Flower " -"可以读取。" +"具有挑战性的部分是将 MXNet 参数从 :code:`NDArray` 转换为 :code:`NumPy " +"Arrays` 以便 Flower 可以读取。" #: ../../source/example-mxnet-walk-through.rst:257 msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`test()` previously " -"defined in :code:`mxnet_mnist.py`. So what we really do here is we tell " -"Flower through our :code:`NumPyClient` subclass which of our already " -"defined functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make " +"use of the functions :code:`train()` and :code:`test()` previously defined " +"in :code:`mxnet_mnist.py`. So what we really do here is we tell Flower " +"through our :code:`NumPyClient` subclass which of our already defined " +"functions to call for training and evaluation. We included type annotations " +"to give you a better understanding of the data types that get passed around." msgstr "" -"这两个 :code:`NumPyClient` 方法 :code:`fit` 和 :code:`evaluate` 使用了之前在 " -":code:`mxnet_mnist.py` 中定义的函数 :code:`train()` 和 " -":code:`test()`。因此,我们要做的就是通过 :code:`NumPyClient` 子类告知 Flower " -"在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让您更好地理解传递的数据类型。" +"这两个 :code:`NumPyClient` 方法 :code:`fit` 和 :code:`evaluate` 使用了之前" +"在 :code:`mxnet_mnist.py` 中定义的函数 :code:`train()` 和 :code:`test()`。因" +"此,我们要做的就是通过 :code:`NumPyClient` 子类告知 Flower 在训练和评估时要调" +"用哪些已定义的函数。我们加入了类型注解,以便让您更好地理解传递的数据类型。" #: ../../source/example-mxnet-walk-through.rst:319 msgid "" -"Having defined data loading, model architecture, training, and evaluation" -" we can put everything together and train our :code:`Sequential` model on" -" MNIST." +"Having defined data loading, model architecture, training, and evaluation we " +"can put everything together and train our :code:`Sequential` model on MNIST." msgstr "" -"在定义了数据加载、模型架构、训练和评估之后,我们就可以将所有内容整合在一起,在 MNIST 上训练我们的 :code:`Sequential` " -"模型。" +"在定义了数据加载、模型架构、训练和评估之后,我们就可以将所有内容整合在一起," +"在 MNIST 上训练我们的 :code:`Sequential` 模型。" #: ../../source/example-mxnet-walk-through.rst:353 msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your MXNet project run federated learning across two clients." -" Congratulations!" -msgstr "确保服务器仍在运行后,然后就能在每个窗口中看到 MXNet 项目在两个客户端上运行联邦学习了。祝贺!" +"in each window (make sure that the server is still running before you do so) " +"and see your MXNet project run federated learning across two clients. " +"Congratulations!" +msgstr "" +"确保服务器仍在运行后,然后就能在每个窗口中看到 MXNet 项目在两个客户端上运行联" +"邦学习了。祝贺!" 
#: ../../source/example-mxnet-walk-through.rst:358 msgid "" -"The full source code for this example: `MXNet: From Centralized To " -"Federated (Code) `_. Our example is of course " -"somewhat over-simplified because both clients load the exact same " -"dataset, which isn't realistic. You're now prepared to explore this topic" -" further. How about using a CNN or using a different dataset? How about " -"adding more clients?" +"The full source code for this example: `MXNet: From Centralized To Federated " +"(Code) `_. Our example is of course somewhat over-" +"simplified because both clients load the exact same dataset, which isn't " +"realistic. You're now prepared to explore this topic further. How about " +"using a CNN or using a different dataset? How about adding more clients?" msgstr "" "此示例的完整源代码在:\"MXNet: From Centralized To Federated (Code) " -"`_。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。现在您已经准备好进一步探讨了。使用 " -"CNN 或使用不同的数据集会如何?添加更多客户端会如何?" +"`_。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的" +"数据集,这并不真实。现在您已经准备好进一步探讨了。使用 CNN 或使用不同的数据集" +"会如何?添加更多客户端会如何?" #: ../../source/example-pytorch-from-centralized-to-federated.rst:2 msgid "Example: PyTorch - From Centralized To Federated" @@ -3225,41 +3379,44 @@ msgstr "实例: PyTorch - 从集中式到联邦式" #: ../../source/example-pytorch-from-centralized-to-federated.rst:4 msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload. We are using PyTorch to" -" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " -"introduce this machine learning task with a centralized training approach" -" based on the `Deep Learning with PyTorch " -"`_ " -"tutorial. Then, we build upon the centralized training code to run the " -"training in a federated fashion." +"This tutorial will show you how to use Flower to build a federated version " +"of an existing machine learning workload. We are using PyTorch to train a " +"Convolutional Neural Network on the CIFAR-10 dataset. First, we introduce " +"this machine learning task with a centralized training approach based on the " +"`Deep Learning with PyTorch `_ tutorial. Then, we build upon the centralized " +"training code to run the training in a federated fashion." msgstr "" -"本教程将向您展示如何使用 Flower 构建现有机器学习工作的联邦版本。我们使用 PyTorch 在 CIFAR-10 " -"数据集上训练一个卷积神经网络。首先,我们基于 \"Deep Learning with PyTorch " -"`_\"教程,采用集中式训练方法介绍了这项机器学习任务。然后,我们在集中式训练代码的基础上以联邦方式运行训练。" +"本教程将向您展示如何使用 Flower 构建现有机器学习工作的联邦版本。我们使用 " +"PyTorch 在 CIFAR-10 数据集上训练一个卷积神经网络。首先,我们基于 \"Deep " +"Learning with PyTorch `_\"教程,采用集中式训练方法介绍了这项机器学习任务。然" +"后,我们在集中式训练代码的基础上以联邦方式运行训练。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:12 msgid "" -"We begin with a brief description of the centralized CNN training code. " -"If you want a more in-depth explanation of what's going on then have a " -"look at the official `PyTorch tutorial " -"`_." +"We begin with a brief description of the centralized CNN training code. If " +"you want a more in-depth explanation of what's going on then have a look at " +"the official `PyTorch tutorial `_." msgstr "" -"我们首先简要介绍一下集中式 CNN 训练代码。如果您想获得更深入的解释,请参阅 PyTorch 官方教程`PyTorch tutorial " -"`_。" +"我们首先简要介绍一下集中式 CNN 训练代码。如果您想获得更深入的解释,请参阅 " +"PyTorch 官方教程`PyTorch tutorial `_。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:15 msgid "" "Let's create a new file called :code:`cifar.py` with all the components " -"required for a traditional (centralized) training on CIFAR-10. 
First, all" -" required packages (such as :code:`torch` and :code:`torchvision`) need " -"to be imported. You can see that we do not import any package for " -"federated learning. You can keep all these imports as they are even when " -"we add the federated learning components at a later point." +"required for a traditional (centralized) training on CIFAR-10. First, all " +"required packages (such as :code:`torch` and :code:`torchvision`) need to be " +"imported. You can see that we do not import any package for federated " +"learning. You can keep all these imports as they are even when we add the " +"federated learning components at a later point." msgstr "" -"让我们创建一个名为 :code:`cifar.py` 的新文件,其中包含 CIFAR-10 " -"传统(集中)培训所需的所有组件。首先,需要导入所有必需的软件包(如 :code:`torch` 和 " -":code:`torchvision`)。您可以看到,我们没有导入任何用于联邦学习的软件包。即使在以后添加联邦学习组件时,也可以保留所有这些导入。" +"让我们创建一个名为 :code:`cifar.py` 的新文件,其中包含 CIFAR-10 传统(集中)" +"培训所需的所有组件。首先,需要导入所有必需的软件包(如 :code:`torch` 和 :" +"code:`torchvision`)。您可以看到,我们没有导入任何用于联邦学习的软件包。即使" +"在以后添加联邦学习组件时,也可以保留所有这些导入。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:32 msgid "" @@ -3267,107 +3424,122 @@ msgid "" "learning workload. The model architecture (a very simple Convolutional " "Neural Network) is defined in :code:`class Net()`." msgstr "" -"如前所述,我们将使用 CIFAR-10 数据集进行机器学习。模型架构(一个非常简单的卷积神经网络)在 :code:`class Net()` " -"中定义。" +"如前所述,我们将使用 CIFAR-10 数据集进行机器学习。模型架构(一个非常简单的卷" +"积神经网络)在 :code:`class Net()` 中定义。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:56 msgid "" -"The :code:`load_data()` function loads the CIFAR-10 training and test " -"sets. The :code:`transform` normalized the data after loading." +"The :code:`load_data()` function loads the CIFAR-10 training and test sets. " +"The :code:`transform` normalized the data after loading." msgstr "" -":code:`load_data()` 函数加载 CIFAR-10 " -"训练集和测试集。加载数据后,:code:`transform`函数对数据进行了归一化处理。" +":code:`load_data()` 函数加载 CIFAR-10 训练集和测试集。加载数据后,:code:" +"`transform`函数对数据进行了归一化处理。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:74 msgid "" -"We now need to define the training (function :code:`train()`) which loops" -" over the training set, measures the loss, backpropagates it, and then " -"takes one optimizer step for each batch of training examples." -msgstr "现在,我们需要定义训练函数(:code:`train()`),该函数在训练集上循环训练,计算损失值并反向传播,然后为每批训练数据在优化器上执行一个优化步骤。" +"We now need to define the training (function :code:`train()`) which loops " +"over the training set, measures the loss, backpropagates it, and then takes " +"one optimizer step for each batch of training examples." +msgstr "" +"现在,我们需要定义训练函数(:code:`train()`),该函数在训练集上循环训练,计算" +"损失值并反向传播,然后为每批训练数据在优化器上执行一个优化步骤。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:76 msgid "" -"The evaluation of the model is defined in the function :code:`test()`. " -"The function loops over all test samples and measures the loss of the " -"model based on the test dataset." -msgstr "模型的评估在函数 :code:`test()` 中定义。该函数循环遍历所有测试样本,并计算测试数据集的模型损失值。" +"The evaluation of the model is defined in the function :code:`test()`. The " +"function loops over all test samples and measures the loss of the model " +"based on the test dataset." +msgstr "" +"模型的评估在函数 :code:`test()` 中定义。该函数循环遍历所有测试样本,并计算测" +"试数据集的模型损失值。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:136 msgid "" "Having defined the data loading, model architecture, training, and " "evaluation we can put everything together and train our CNN on CIFAR-10." 
-msgstr "在确定了数据加载、模型架构、训练和评估之后,我们就可以将所有整合在一起,在 CIFAR-10 上训练我们的 CNN。" +msgstr "" +"在确定了数据加载、模型架构、训练和评估之后,我们就可以将所有整合在一起,在 " +"CIFAR-10 上训练我们的 CNN。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:163 msgid "" -"So far, this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"simple federated learning system consisting of one server and two " -"clients." +"So far, this should all look fairly familiar if you've used PyTorch before. " +"Let's take the next step and use what we've built to create a simple " +"federated learning system consisting of one server and two clients." msgstr "" -"到目前为止,如果你以前用过 " -"PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,利用我们所构建的内容创建一个简单联邦学习系统(由一个服务器和两个客户端组成)。" +"到目前为止,如果你以前用过 PyTorch,这一切看起来应该相当熟悉。让我们进行下一" +"步,利用我们所构建的内容创建一个简单联邦学习系统(由一个服务器和两个客户端组" +"成)。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:169 msgid "" -"The simple machine learning project discussed in the previous section " -"trains the model on a single dataset (CIFAR-10), we call this centralized" -" learning. This concept of centralized learning, as shown in the previous" -" section, is probably known to most of you, and many of you have used it " -"previously. Normally, if you'd want to run machine learning workloads in " -"a federated fashion, then you'd have to change most of your code and set " -"everything up from scratch. This can be a considerable effort." -msgstr "上一节讨论的简单机器学习项目在单一数据集(CIFAR-10)上训练模型,我们称之为集中学习。如上一节所示,集中学习的概念可能为大多数人所熟知,而且很多人以前都使用过。通常情况下,如果要以联邦方式运行机器学习工作,就必须更改大部分代码,并从头开始设置一切。这可能是一个相当大的工作量。" +"The simple machine learning project discussed in the previous section trains " +"the model on a single dataset (CIFAR-10), we call this centralized learning. " +"This concept of centralized learning, as shown in the previous section, is " +"probably known to most of you, and many of you have used it previously. " +"Normally, if you'd want to run machine learning workloads in a federated " +"fashion, then you'd have to change most of your code and set everything up " +"from scratch. This can be a considerable effort." +msgstr "" +"上一节讨论的简单机器学习项目在单一数据集(CIFAR-10)上训练模型,我们称之为集" +"中学习。如上一节所示,集中学习的概念可能为大多数人所熟知,而且很多人以前都使" +"用过。通常情况下,如果要以联邦方式运行机器学习工作,就必须更改大部分代码,并" +"从头开始设置一切。这可能是一个相当大的工作量。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:173 msgid "" -"However, with Flower you can evolve your pre-existing code into a " -"federated learning setup without the need for a major rewrite." -msgstr "不过,有了 Flower,您可以轻松地将已有的代码转变成联邦学习的模式,无需进行大量重写。" +"However, with Flower you can evolve your pre-existing code into a federated " +"learning setup without the need for a major rewrite." +msgstr "" +"不过,有了 Flower,您可以轻松地将已有的代码转变成联邦学习的模式,无需进行大量" +"重写。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:175 msgid "" -"The concept is easy to understand. We have to start a *server* and then " -"use the code in :code:`cifar.py` for the *clients* that are connected to " -"the *server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server* which averages all received " -"parameter updates. This describes one round of the federated learning " -"process and we repeat this for multiple rounds." +"The concept is easy to understand. We have to start a *server* and then use " +"the code in :code:`cifar.py` for the *clients* that are connected to the " +"*server*. 
The *server* sends model parameters to the clients. The *clients* " +"run the training and update the parameters. The updated parameters are sent " +"back to the *server* which averages all received parameter updates. This " +"describes one round of the federated learning process and we repeat this for " +"multiple rounds." msgstr "" -"这个概念很容易理解。我们必须启动一个*服务器*,然后对连接到*服务器*的*客户端*使用 " -":code:`cifar.py`中的代码。*服务器*向客户端发送模型参数,*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后会对所有收到的参数更新进行平均聚合。以上描述的是一轮联邦学习过程,我们将重复进行多轮学习。" +"这个概念很容易理解。我们必须启动一个*服务器*,然后对连接到*服务器*的*客户端*" +"使用 :code:`cifar.py`中的代码。*服务器*向客户端发送模型参数,*客户端*运行训练" +"并更新参数。更新后的参数被发回*服务器*,然后会对所有收到的参数更新进行平均聚" +"合。以上描述的是一轮联邦学习过程,我们将重复进行多轮学习。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:197 msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined centralized training in :code:`cifar.py`. " -"Our *client* needs to import :code:`flwr`, but also :code:`torch` to " -"update the parameters on our PyTorch model:" +"Finally, we will define our *client* logic in :code:`client.py` and build " +"upon the previously defined centralized training in :code:`cifar.py`. Our " +"*client* needs to import :code:`flwr`, but also :code:`torch` to update the " +"parameters on our PyTorch model:" msgstr "" -"最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 :code:`cifar.py` " -"中定义的集中式训练为基础。我们的 *client* 不仅需要导入 :code:`flwr`,还需要导入 :code:`torch`,以更新 " -"PyTorch 模型的参数:" +"最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 :" +"code:`cifar.py` 中定义的集中式训练为基础。我们的 *client* 不仅需要导入 :code:" +"`flwr`,还需要导入 :code:`torch`,以更新 PyTorch 模型的参数:" #: ../../source/example-pytorch-from-centralized-to-federated.rst:213 msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" -" to implement than :code:`Client` if you use a framework with good NumPy " -"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " -"some of the boilerplate that would otherwise be necessary. " -":code:`CifarClient` needs to implement four methods, two methods for " -"getting/setting model parameters, one method for training the model, and " -"one method for testing the model:" -msgstr "" -"实现 Flower *client*基本上意味着实现 :code:`flwr.client.Client` 或 " -":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " -":code:`flwr.client.NumPyClient`,并将其命名为 :code:`CifarClient`。如果使用具有良好 NumPy" -" 互操作性的框架(如 PyTorch 或 TensorFlow/Keras),:code:`NumPyClient`的实现比 " -":code:`Client`略微容易一些,因为它避免了一些不必要的操作。:code:`CifarClient` " -"需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" +"Implementing a Flower *client* basically means implementing a subclass of " +"either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. Our " +"implementation will be based on :code:`flwr.client.NumPyClient` and we'll " +"call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier to " +"implement than :code:`Client` if you use a framework with good NumPy " +"interoperability (like PyTorch or TensorFlow/Keras) because it avoids some " +"of the boilerplate that would otherwise be necessary. 
:code:`CifarClient` " +"needs to implement four methods, two methods for getting/setting model " +"parameters, one method for training the model, and one method for testing " +"the model:" +msgstr "" +"实现 Flower *client*基本上意味着实现 :code:`flwr.client.Client` 或 :code:" +"`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 :code:`flwr.client." +"NumPyClient`,并将其命名为 :code:`CifarClient`。如果使用具有良好 NumPy 互操作" +"性的框架(如 PyTorch 或 TensorFlow/Keras),:code:`NumPyClient`的实现比 :" +"code:`Client`略微容易一些,因为它避免了一些不必要的操作。:code:`CifarClient` " +"需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模" +"型:" #: ../../source/example-pytorch-from-centralized-to-federated.rst:219 msgid ":code:`set_parameters`" @@ -3375,50 +3547,55 @@ msgstr ":code:`set_parameters`" #: ../../source/example-pytorch-from-centralized-to-federated.rst:232 msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`test()` previously " -"defined in :code:`cifar.py`. So what we really do here is we tell Flower " -"through our :code:`NumPyClient` subclass which of our already defined " -"functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make " +"use of the functions :code:`train()` and :code:`test()` previously defined " +"in :code:`cifar.py`. So what we really do here is we tell Flower through " +"our :code:`NumPyClient` subclass which of our already defined functions to " +"call for training and evaluation. We included type annotations to give you a " +"better understanding of the data types that get passed around." msgstr "" -"这两个 :code:`NumPyClient` 中的方法 :code:`fit` 和 :code:`evaluate` 使用了之前在 " -":code:`cifar.py` 中定义的函数 :code:`train()` 和 :code:`test()`。因此,我们在这里要做的就是通过 " -":code:`NumPyClient` 子类告知 Flower " -"在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让你更好地理解传递的数据类型。" +"这两个 :code:`NumPyClient` 中的方法 :code:`fit` 和 :code:`evaluate` 使用了之" +"前在 :code:`cifar.py` 中定义的函数 :code:`train()` 和 :code:`test()`。因此," +"我们在这里要做的就是通过 :code:`NumPyClient` 子类告知 Flower 在训练和评估时要" +"调用哪些已定义的函数。我们加入了类型注解,以便让你更好地理解传递的数据类型。" #: ../../source/example-pytorch-from-centralized-to-federated.rst:280 msgid "" "All that's left to do it to define a function that loads both model and " -"data, creates a :code:`CifarClient`, and starts this client. You load " -"your data and model by using :code:`cifar.py`. Start :code:`CifarClient` " -"with the function :code:`fl.client.start_client()` by pointing it at the " -"same IP address we used in :code:`server.py`:" -msgstr "剩下的就是定义模型和数据加载函数了。创建一个:code:`CifarClient`类,并运行这个客服端。您将通过:code:`cifar.py`加载数据和模型。另外,通过:code:`fl.client.start_client()`函数来运行客户端:code:`CifarClient`,需要保证IP地址和:code:`server.py`中所使用的一致:" +"data, creates a :code:`CifarClient`, and starts this client. You load your " +"data and model by using :code:`cifar.py`. Start :code:`CifarClient` with the " +"function :code:`fl.client.start_client()` by pointing it at the same IP " +"address we used in :code:`server.py`:" +msgstr "" +"剩下的就是定义模型和数据加载函数了。创建一个:code:`CifarClient`类,并运行这个" +"客服端。您将通过:code:`cifar.py`加载数据和模型。另外,通过:code:`fl.client." 
+"start_client()`函数来运行客户端:code:`CifarClient`,需要保证IP地址和:code:" +"`server.py`中所使用的一致:" #: ../../source/example-pytorch-from-centralized-to-federated.rst:307 msgid "" -"in each window (make sure that the server is running before you do so) " -"and see your (previously centralized) PyTorch project run federated " -"learning across two clients. Congratulations!" -msgstr "确保服务器正在运行后,您就能看到您的 PyTorch 项目(之前是集中式的)在两个客户端上运行联邦学习了。祝贺!" +"in each window (make sure that the server is running before you do so) and " +"see your (previously centralized) PyTorch project run federated learning " +"across two clients. Congratulations!" +msgstr "" +"确保服务器正在运行后,您就能看到您的 PyTorch 项目(之前是集中式的)在两个客户" +"端上运行联邦学习了。祝贺!" #: ../../source/example-pytorch-from-centralized-to-federated.rst:312 msgid "" "The full source code for this example: `PyTorch: From Centralized To " -"Federated (Code) `_. Our example is, of course, " -"somewhat over-simplified because both clients load the exact same " -"dataset, which isn't realistic. You're now prepared to explore this topic" -" further. How about using different subsets of CIFAR-10 on each client? " -"How about adding more clients?" -msgstr "" -"本示例的完整源代码为:`PyTorch: 从集中式到联合式 " -"`_。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。现在,您已经准备好进一步探讨这一主题了。比如在每个客户端使用不同的" -" CIFAR-10 子集会如何?增加更多客户端会如何?" +"Federated (Code) `_. Our example is, of course, somewhat over-" +"simplified because both clients load the exact same dataset, which isn't " +"realistic. You're now prepared to explore this topic further. How about " +"using different subsets of CIFAR-10 on each client? How about adding more " +"clients?" +msgstr "" +"本示例的完整源代码为:`PyTorch: 从集中式到联合式 `_。当然,我" +"们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。现" +"在,您已经准备好进一步探讨这一主题了。比如在每个客户端使用不同的 CIFAR-10 子" +"集会如何?增加更多客户端会如何?" #: ../../source/explanation-differential-privacy.rst:2 #: ../../source/explanation-differential-privacy.rst:11 @@ -3429,11 +3606,10 @@ msgstr "差分隐私" #: ../../source/explanation-differential-privacy.rst:3 #, fuzzy msgid "" -"The information in datasets like healthcare, financial transactions, user" -" preferences, etc., is valuable and has the potential for scientific " -"breakthroughs and provides important business insights. However, such " -"data is also sensitive and there is a risk of compromising individual " -"privacy." +"The information in datasets like healthcare, financial transactions, user " +"preferences, etc., is valuable and has the potential for scientific " +"breakthroughs and provides important business insights. However, such data " +"is also sensitive and there is a risk of compromising individual privacy." msgstr "" "医疗保健、金融交易、用户偏好等数据集中的信息非常宝贵,有可能带来科学突破并提" "供重要的商业见解。然而,这些数据也是敏感数据,存在泄露个人隐私的风险。" @@ -3442,10 +3618,11 @@ msgstr "" #, fuzzy msgid "" "Traditional methods like anonymization alone would not work because of " -"attacks like Re-identification and Data Linkage. That's where " -"differential privacy comes in. It provides the possibility of analyzing " -"data while ensuring the privacy of individuals." -msgstr "单靠匿名等传统方法是行不通的,因为会受到重新识别和数据链接等攻击。这就是差异" +"attacks like Re-identification and Data Linkage. That's where differential " +"privacy comes in. It provides the possibility of analyzing data while " +"ensuring the privacy of individuals." +msgstr "" +"单靠匿名等传统方法是行不通的,因为会受到重新识别和数据链接等攻击。这就是差异" "化隐私的用武之地。它提供了在分析数据的同时确保个人隐私的可能性。" #: ../../source/explanation-differential-privacy.rst:12 @@ -3455,13 +3632,13 @@ msgid "" "instance, Alice's data). 
Differential Privacy (DP) guarantees that any " "analysis (M), like calculating the average income, will produce nearly " "identical results for both datasets (O and O' would be similar). This " -"preserves group patterns while obscuring individual details, ensuring the" -" individual's information remains hidden in the crowd." +"preserves group patterns while obscuring individual details, ensuring the " +"individual's information remains hidden in the crowd." msgstr "" -"试想一下,两个数据集除了一条记录(例如 Alice 的数据)之外完全相同。差分隐私(" -"DP)可以保证任何分析(M),比如计算平均收入,对两个数据集都会产生几乎相同的结" -"果(O 和 O' 将是相似的)。这既保留了群体模式,又掩盖了个人细节,确保个人的信" -"息隐藏在人群中。" +"试想一下,两个数据集除了一条记录(例如 Alice 的数据)之外完全相同。差分隐私" +"(DP)可以保证任何分析(M),比如计算平均收入,对两个数据集都会产生几乎相同的" +"结果(O 和 O' 将是相似的)。这既保留了群体模式,又掩盖了个人细节,确保个人的" +"信息隐藏在人群中。" #: ../../source/explanation-differential-privacy.rst:-1 #, fuzzy @@ -3473,9 +3650,9 @@ msgstr "DP 介绍" msgid "" "One of the most commonly used mechanisms to achieve DP is adding enough " "noise to the output of the analysis to mask the contribution of each " -"individual in the data while preserving the overall accuracy of the " -"analysis." -msgstr "实现 DP 的最常用机制之一是在分析输出中加入足够的噪音,以掩盖数据中每个个体的" +"individual in the data while preserving the overall accuracy of the analysis." +msgstr "" +"实现 DP 的最常用机制之一是在分析输出中加入足够的噪音,以掩盖数据中每个个体的" "贡献,同时保持分析的整体准确性。" #: ../../source/explanation-differential-privacy.rst:25 @@ -3488,18 +3665,18 @@ msgstr "编译 ProtoBuf 定义" msgid "" "Differential Privacy (DP) provides statistical guarantees against the " "information an adversary can infer through the output of a randomized " -"algorithm. It provides an unconditional upper bound on the influence of a" -" single individual on the output of the algorithm by adding noise [1]. A " -"randomized mechanism M provides (:math:`\\epsilon`, " -":math:`\\delta`)-differential privacy if for any two neighboring " -"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " -"record, and for all possible outputs S ⊆ Range(A):" +"algorithm. It provides an unconditional upper bound on the influence of a " +"single individual on the output of the algorithm by adding noise [1]. A " +"randomized mechanism M provides (:math:`\\epsilon`, :math:`\\delta`)-" +"differential privacy if for any two neighboring databases, D :sub:`1` and D :" +"sub:`2`, that differ in only a single record, and for all possible outputs S " +"⊆ Range(A):" msgstr "" "差分隐私(Differential Privacy,DP)针对对手通过随机算法的输出所能推断出的信" "息提供统计保证。它为单个个体通过添加噪声对算法输出的影响提供了一个无条件的上" -"限[1]。如果任意两个相邻的数据库D :sub:`1`和D :sub:`2`只有一条记录不同," -"并且对于所有可能的输出S ⊆ Range(A),随机化机制M提供(:math:`epsilon`,:math:`" -"\\delta`)差异隐私:" +"限[1]。如果任意两个相邻的数据库D :sub:`1`和D :sub:`2`只有一条记录不同,并且对" +"于所有可能的输出S ⊆ Range(A),随机化机制M提供(:math:`epsilon`,:math:" +"`\\delta`)差异隐私:" #: ../../source/explanation-differential-privacy.rst:32 #, fuzzy @@ -3516,17 +3693,16 @@ msgid "" "The :math:`\\epsilon` parameter, also known as the privacy budget, is a " "metric of privacy loss. It also controls the privacy-utility trade-off; " "lower :math:`\\epsilon` values indicate higher levels of privacy but are " -"likely to reduce utility as well. The :math:`\\delta` parameter accounts " -"for a small probability on which the upper bound :math:`\\epsilon` does " -"not hold. The amount of noise needed to achieve differential privacy is " -"proportional to the sensitivity of the output, which measures the maximum" -" change in the output due to the inclusion or removal of a single record." 
-msgstr "" -":math:`\\epsilon`参数也称为隐私预算,是衡量隐私损失的指标。较低的 :math:`" -"\\epsilon` 值表示较高的隐私级别,但也可能降低效用。:math:`\\delta" -"`参数考虑了:math:`\\epsilon`上限不成立的小概率。实现差异化隐私所需的噪声量与" -"输出的灵敏度成正比,而输出的灵敏度是指由于包含或删除一条记录而导致的输出的最" -"大变化。" +"likely to reduce utility as well. The :math:`\\delta` parameter accounts for " +"a small probability on which the upper bound :math:`\\epsilon` does not " +"hold. The amount of noise needed to achieve differential privacy is " +"proportional to the sensitivity of the output, which measures the maximum " +"change in the output due to the inclusion or removal of a single record." +msgstr "" +":math:`\\epsilon`参数也称为隐私预算,是衡量隐私损失的指标。较低的 :math:" +"`\\epsilon` 值表示较高的隐私级别,但也可能降低效用。:math:`\\delta`参数考虑" +"了:math:`\\epsilon`上限不成立的小概率。实现差异化隐私所需的噪声量与输出的灵敏" +"度成正比,而输出的灵敏度是指由于包含或删除一条记录而导致的输出的最大变化。" #: ../../source/explanation-differential-privacy.rst:45 #, fuzzy @@ -3538,15 +3714,14 @@ msgstr "差分隐私" msgid "" "DP can be utilized in machine learning to preserve the privacy of the " "training data. Differentially private machine learning algorithms are " -"designed in a way to prevent the algorithm to learn any specific " -"information about any individual data points and subsequently prevent the" -" model from revealing sensitive information. Depending on the stage at " -"which noise is introduced, various methods exist for applying DP to " -"machine learning algorithms. One approach involves adding noise to the " -"training data (either to the features or labels), while another method " -"entails injecting noise into the gradients of the loss function during " -"model training. Additionally, such noise can be incorporated into the " -"model's output." +"designed in a way to prevent the algorithm to learn any specific information " +"about any individual data points and subsequently prevent the model from " +"revealing sensitive information. Depending on the stage at which noise is " +"introduced, various methods exist for applying DP to machine learning " +"algorithms. One approach involves adding noise to the training data (either " +"to the features or labels), while another method entails injecting noise " +"into the gradients of the loss function during model training. Additionally, " +"such noise can be incorporated into the model's output." msgstr "" "机器学习中可以利用 DP 来保护训练数据的隐私。差分保密机器学习算法的设计方式是" "防止算法学习到任何单个数据点的任何特定信息,从而防止模型泄露敏感信息。根据引" @@ -3565,8 +3740,8 @@ msgid "" "Federated learning is a data minimization approach that allows multiple " "parties to collaboratively train a model without sharing their raw data. " "However, federated learning also introduces new privacy challenges. The " -"model updates between parties and the central server can leak information" -" about the local data. These leaks can be exploited by attacks such as " +"model updates between parties and the central server can leak information " +"about the local data. These leaks can be exploited by attacks such as " "membership inference and property inference attacks, or model inversion " "attacks." msgstr "" @@ -3578,41 +3753,43 @@ msgstr "" #: ../../source/explanation-differential-privacy.rst:58 #, fuzzy msgid "" -"DP can play a crucial role in federated learning to provide privacy for " -"the clients' data." +"DP can play a crucial role in federated learning to provide privacy for the " +"clients' data." 
msgstr "DP 可以在联合学习中发挥重要作用,为客户数据提供隐私保护。" #: ../../source/explanation-differential-privacy.rst:60 #, fuzzy msgid "" -"Depending on the granularity of privacy provision or the location of " -"noise addition, different forms of DP exist in federated learning. In " -"this explainer, we focus on two approaches of DP utilization in federated" -" learning based on where the noise is added: at the server (also known as" -" the center) or at the client (also known as the local)." +"Depending on the granularity of privacy provision or the location of noise " +"addition, different forms of DP exist in federated learning. In this " +"explainer, we focus on two approaches of DP utilization in federated " +"learning based on where the noise is added: at the server (also known as the " +"center) or at the client (also known as the local)." msgstr "" -"根据提供隐私的粒度或添加噪声的位置,联合学习中存在不同形式的 " -"DP。在本说明中,我们将根据添加噪声的位置,重点介绍联合学习中利用 DP " -"的两种方法:在服务器(也称为中心)或客户端(也称为本地)。" +"根据提供隐私的粒度或添加噪声的位置,联合学习中存在不同形式的 DP。在本说明中," +"我们将根据添加噪声的位置,重点介绍联合学习中利用 DP 的两种方法:在服务器(也" +"称为中心)或客户端(也称为本地)。" #: ../../source/explanation-differential-privacy.rst:63 #, fuzzy msgid "" -"**Central Differential Privacy**: DP is applied by the server and the " -"goal is to prevent the aggregated model from leaking information about " -"each client's data." -msgstr "**中央差分隐私**: DP " -"由服务器应用,目标是防止聚合模型泄露每个客户的数据信息。" +"**Central Differential Privacy**: DP is applied by the server and the goal " +"is to prevent the aggregated model from leaking information about each " +"client's data." +msgstr "" +"**中央差分隐私**: DP 由服务器应用,目标是防止聚合模型泄露每个客户的数据信" +"息。" #: ../../source/explanation-differential-privacy.rst:65 #, fuzzy msgid "" "**Local Differential Privacy**: DP is applied on the client side before " -"sending any information to the server and the goal is to prevent the " -"updates that are sent to the server from leaking any information about " -"the client's data." -msgstr "**本地差分隐私**: 在向服务器发送任何信息之前,在客户端应用 " -"DP,目的是防止向服务器发送的更新泄露任何有关客户端数据的信息。" +"sending any information to the server and the goal is to prevent the updates " +"that are sent to the server from leaking any information about the client's " +"data." +msgstr "" +"**本地差分隐私**: 在向服务器发送任何信息之前,在客户端应用 DP,目的是防止向" +"服务器发送的更新泄露任何有关客户端数据的信息。" #: ../../source/explanation-differential-privacy.rst:-1 #: ../../source/explanation-differential-privacy.rst:68 @@ -3624,34 +3801,34 @@ msgstr "差分隐私" #: ../../source/explanation-differential-privacy.rst:69 #, fuzzy msgid "" -"In this approach, which is also known as user-level DP, the central " -"server is responsible for adding noise to the globally aggregated " -"parameters. It should be noted that trust in the server is required." -msgstr "在这种方法(也称为用户级 DP)中,中央服务器负责在全局汇总参数中添加噪声。需要" +"In this approach, which is also known as user-level DP, the central server " +"is responsible for adding noise to the globally aggregated parameters. It " +"should be noted that trust in the server is required." +msgstr "" +"在这种方法(也称为用户级 DP)中,中央服务器负责在全局汇总参数中添加噪声。需要" "注意的是,这需要对服务器的信任。" #: ../../source/explanation-differential-privacy.rst:76 #, fuzzy msgid "" -"While there are various ways to implement central DP in federated " -"learning, we concentrate on the algorithms proposed by [2] and [3]. The " -"overall approach is to clip the model updates sent by the clients and add" -" some amount of noise to the aggregated model. In each iteration, a " -"random set of clients is chosen with a specific probability for training." -" Each client performs local training on its own data. 
The update of each " -"client is then clipped by some value `S` (sensitivity `S`). This would " -"limit the impact of any individual client which is crucial for privacy " -"and often beneficial for robustness. A common approach to achieve this is" -" by restricting the `L2` norm of the clients' model updates, ensuring " -"that larger updates are scaled down to fit within the norm `S`." +"While there are various ways to implement central DP in federated learning, " +"we concentrate on the algorithms proposed by [2] and [3]. The overall " +"approach is to clip the model updates sent by the clients and add some " +"amount of noise to the aggregated model. In each iteration, a random set of " +"clients is chosen with a specific probability for training. Each client " +"performs local training on its own data. The update of each client is then " +"clipped by some value `S` (sensitivity `S`). This would limit the impact of " +"any individual client which is crucial for privacy and often beneficial for " +"robustness. A common approach to achieve this is by restricting the `L2` " +"norm of the clients' model updates, ensuring that larger updates are scaled " +"down to fit within the norm `S`." msgstr "" "虽然在联合学习中实现中央数据处理的方法有很多种,但我们将重点放在[2]和[3]提出" -"的算法上。总体方法是剪辑客户端发送的模型更新,并在聚合模型中添加一定量的噪声" -"。在每次迭代中,以特定概率随机选择一组客户端进行训练。每个客户端对自己的数据" -"进行局部训练。然后,每个客户端的更新会被某个值`S`(灵敏度`S`)剪切。这将限制" -"任何单个客户端的影响,这对隐私至关重要,通常也有利于稳健性。" -"实现这一点的常用方法是限制客户机模型更新的 `L2` 准则," -"确保较大的更新被缩减以适应 `S` 准则。" +"的算法上。总体方法是剪辑客户端发送的模型更新,并在聚合模型中添加一定量的噪" +"声。在每次迭代中,以特定概率随机选择一组客户端进行训练。每个客户端对自己的数" +"据进行局部训练。然后,每个客户端的更新会被某个值`S`(灵敏度`S`)剪切。这将限" +"制任何单个客户端的影响,这对隐私至关重要,通常也有利于稳健性。实现这一点的常" +"用方法是限制客户机模型更新的 `L2` 准则,确保较大的更新被缩减以适应 `S` 准则。" #: ../../source/explanation-differential-privacy.rst:-1 #, fuzzy @@ -3661,15 +3838,15 @@ msgstr "剪贴" #: ../../source/explanation-differential-privacy.rst:89 #, fuzzy msgid "" -"Afterwards, the Gaussian mechanism is used to add noise in order to " -"distort the sum of all clients' updates. The amount of noise is scaled to" -" the sensitivity value to obtain a privacy guarantee. The Gaussian " -"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " -"noise_scale * S ) / (number of sampled clients)`." +"Afterwards, the Gaussian mechanism is used to add noise in order to distort " +"the sum of all clients' updates. The amount of noise is scaled to the " +"sensitivity value to obtain a privacy guarantee. The Gaussian mechanism is " +"used with a noise sampled from `N (0, σ²)` where `σ = ( noise_scale * S ) / " +"(number of sampled clients)`." msgstr "" "然后,使用高斯机制添加噪声,以扭曲所有客户端的更新总和。噪声量与灵敏度值成正" -"比,以获得隐私保证。高斯机制的噪声采样范围为 `N (0, σ²)` ,其中 σ = ( " -"噪声规模 * S ) / (采样客户数)`。" +"比,以获得隐私保证。高斯机制的噪声采样范围为 `N (0, σ²)` ,其中 σ = ( 噪声规" +"模 * S ) / (采样客户数)`。" #: ../../source/explanation-differential-privacy.rst:94 #, fuzzy @@ -3679,36 +3856,39 @@ msgstr "剪贴" #: ../../source/explanation-differential-privacy.rst:96 #, fuzzy msgid "" -"There are two forms of clipping commonly used in Central DP: Fixed " -"Clipping and Adaptive Clipping." +"There are two forms of clipping commonly used in Central DP: Fixed Clipping " +"and Adaptive Clipping." msgstr "中央处理器常用的剪切有两种形式:固定剪切和自适应剪切。" #: ../../source/explanation-differential-privacy.rst:98 #, fuzzy msgid "" -"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " -"of clients' updates. Any update exceeding this threshold is clipped back " -"to the threshold value." 
-msgstr "** 固定削波** : 为客户端更新的大小设置了一个预定义的固定阈值。任何超过该阈值" +"**Fixed Clipping** : A predefined fix threshold is set for the magnitude of " +"clients' updates. Any update exceeding this threshold is clipped back to the " +"threshold value." +msgstr "" +"** 固定削波** : 为客户端更新的大小设置了一个预定义的固定阈值。任何超过该阈值" "的更新都会被剪切回阈值。" #: ../../source/explanation-differential-privacy.rst:100 #, fuzzy msgid "" -"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " -"on the observed update distribution [4]. It means that the clipping value" -" is tuned during the rounds with respect to the quantile of the update " -"norm distribution." -msgstr "** 自适应削波** : 削波阈值根据观察到的更新分布动态调整[4]。这意味着,在各轮" +"**Adaptive Clipping** : The clipping threshold dynamically adjusts based on " +"the observed update distribution [4]. It means that the clipping value is " +"tuned during the rounds with respect to the quantile of the update norm " +"distribution." +msgstr "" +"** 自适应削波** : 削波阈值根据观察到的更新分布动态调整[4]。这意味着,在各轮" "中,会根据更新规范分布的量化值调整削波值。" #: ../../source/explanation-differential-privacy.rst:102 #, fuzzy msgid "" -"The choice between fixed and adaptive clipping depends on various factors" -" such as privacy requirements, data distribution, model complexity, and " +"The choice between fixed and adaptive clipping depends on various factors " +"such as privacy requirements, data distribution, model complexity, and " "others." -msgstr "在固定剪切和自适应剪切之间做出选择取决于各种因素,如隐私要求、数据分布、模型" +msgstr "" +"在固定剪切和自适应剪切之间做出选择取决于各种因素,如隐私要求、数据分布、模型" "复杂性等。" #: ../../source/explanation-differential-privacy.rst:-1 @@ -3722,13 +3902,13 @@ msgstr "差分隐私" #, fuzzy msgid "" "In this approach, each client is responsible for performing DP. Local DP " -"avoids the need for a fully trusted aggregator, but it should be noted " -"that local DP leads to a decrease in accuracy but better privacy in " -"comparison to central DP." +"avoids the need for a fully trusted aggregator, but it should be noted that " +"local DP leads to a decrease in accuracy but better privacy in comparison to " +"central DP." msgstr "" -"在这种方法中,每个客户端都负责执行 DP。本地 DP " -"避免了对完全可信的聚合器的需求,但需要注意的是,与中央 DP 相比,本地 DP " -"会降低准确性,但却能更好地保护隐私。" +"在这种方法中,每个客户端都负责执行 DP。本地 DP 避免了对完全可信的聚合器的需" +"求,但需要注意的是,与中央 DP 相比,本地 DP 会降低准确性,但却能更好地保护隐" +"私。" #: ../../source/explanation-differential-privacy.rst:116 #, fuzzy @@ -3739,25 +3919,25 @@ msgstr "在本说明中,我们将重点介绍实现本地 DP 的两种形式 #, fuzzy msgid "" "Each client adds noise to the local updates before sending them to the " -"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " -"the sensitivity of the local model to be ∆, Gaussian noise is applied " -"with a noise scale of σ where:" +"server. 
To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering the " +"sensitivity of the local model to be ∆, Gaussian noise is applied with a " +"noise scale of σ where:" msgstr "" -"每个客户端在向服务器发送本地更新之前,都会在本地更新中加入噪声。为了实现(:ma" -"th:`\\epsilon`, :math:`\\delta`)-DP,考虑到本地模型的灵敏度为 " -"∆,应用了高斯噪声,噪声尺度为 σ,其中:" +"每个客户端在向服务器发送本地更新之前,都会在本地更新中加入噪声。为了实现(:" +"math:`\\epsilon`, :math:`\\delta`)-DP,考虑到本地模型的灵敏度为 ∆,应用了高" +"斯噪声,噪声尺度为 σ,其中:" #: ../../source/explanation-differential-privacy.rst:120 #, fuzzy msgid "" "\\small\n" -"\\frac{∆ \\times \\sqrt{2 \\times " -"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" +"\\frac{∆ \\times \\sqrt{2 \\times \\log\\left(\\frac{1.25}{\\delta}\\right)}}" +"{\\epsilon}\n" "\n" msgstr "" "\\small\n" -"\\frac{∆ \\times \\sqrt{2 \\times \\log\\left(\\frac{1.25}{\\delta}\\right" -")}}{\\epsilon}\n" +"\\frac{∆ \\times \\sqrt{2 \\times \\log\\left(\\frac{1.25}{\\delta}\\right)}}" +"{\\epsilon}\n" "\n" #: ../../source/explanation-differential-privacy.rst:125 @@ -3766,7 +3946,8 @@ msgid "" "Each client adds noise to the gradients of the model during the local " "training (DP-SGD). More specifically, in this approach, gradients are " "clipped and an amount of calibrated noise is injected into the gradients." -msgstr "在局部训练过程中,每个客户端都会向模型的梯度添加噪声(DP-SGD)。更具体地说," +msgstr "" +"在局部训练过程中,每个客户端都会向模型的梯度添加噪声(DP-SGD)。更具体地说," "在这种方法中,梯度会被剪切,并在梯度中注入一定量的校准噪声。" #: ../../source/explanation-differential-privacy.rst:128 @@ -3789,26 +3970,25 @@ msgstr "[1] Dwork 等:《差分隐私的算法基础》。" #: ../../source/explanation-differential-privacy.rst:135 #, fuzzy msgid "" -"[2] McMahan et al. Learning Differentially Private Recurrent Language " -"Models." +"[2] McMahan et al. Learning Differentially Private Recurrent Language Models." msgstr "" -"McMahan, H. Brendan等. \"Learning differentially private recurrent " -"language models.\" arXiv preprint arXiv:1710.06963 (2017)." +"McMahan, H. Brendan等. \"Learning differentially private recurrent language " +"models.\" arXiv preprint arXiv:1710.06963 (2017)." #: ../../source/explanation-differential-privacy.rst:137 #, fuzzy msgid "" -"[3] Geyer et al. Differentially Private Federated Learning: A Client " -"Level Perspective." +"[3] Geyer et al. Differentially Private Federated Learning: A Client Level " +"Perspective." msgstr "[3] Geyer 等人。差异化化私人联合学习:客户层面的视角。" #: ../../source/explanation-differential-privacy.rst:139 #, fuzzy -msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." +msgid "" +"[4] Galen et al. Differentially Private Learning with Adaptive Clipping." msgstr "" -"Andrew, Galen等. \"Differentially private learning with adaptive " -"clipping.\" Advances in Neural Information Processing Systems 34 (2021): " -"17455-17466." +"Andrew, Galen等. \"Differentially private learning with adaptive clipping.\" " +"Advances in Neural Information Processing Systems 34 (2021): 17455-17466." #: ../../source/explanation-federated-evaluation.rst:2 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 @@ -3818,9 +3998,11 @@ msgstr "联邦学习评估" #: ../../source/explanation-federated-evaluation.rst:4 msgid "" "There are two main approaches to evaluating models in federated learning " -"systems: centralized (or server-side) evaluation and federated (or " -"client-side) evaluation." -msgstr "评估联合学习系统中的模型主要有两种方法:集中(或服务器端)评估和联邦(或客户端)评估。" +"systems: centralized (or server-side) evaluation and federated (or client-" +"side) evaluation." 
+msgstr "" +"评估联合学习系统中的模型主要有两种方法:集中(或服务器端)评估和联邦(或客户" +"端)评估。" #: ../../source/explanation-federated-evaluation.rst:8 msgid "Centralized Evaluation" @@ -3836,7 +4018,9 @@ msgid "" "evaluation function during initialization. An evaluation function is any " "function that can take the current global model parameters as input and " "return evaluation results:" -msgstr "所有内置策略都通过在初始化过程中提供一个评估函数来支持集中评估。评估函数是任何可以将当前全局模型参数作为输入并返回评估结果的函数:" +msgstr "" +"所有内置策略都通过在初始化过程中提供一个评估函数来支持集中评估。评估函数是任" +"何可以将当前全局模型参数作为输入并返回评估结果的函数:" #: ../../source/explanation-federated-evaluation.rst:58 msgid "Custom Strategies" @@ -3844,14 +4028,14 @@ msgstr "定制策略" #: ../../source/explanation-federated-evaluation.rst:60 msgid "" -"The :code:`Strategy` abstraction provides a method called " -":code:`evaluate` that can directly be used to evaluate the current global" -" model parameters. The current server implementation calls " -":code:`evaluate` after parameter aggregation and before federated " -"evaluation (see next paragraph)." +"The :code:`Strategy` abstraction provides a method called :code:`evaluate` " +"that can directly be used to evaluate the current global model parameters. " +"The current server implementation calls :code:`evaluate` after parameter " +"aggregation and before federated evaluation (see next paragraph)." msgstr "" -":code:`Strategy` 抽象提供了一个名为 :code:`evaluate` " -"的方法,可直接用于评估当前的全局模型参数。服务器会在参数聚合后和联邦评估前调用 :code:`evaluate`(见下段)。" +":code:`Strategy` 抽象提供了一个名为 :code:`evaluate` 的方法,可直接用于评估当" +"前的全局模型参数。服务器会在参数聚合后和联邦评估前调用 :code:`evaluate`(见下" +"段)。" #: ../../source/explanation-federated-evaluation.rst:65 msgid "Federated Evaluation" @@ -3863,9 +4047,10 @@ msgstr "实现联邦评估" #: ../../source/explanation-federated-evaluation.rst:70 msgid "" -"Client-side evaluation happens in the :code:`Client.evaluate` method and " -"can be configured from the server side." -msgstr "客户端评估在 :code:`Client.evaluate` 方法中进行,并可从服务器端进行配置。" +"Client-side evaluation happens in the :code:`Client.evaluate` method and can " +"be configured from the server side." +msgstr "" +"客户端评估在 :code:`Client.evaluate` 方法中进行,并可从服务器端进行配置。" #: ../../source/explanation-federated-evaluation.rst:101 msgid "Configuring Federated Evaluation" @@ -3879,51 +4064,55 @@ msgstr "联邦评估可从服务器端进行配置。内置策略支持以下参 #: ../../source/explanation-federated-evaluation.rst:105 msgid "" -":code:`fraction_evaluate`: a :code:`float` defining the fraction of " -"clients that will be selected for evaluation. If " -":code:`fraction_evaluate` is set to :code:`0.1` and :code:`100` clients " -"are connected to the server, then :code:`10` will be randomly selected " -"for evaluation. If :code:`fraction_evaluate` is set to :code:`0.0`, " -"federated evaluation will be disabled." +":code:`fraction_evaluate`: a :code:`float` defining the fraction of clients " +"that will be selected for evaluation. If :code:`fraction_evaluate` is set " +"to :code:`0.1` and :code:`100` clients are connected to the server, then :" +"code:`10` will be randomly selected for evaluation. If :code:" +"`fraction_evaluate` is set to :code:`0.0`, federated evaluation will be " +"disabled." 
msgstr "" -":code:`fraction_evaluate`: :code:`float`,定义了被选中进行评估的客户端的比例。如果 " -":code:`fraction_evaluate` 设置为 :code:`0.1`,并且 :code:`100` 个客户端连接到服务器,那么 " -":code:`10` 个客户端将被随机选中进行评估。如果 :code:`fraction_evaluate` 设置为 " -":code:`0.0`,联邦评估将被禁用。" +":code:`fraction_evaluate`: :code:`float`,定义了被选中进行评估的客户端的比" +"例。如果 :code:`fraction_evaluate` 设置为 :code:`0.1`,并且 :code:`100` 个客" +"户端连接到服务器,那么 :code:`10` 个客户端将被随机选中进行评估。如果 :code:" +"`fraction_evaluate` 设置为 :code:`0.0`,联邦评估将被禁用。" #: ../../source/explanation-federated-evaluation.rst:106 msgid "" -":code:`min_evaluate_clients`: an :code:`int`: the minimum number of " -"clients to be selected for evaluation. If :code:`fraction_evaluate` is " -"set to :code:`0.1`, :code:`min_evaluate_clients` is set to 20, and " -":code:`100` clients are connected to the server, then :code:`20` clients " -"will be selected for evaluation." +":code:`min_evaluate_clients`: an :code:`int`: the minimum number of clients " +"to be selected for evaluation. If :code:`fraction_evaluate` is set to :code:" +"`0.1`, :code:`min_evaluate_clients` is set to 20, and :code:`100` clients " +"are connected to the server, then :code:`20` clients will be selected for " +"evaluation." msgstr "" -":code:`min_evaluate_clients`:一个 :code:`int`,需要评估的客户的最小数量。如果 " -":code:`fraction_evaluate` 设置为 :code:`0.1`,:code:`min_evaluate_clients` " -"设置为 20,并且有 :code:`100` 个客户端已连接到服务器,那么 :code:`20` 个客户端将被选中进行评估。" +":code:`min_evaluate_clients`:一个 :code:`int`,需要评估的客户的最小数量。如" +"果 :code:`fraction_evaluate` 设置为 :code:`0.1`,:code:" +"`min_evaluate_clients` 设置为 20,并且有 :code:`100` 个客户端已连接到服务器," +"那么 :code:`20` 个客户端将被选中进行评估。" #: ../../source/explanation-federated-evaluation.rst:107 msgid "" ":code:`min_available_clients`: an :code:`int` that defines the minimum " -"number of clients which need to be connected to the server before a round" -" of federated evaluation can start. If fewer than " -":code:`min_available_clients` are connected to the server, the server " -"will wait until more clients are connected before it continues to sample " -"clients for evaluation." +"number of clients which need to be connected to the server before a round of " +"federated evaluation can start. If fewer than :code:`min_available_clients` " +"are connected to the server, the server will wait until more clients are " +"connected before it continues to sample clients for evaluation." msgstr "" -":code:`min_available_clients`: " -":code:`int`,定义了在一轮联邦评估开始之前,需要连接到服务器的最小客户端数量。如果连接到服务器的客户端数量少于 " -":code:`min_available_clients`,服务器将等待更多客户端连接后,才继续采样客户端进行评估。" +":code:`min_available_clients`: :code:`int`,定义了在一轮联邦评估开始之前,需" +"要连接到服务器的最小客户端数量。如果连接到服务器的客户端数量少于 :code:" +"`min_available_clients`,服务器将等待更多客户端连接后,才继续采样客户端进行评" +"估。" #: ../../source/explanation-federated-evaluation.rst:108 msgid "" ":code:`on_evaluate_config_fn`: a function that returns a configuration " -"dictionary which will be sent to the selected clients. The function will " -"be called during each round and provides a convenient way to customize " -"client-side evaluation from the server side, for example, to configure " -"the number of validation steps performed." -msgstr "code:`on_evaluate_config_fn`:返回配置字典的函数,该字典将发送给选定的客户端。该函数将在每一轮中被调用,并提供了一种方便的方法来从服务器端自定义客户端评估,例如,配置执行的验证步骤数。" +"dictionary which will be sent to the selected clients. The function will be " +"called during each round and provides a convenient way to customize client-" +"side evaluation from the server side, for example, to configure the number " +"of validation steps performed." 
+msgstr "" +"code:`on_evaluate_config_fn`:返回配置字典的函数,该字典将发送给选定的客户" +"端。该函数将在每一轮中被调用,并提供了一种方便的方法来从服务器端自定义客户端" +"评估,例如,配置执行的验证步骤数。" #: ../../source/explanation-federated-evaluation.rst:135 msgid "Evaluating Local Model Updates During Training" @@ -3931,10 +4120,11 @@ msgstr "评估训练期间的本地模型更新" #: ../../source/explanation-federated-evaluation.rst:137 msgid "" -"Model parameters can also be evaluated during training. " -":code:`Client.fit` can return arbitrary evaluation results as a " -"dictionary:" -msgstr "模型参数也可在训练过程中进行评估。 :code:`Client.fit`可以字典形式返回任意评估结果:" +"Model parameters can also be evaluated during training. :code:`Client.fit` " +"can return arbitrary evaluation results as a dictionary:" +msgstr "" +"模型参数也可在训练过程中进行评估。 :code:`Client.fit`可以字典形式返回任意评估" +"结果:" #: ../../source/explanation-federated-evaluation.rst:177 msgid "Full Code Example" @@ -3942,14 +4132,14 @@ msgstr "完整代码示例" #: ../../source/explanation-federated-evaluation.rst:179 msgid "" -"For a full code example that uses both centralized and federated " -"evaluation, see the *Advanced TensorFlow Example* (the same approach can " -"be applied to workloads implemented in any other framework): " -"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" +"For a full code example that uses both centralized and federated evaluation, " +"see the *Advanced TensorFlow Example* (the same approach can be applied to " +"workloads implemented in any other framework): https://github.com/adap/" +"flower/tree/main/examples/advanced-tensorflow" msgstr "" "有关同时使用集中评估和联邦评估的完整代码示例,请参阅 *Advanced TensorFlow " -"Example*(同样的方法也可应用于任何其他框架中): " -"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" +"Example*(同样的方法也可应用于任何其他框架中): https://github.com/adap/" +"flower/tree/main/examples/advanced-tensorflow" #: ../../source/fed/0000-20200102-fed-template.md:10 msgid "FED Template" @@ -4122,10 +4312,12 @@ msgstr "保留 GitHub 问题,用于跟踪进行中的工作" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 msgid "" -"ensure community participants can successfully drive changes to " -"completion across one or more releases while stakeholders are adequately " -"represented throughout the process" -msgstr "确保社区参与者能够成功推动改动,完成一个或多个版本,同时利益相关者在整个过程中得到充分展现" +"ensure community participants can successfully drive changes to completion " +"across one or more releases while stakeholders are adequately represented " +"throughout the process" +msgstr "" +"确保社区参与者能够成功推动改动,完成一个或多个版本,同时利益相关者在整个过程" +"中得到充分展现" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 msgid "Hence, an Enhancement Doc combines aspects of" @@ -4152,61 +4344,69 @@ msgstr "该文件是与社区合作逐步创建的。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 msgid "" "For far-fetching changes or features proposed to Flower, an abstraction " -"beyond a single GitHub issue or pull request is required to understand " -"and communicate upcoming changes to the project." +"beyond a single GitHub issue or pull request is required to understand and " +"communicate upcoming changes to the project." msgstr "" -"对于向 Flower 提出的远期变更或功能,需要一个超越单个 GitHub 问题或拉取请求(pull " -"request)的抽象概念,以了解和沟通项目即将发生的变更。" +"对于向 Flower 提出的远期变更或功能,需要一个超越单个 GitHub 问题或拉取请求" +"(pull request)的抽象概念,以了解和沟通项目即将发生的变更。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 msgid "" -"The purpose of this process is to reduce the amount of \"tribal " -"knowledge\" in our community. 
By moving decisions from Slack threads, " -"video calls, and hallway conversations into a well-tracked artifact, this" -" process aims to enhance communication and discoverability." +"The purpose of this process is to reduce the amount of \"tribal knowledge\" " +"in our community. By moving decisions from Slack threads, video calls, and " +"hallway conversations into a well-tracked artifact, this process aims to " +"enhance communication and discoverability." msgstr "" -"这一流程的目的是减少我们社区中 \"部落知识 \"的数量。通过将决策从 Slack " -"线程、视频通话和走廊对话转移到一个跟踪良好的工作环境中,该流程旨在加强沟通和可发现性。" +"这一流程的目的是减少我们社区中 \"部落知识 \"的数量。通过将决策从 Slack 线程、" +"视频通话和走廊对话转移到一个跟踪良好的工作环境中,该流程旨在加强沟通和可发现" +"性。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 msgid "" -"Roughly any larger, user-facing enhancement should follow the Enhancement" -" process. If an enhancement would be described in either written or " -"verbal communication to anyone besides the author or developer, then " -"consider creating an Enhancement Doc." -msgstr "任何较大的、面向用户的增强都应遵循增强流程。如果要以书面或口头形式向作者或开发人员以外的任何人描述增强功能,则应考虑创建改善文档。" +"Roughly any larger, user-facing enhancement should follow the Enhancement " +"process. If an enhancement would be described in either written or verbal " +"communication to anyone besides the author or developer, then consider " +"creating an Enhancement Doc." +msgstr "" +"任何较大的、面向用户的增强都应遵循增强流程。如果要以书面或口头形式向作者或开" +"发人员以外的任何人描述增强功能,则应考虑创建改善文档。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 msgid "" -"Similarly, any technical effort (refactoring, major architectural change)" -" that will impact a large section of the development community should " -"also be communicated widely. The Enhancement process is suited for this " -"even if it will have zero impact on the typical user or operator." -msgstr "同样,任何会对开发社区的大部分人产生影响的技术工作(重构、重大架构变更)也应广泛传播。即使对典型用户或操作员的影响为零,改进流程也适用于这种情况。" +"Similarly, any technical effort (refactoring, major architectural change) " +"that will impact a large section of the development community should also be " +"communicated widely. The Enhancement process is suited for this even if it " +"will have zero impact on the typical user or operator." +msgstr "" +"同样,任何会对开发社区的大部分人产生影响的技术工作(重构、重大架构变更)也应" +"广泛传播。即使对典型用户或操作员的影响为零,改进流程也适用于这种情况。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 msgid "" -"For small changes and additions, going through the Enhancement process " -"would be time-consuming and unnecessary. This includes, for example, " -"adding new Federated Learning algorithms, as these only add features " -"without changing how Flower works or is used." +"For small changes and additions, going through the Enhancement process would " +"be time-consuming and unnecessary. This includes, for example, adding new " +"Federated Learning algorithms, as these only add features without changing " +"how Flower works or is used." msgstr "" -"对于小的改动和添加,通过 \"改善\"程序既耗时又没有必要。例如,这包括添加新的联邦学习算法,因为这只会增加功能,而不会改变 \"Flower " -"\"的工作或使用方式。" +"对于小的改动和添加,通过 \"改善\"程序既耗时又没有必要。例如,这包括添加新的联" +"邦学习算法,因为这只会增加功能,而不会改变 \"Flower \"的工作或使用方式。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 msgid "" "Enhancements are different from feature requests, as they are already " -"providing a laid-out path for implementation and are championed by " -"members of the community." -msgstr "增强功能与功能请求不同,因为它们已经提供了实施路径,并得到了社区成员的支持。" +"providing a laid-out path for implementation and are championed by members " +"of the community." 
+msgstr "" +"增强功能与功能请求不同,因为它们已经提供了实施路径,并得到了社区成员的支持。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 msgid "" "An Enhancement is captured in a Markdown file that follows a defined " -"template and a workflow to review and store enhancement docs for " -"reference — the Enhancement Doc." -msgstr "增强功能被记录在一个 Markdown 文件中,该文件遵循已定义的模板和工作流程,用于审查和存储增强功能文档(即增强功能文档)以供参考。" +"template and a workflow to review and store enhancement docs for reference " +"— the Enhancement Doc." +msgstr "" +"增强功能被记录在一个 Markdown 文件中,该文件遵循已定义的模板和工作流程,用于" +"审查和存储增强功能文档(即增强功能文档)以供参考。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 msgid "Enhancement Doc Template" @@ -4257,9 +4457,11 @@ msgstr "描述数据" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 msgid "" -"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" -" Doc + 1. With this number, it becomes easy to reference other proposals." -msgstr "**fed-number**(必填)上一个Flower增强文件的 \"fed-number \"+1。有了这个编号,就很容易参考其他提案。" +"**fed-number** (Required) The `fed-number` of the last Flower Enhancement " +"Doc + 1. With this number, it becomes easy to reference other proposals." +msgstr "" +"**fed-number**(必填)上一个Flower增强文件的 \"fed-number \"+1。有了这个编" +"号,就很容易参考其他提案。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 msgid "**title** (Required) The title of the proposal in plain language." @@ -4267,20 +4469,22 @@ msgstr "**标题** (必填)用简明语言写出提案的标题。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 msgid "" -"**status** (Required) The current status of the proposal. See " -"[workflow](#workflow) for the possible states." -msgstr "**status** (必填)提案的当前状态。有关可能的状态,请参阅 [工作流程](#workflow)。" +"**status** (Required) The current status of the proposal. See [workflow]" +"(#workflow) for the possible states." +msgstr "" +"**status** (必填)提案的当前状态。有关可能的状态,请参阅 [工作流程]" +"(#workflow)。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 msgid "" -"**authors** (Required) A list of authors of the proposal. This is simply " -"the GitHub ID." +"**authors** (Required) A list of authors of the proposal. This is simply the " +"GitHub ID." msgstr "**作者**(必填) 提案的作者列表。这只是 GitHub ID。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 msgid "" -"**creation-date** (Required) The date that the proposal was first " -"submitted in a PR." +"**creation-date** (Required) The date that the proposal was first submitted " +"in a PR." msgstr "**创建日期**(必填) 建议书在 PR 中首次提交的日期。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 @@ -4291,8 +4495,8 @@ msgstr "**最后更新** (可选)提案最后一次重大修改的日期。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 msgid "" -"**see-also** (Optional) A list of other proposals that are relevant to " -"this one." +"**see-also** (Optional) A list of other proposals that are relevant to this " +"one." msgstr "**另见** (可选)与本提案相关的其他提案清单。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 @@ -4300,7 +4504,8 @@ msgid "**replaces** (Optional) A list of proposals that this one replaces." msgstr "**取代**(可选) 这份提案所取代的提案列表。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 -msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." +msgid "" +"**superseded-by** (Optional) A list of proposals that this one supersedes." 
msgstr "**被取代者** (可选) 此提案取代的提案列表。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 @@ -4310,46 +4515,52 @@ msgstr "工作流程" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 msgid "" "The idea forming the enhancement should already have been discussed or " -"pitched in the community. As such, it needs a champion, usually the " -"author, who shepherds the enhancement. This person also has to find " -"committers to Flower willing to review the proposal." -msgstr "形成增强功能的想法应该已经在社区中讨论过或提出过。因此,它需要一个支持者(通常是作者)来引导增强。这个人还必须找到愿意审核提案的提交者。" +"pitched in the community. As such, it needs a champion, usually the author, " +"who shepherds the enhancement. This person also has to find committers to " +"Flower willing to review the proposal." +msgstr "" +"形成增强功能的想法应该已经在社区中讨论过或提出过。因此,它需要一个支持者(通" +"常是作者)来引导增强。这个人还必须找到愿意审核提案的提交者。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 msgid "" "New enhancements are checked in with a file name in the form of `NNNN-" -"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " -"Doc number, to `enhancements`. All enhancements start in `provisional` " -"state as part of a pull request. Discussions are done as part of the pull" -" request review." +"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement Doc " +"number, to `enhancements`. All enhancements start in `provisional` state as " +"part of a pull request. Discussions are done as part of the pull request " +"review." msgstr "" -"新的增强功能以 `NNNN-YYYYMMDD-enhancement-title.md` 的文件名签入,其中 `NNNN` " -"是花朵增强文档的编号,并将其转入 `enhancements`。作为拉取请求(pull request)的一部分,所有增强功能都从 " -"`provisional` 状态开始。讨论是作为拉取请求审查的一部分进行的。" +"新的增强功能以 `NNNN-YYYYMMDD-enhancement-title.md` 的文件名签入,其中 " +"`NNNN` 是花朵增强文档的编号,并将其转入 `enhancements`。作为拉取请求(pull " +"request)的一部分,所有增强功能都从 `provisional` 状态开始。讨论是作为拉取请" +"求审查的一部分进行的。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 msgid "" -"Once an enhancement has been reviewed and approved, its status is changed" -" to `implementable`. The actual implementation is then done in separate " -"pull requests. These pull requests should mention the respective " -"enhancement as part of their description. After the implementation is " -"done, the proposal status is changed to `implemented`." +"Once an enhancement has been reviewed and approved, its status is changed to " +"`implementable`. The actual implementation is then done in separate pull " +"requests. These pull requests should mention the respective enhancement as " +"part of their description. After the implementation is done, the proposal " +"status is changed to `implemented`." msgstr "" -"一旦增强功能通过审核和批准,其状态就会变为 " -"`可实施`。实际的实施工作将在单独的拉取请求中完成。这些拉取请求应在其描述中提及相应的增强功能。实施完成后,提案状态将更改为 `已实施`。" +"一旦增强功能通过审核和批准,其状态就会变为 `可实施`。实际的实施工作将在单独的" +"拉取请求中完成。这些拉取请求应在其描述中提及相应的增强功能。实施完成后,提案" +"状态将更改为 `已实施`。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 msgid "" -"Under certain conditions, other states are possible. An Enhancement has " -"the following states:" +"Under certain conditions, other states are possible. An Enhancement has the " +"following states:" msgstr "在某些条件下,还可能出现其他状态。增强提案具有以下状态:" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 msgid "" "`provisional`: The enhancement has been proposed and is actively being " -"defined. This is the starting state while the proposal is being fleshed " -"out and actively defined and discussed." -msgstr "`暂定`: 已提出改进建议并正在积极定义。这是在提案得到充实、积极定义和讨论时的起始状态。" +"defined. 
This is the starting state while the proposal is being fleshed out " +"and actively defined and discussed." +msgstr "" +"`暂定`: 已提出改进建议并正在积极定义。这是在提案得到充实、积极定义和讨论时的" +"起始状态。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 msgid "`implementable`: The enhancement has been reviewed and approved." @@ -4362,13 +4573,14 @@ msgid "" msgstr "`已实施`: 增强功能已实施,不再主动更改。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 -msgid "`deferred`: The enhancement is proposed but not actively being worked on." +msgid "" +"`deferred`: The enhancement is proposed but not actively being worked on." msgstr "`推迟`: 已提出改进建议,但尚未积极开展工作。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 msgid "" -"`rejected`: The authors and reviewers have decided that this enhancement " -"is not moving forward." +"`rejected`: The authors and reviewers have decided that this enhancement is " +"not moving forward." msgstr "`拒绝`: 作者和审稿人已决定不再推进该增强功能。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 @@ -4381,17 +4593,21 @@ msgstr "`已替换`: 增强功能已被新的增强功能取代。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 msgid "" -"Adding an additional process to the ones already provided by GitHub " -"(Issues and Pull Requests) adds more complexity and can be a barrier for " -"potential first-time contributors." -msgstr "在 GitHub 已提供的流程(问题和拉取请求)之外再增加一个流程,会增加复杂性,并可能成为潜在首次贡献者的障碍。" +"Adding an additional process to the ones already provided by GitHub (Issues " +"and Pull Requests) adds more complexity and can be a barrier for potential " +"first-time contributors." +msgstr "" +"在 GitHub 已提供的流程(问题和拉取请求)之外再增加一个流程,会增加复杂性,并" +"可能成为潜在首次贡献者的障碍。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 msgid "" "Expanding the proposal template beyond the single-sentence description " -"currently required in the features issue template may be a heavy burden " -"for non-native English speakers." -msgstr "对于英语非母语者来说,将提案模板扩展到目前要求的单句描述之外可能是一个沉重的负担。" +"currently required in the features issue template may be a heavy burden for " +"non-native English speakers." +msgstr "" +"对于英语非母语者来说,将提案模板扩展到目前要求的单句描述之外可能是一个沉重的" +"负担。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 msgid "GitHub Issues" @@ -4400,16 +4616,17 @@ msgstr "GitHub 问题" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 msgid "" "Using GitHub Issues for these kinds of enhancements is doable. One could " -"use, for example, tags, to differentiate and filter them from other " -"issues. The main issue is in discussing and reviewing an enhancement: " -"GitHub issues only have a single thread for comments. Enhancements " -"usually have multiple threads of discussion at the same time for various " -"parts of the doc. Managing these multiple discussions can be confusing " -"when using GitHub Issues." -msgstr "" -"使用 GitHub Issues 进行此类改进是可行的。例如,我们可以使用标签来区分和过滤这些问题。主要的问题在于讨论和审查增强功能: " -"GitHub 问题只有一个评论线程。而增强功能通常会同时有多个讨论线程,针对文档的不同部分。在使用 GitHub " -"问题时,管理这些多重讨论会很混乱。" +"use, for example, tags, to differentiate and filter them from other issues. " +"The main issue is in discussing and reviewing an enhancement: GitHub issues " +"only have a single thread for comments. Enhancements usually have multiple " +"threads of discussion at the same time for various parts of the doc. " +"Managing these multiple discussions can be confusing when using GitHub " +"Issues." 
+msgstr "" +"使用 GitHub Issues 进行此类改进是可行的。例如,我们可以使用标签来区分和过滤这" +"些问题。主要的问题在于讨论和审查增强功能: GitHub 问题只有一个评论线程。而增" +"强功能通常会同时有多个讨论线程,针对文档的不同部分。在使用 GitHub 问题时,管" +"理这些多重讨论会很混乱。" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 msgid "Google Docs" @@ -4417,15 +4634,15 @@ msgstr "谷歌文档" #: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 msgid "" -"Google Docs allow for multiple threads of discussions. But as Google Docs" -" are hosted outside the project, their discoverability by the community " -"needs to be taken care of. A list of links to all proposals has to be " -"managed and made available for the community. Compared to shipping " -"proposals as part of Flower's repository, the potential for missing links" -" is much higher." +"Google Docs allow for multiple threads of discussions. But as Google Docs " +"are hosted outside the project, their discoverability by the community needs " +"to be taken care of. A list of links to all proposals has to be managed and " +"made available for the community. Compared to shipping proposals as part of " +"Flower's repository, the potential for missing links is much higher." msgstr "" -"谷歌文档允许多线程讨论。但是,由于谷歌文档是在项目之外托管的,因此需要注意它们是否能被社区发现。我们必须管理所有提案的链接列表,并提供给社区使用。与作为" -" Flower 资源库一部分的提案相比,丢失链接的可能性要大得多。" +"谷歌文档允许多线程讨论。但是,由于谷歌文档是在项目之外托管的,因此需要注意它" +"们是否能被社区发现。我们必须管理所有提案的链接列表,并提供给社区使用。与作为 " +"Flower 资源库一部分的提案相比,丢失链接的可能性要大得多。" #: ../../source/fed/index.md:1 msgid "FED - Flower Enhancement Doc" @@ -4437,9 +4654,10 @@ msgstr "整合评估结果" #: ../../source/how-to-aggregate-evaluation-results.rst:4 msgid "" -"The Flower server does not prescribe a way to aggregate evaluation " -"results, but it enables the user to fully customize result aggregation." -msgstr "Flower 服务器没有规定整合评估结果的方法,但用户可以完全自定义如何整合。" +"The Flower server does not prescribe a way to aggregate evaluation results, " +"but it enables the user to fully customize result aggregation." +msgstr "" +"Flower 服务器没有规定整合评估结果的方法,但用户可以完全自定义如何整合。" #: ../../source/how-to-aggregate-evaluation-results.rst:8 msgid "Aggregate Custom Evaluation Results" @@ -4447,10 +4665,12 @@ msgstr "自定义整合评估结果" #: ../../source/how-to-aggregate-evaluation-results.rst:10 msgid "" -"The same :code:`Strategy`-customization approach can be used to aggregate" -" custom evaluation results coming from individual clients. Clients can " -"return custom metrics to the server by returning a dictionary:" -msgstr "同样的 :code:`Strategy` 定制方法也可用于汇总来自单个客户端的自定义评估结果。客户端可以通过返回字典的方式向服务器返回自定义指标:" +"The same :code:`Strategy`-customization approach can be used to aggregate " +"custom evaluation results coming from individual clients. Clients can return " +"custom metrics to the server by returning a dictionary:" +msgstr "" +"同样的 :code:`Strategy` 定制方法也可用于汇总来自单个客户端的自定义评估结果。" +"客户端可以通过返回字典的方式向服务器返回自定义指标:" #: ../../source/how-to-aggregate-evaluation-results.rst:36 msgid "" @@ -4465,10 +4685,12 @@ msgstr "配置客户端" #: ../../source/how-to-configure-clients.rst:4 msgid "" "Along with model parameters, Flower can send configuration values to " -"clients. Configuration values can be used for various purposes. They are," -" for example, a popular way to control client-side hyperparameters from " -"the server." -msgstr "除了模型参数,Flower 还可以向客户端发送配置值。配置值有多种用途。它们是一种从服务器控制客户端超参数的常用方法。" +"clients. Configuration values can be used for various purposes. They are, " +"for example, a popular way to control client-side hyperparameters from the " +"server." 
+msgstr "" +"除了模型参数,Flower 还可以向客户端发送配置值。配置值有多种用途。它们是一种从" +"服务器控制客户端超参数的常用方法。" #: ../../source/how-to-configure-clients.rst:7 msgid "Configuration values" @@ -4476,41 +4698,45 @@ msgstr "配置值" #: ../../source/how-to-configure-clients.rst:9 msgid "" -"Configuration values are represented as a dictionary with ``str`` keys " -"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " -"float), ``int``, or ``str`` (or equivalent types in different languages)." -" Here is an example of a configuration dictionary in Python:" +"Configuration values are represented as a dictionary with ``str`` keys and " +"values of type ``bool``, ``bytes``, ``double`` (64-bit precision float), " +"``int``, or ``str`` (or equivalent types in different languages). Here is an " +"example of a configuration dictionary in Python:" msgstr "" -"配置值以字典的形式表示,字典的键为 ``str``,值的类型为 ``bool``、``bytes``、``double``(64 " -"位精度浮点型)、``int``或 ``str`(或不同语言中的等效类型)。下面是一个 Python 配置字典的示例:" +"配置值以字典的形式表示,字典的键为 ``str``,值的类型为 ``bool``、``bytes``、" +"``double``(64 位精度浮点型)、``int``或 ``str`(或不同语言中的等效类型)。下" +"面是一个 Python 配置字典的示例:" #: ../../source/how-to-configure-clients.rst:20 msgid "" "Flower serializes these configuration dictionaries (or *config dict* for " -"short) to their ProtoBuf representation, transports them to the client " -"using gRPC, and then deserializes them back to Python dictionaries." +"short) to their ProtoBuf representation, transports them to the client using " +"gRPC, and then deserializes them back to Python dictionaries." msgstr "" -"Flower 将这些配置字典(简称 *config dict*)序列化为 ProtoBuf 表示形式,使用 gRPC " -"将其传输到客户端,然后再反序列化为 Python 字典。" +"Flower 将这些配置字典(简称 *config dict*)序列化为 ProtoBuf 表示形式,使用 " +"gRPC 将其传输到客户端,然后再反序列化为 Python 字典。" #: ../../source/how-to-configure-clients.rst:24 msgid "" -"Currently, there is no support for directly sending collection types " -"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " -"dictionaries. There are several workarounds to send collections as values" -" by converting them to one of the supported value types (and converting " -"them back on the client-side)." +"Currently, there is no support for directly sending collection types (e.g., " +"``Set``, ``List``, ``Map``) as values in configuration dictionaries. There " +"are several workarounds to send collections as values by converting them to " +"one of the supported value types (and converting them back on the client-" +"side)." msgstr "" "目前,还不支持在配置字典中直接发送作为值的集合类型(例如,`Set``, `List`, " -"`Map``)。有几种变通方法可将集合转换为支持的值类型之一(并在客户端将其转换回),从而将集合作为值发送。" +"`Map``)。有几种变通方法可将集合转换为支持的值类型之一(并在客户端将其转换" +"回),从而将集合作为值发送。" #: ../../source/how-to-configure-clients.rst:26 msgid "" "One can, for example, convert a list of floating-point numbers to a JSON " -"string, then send the JSON string using the configuration dictionary, and" -" then convert the JSON string back to a list of floating-point numbers on" -" the client." -msgstr "例如,可以将浮点数列表转换为 JSON 字符串,然后使用配置字典发送 JSON 字符串,再在客户端将 JSON 字符串转换回浮点数列表。" +"string, then send the JSON string using the configuration dictionary, and " +"then convert the JSON string back to a list of floating-point numbers on the " +"client." 
+msgstr "" +"例如,可以将浮点数列表转换为 JSON 字符串,然后使用配置字典发送 JSON 字符串," +"再在客户端将 JSON 字符串转换回浮点数列表。" #: ../../source/how-to-configure-clients.rst:30 msgid "Configuration through built-in strategies" @@ -4518,57 +4744,65 @@ msgstr "通过内置策略进行配置" #: ../../source/how-to-configure-clients.rst:32 msgid "" -"The easiest way to send configuration values to clients is to use a " -"built-in strategy like :code:`FedAvg`. Built-in strategies support so-" -"called configuration functions. A configuration function is a function " -"that the built-in strategy calls to get the configuration dictionary for " -"the current round. It then forwards the configuration dictionary to all " -"the clients selected during that round." +"The easiest way to send configuration values to clients is to use a built-in " +"strategy like :code:`FedAvg`. Built-in strategies support so-called " +"configuration functions. A configuration function is a function that the " +"built-in strategy calls to get the configuration dictionary for the current " +"round. It then forwards the configuration dictionary to all the clients " +"selected during that round." msgstr "" -"向客户端发送配置值的最简单方法是使用内置策略,如 " -":code:`FedAvg`。内置策略支持所谓的配置函数。配置函数是内置策略调用的函数,用于获取当前轮的配置字典。然后,它会将配置字典转发给该轮中选择的所有客户端。" +"向客户端发送配置值的最简单方法是使用内置策略,如 :code:`FedAvg`。内置策略支持" +"所谓的配置函数。配置函数是内置策略调用的函数,用于获取当前轮的配置字典。然" +"后,它会将配置字典转发给该轮中选择的所有客户端。" #: ../../source/how-to-configure-clients.rst:34 msgid "" "Let's start with a simple example. Imagine we want to send (a) the batch " -"size that the client should use, (b) the current global round of " -"federated learning, and (c) the number of epochs to train on the client-" -"side. Our configuration function could look like this:" -msgstr "让我们从一个简单的例子开始。想象一下,我们想要发送给客户端(a)应该使用的批次大小,(b)当前联邦学习的全局轮次,以及(c)客户端训练的遍历数。我们的配置函数可以是这样的:" +"size that the client should use, (b) the current global round of federated " +"learning, and (c) the number of epochs to train on the client-side. Our " +"configuration function could look like this:" +msgstr "" +"让我们从一个简单的例子开始。想象一下,我们想要发送给客户端(a)应该使用的批次" +"大小,(b)当前联邦学习的全局轮次,以及(c)客户端训练的遍历数。我们的配置函" +"数可以是这样的:" #: ../../source/how-to-configure-clients.rst:47 msgid "" "To make the built-in strategies use this function, we can pass it to " -"``FedAvg`` during initialization using the parameter " -":code:`on_fit_config_fn`:" -msgstr "为了让内置策略使用这个函数,我们可以在初始化时使用参数 :code:`on_fit_config_fn` 将它传递给 ``FedAvg`` :" +"``FedAvg`` during initialization using the parameter :code:" +"`on_fit_config_fn`:" +msgstr "" +"为了让内置策略使用这个函数,我们可以在初始化时使用参数 :code:" +"`on_fit_config_fn` 将它传递给 ``FedAvg`` :" #: ../../source/how-to-configure-clients.rst:56 -msgid "One the client side, we receive the configuration dictionary in ``fit``:" +msgid "" +"One the client side, we receive the configuration dictionary in ``fit``:" msgstr "在客户端,我们在 ``fit`` 中接收配置字典:" #: ../../source/how-to-configure-clients.rst:67 msgid "" "There is also an `on_evaluate_config_fn` to configure evaluation, which " -"works the same way. They are separate functions because one might want to" -" send different configuration values to `evaluate` (for example, to use a" -" different batch size)." +"works the same way. They are separate functions because one might want to " +"send different configuration values to `evaluate` (for example, to use a " +"different batch size)." 
msgstr "" -"还有一个 `on_evaluate_config_fn` 用于配置评估,其工作方式相同。它们是不同的函数,因为可能需要向 `evaluate` " -"发送不同的配置值(例如,使用不同的批量大小)。" +"还有一个 `on_evaluate_config_fn` 用于配置评估,其工作方式相同。它们是不同的函" +"数,因为可能需要向 `evaluate` 发送不同的配置值(例如,使用不同的批量大小)。" #: ../../source/how-to-configure-clients.rst:69 msgid "" -"The built-in strategies call this function every round (that is, every " -"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " -"Calling `on_evaluate_config_fn` every round allows us to vary/change the " -"config dict over consecutive rounds. If we wanted to implement a " -"hyperparameter schedule, for example, to increase the number of local " -"epochs during later rounds, we could do the following:" +"The built-in strategies call this function every round (that is, every time " +"`Strategy.configure_fit` or `Strategy.configure_evaluate` runs). Calling " +"`on_evaluate_config_fn` every round allows us to vary/change the config dict " +"over consecutive rounds. If we wanted to implement a hyperparameter " +"schedule, for example, to increase the number of local epochs during later " +"rounds, we could do the following:" msgstr "" -"内置策略每轮都会调用此函数(即每次运行 `Strategy.configure_fit` 或 " -"`Strategy.configure_evaluate` 时)。每轮调用 `on_evaluate_config_fn` " -"允许我们在连续几轮中改变配置指令。例如,如果我们想实现一个超参数时间表,以增加后几轮的本地遍历次数,我们可以这样做:" +"内置策略每轮都会调用此函数(即每次运行 `Strategy.configure_fit` 或 `Strategy." +"configure_evaluate` 时)。每轮调用 `on_evaluate_config_fn` 允许我们在连续几轮" +"中改变配置指令。例如,如果我们想实现一个超参数时间表,以增加后几轮的本地遍历" +"次数,我们可以这样做:" #: ../../source/how-to-configure-clients.rst:82 msgid "The :code:`FedAvg` strategy will call this function *every round*." @@ -4587,19 +4821,18 @@ msgstr "在某些情况下,有必要向不同的客户端发送不同的配置 #: ../../source/how-to-configure-clients.rst:89 #, fuzzy msgid "" -"This can be achieved by customizing an existing strategy or by " -":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes :code:`FedAvg`" -" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" -" to the config dict of a *single client* (only the first client in the " -"list, the other clients in this round to not receive this \"special\" " -"config value):" +"This can be achieved by customizing an existing strategy or by :doc:" +"`implementing a custom strategy from scratch `. " +"Here's a nonsensical example that customizes :code:`FedAvg` by adding a " +"custom ``\"hello\": \"world\"`` configuration key/value pair to the config " +"dict of a *single client* (only the first client in the list, the other " +"clients in this round to not receive this \"special\" config value):" msgstr "" -"这可以通过定制现有策略或 `从头开始实施一个定制策略 `_来实现。下面是一个无厘头的例子,`FedAvg`通过在*单个客户端*的配置指令(config " -"dict)中添加自定义的``\"hello\": \"world\"``配置键/值对添加到此的配置 dict " -"中(仅列表中的第一个客户端,本轮中的其他客户端不会收到此 \"特殊 \"配置值):" +"这可以通过定制现有策略或 `从头开始实施一个定制策略 `_来实现。下面是一个无厘头的例子," +"`FedAvg`通过在*单个客户端*的配置指令(config dict)中添加自定义的" +"``\"hello\": \"world\"``配置键/值对添加到此的配置 dict 中(仅列表中的第一个" +"客户端,本轮中的其他客户端不会收到此 \"特殊 \"配置值):" #: ../../source/how-to-configure-logging.rst:2 msgid "Configure logging" @@ -4608,19 +4841,21 @@ msgstr "配置日志记录" #: ../../source/how-to-configure-logging.rst:4 msgid "" "The Flower logger keeps track of all core events that take place in " -"federated learning workloads. It presents information by default " -"following a standard message format:" -msgstr "Flower 日志记录器会跟踪联邦学习工作负载中发生的所有核心事件。它默认按照标准信息格式提供信息:" +"federated learning workloads. 
It presents information by default following a " +"standard message format:" +msgstr "" +"Flower 日志记录器会跟踪联邦学习工作负载中发生的所有核心事件。它默认按照标准信" +"息格式提供信息:" #: ../../source/how-to-configure-logging.rst:13 msgid "" -"containing relevant information including: log message level (e.g. " -":code:`INFO`, :code:`DEBUG`), a timestamp, the line where the logging " -"took place from, as well as the log message itself. In this way, the " -"logger would typically display information on your terminal as follows:" +"containing relevant information including: log message level (e.g. :code:" +"`INFO`, :code:`DEBUG`), a timestamp, the line where the logging took place " +"from, as well as the log message itself. In this way, the logger would " +"typically display information on your terminal as follows:" msgstr "" -"相关信息包括:日志信息级别(例如 " -":code:`INFO`、:code:`DEBUG`)、时间戳、日志记录的行以及日志信息本身。这样,日志记录器通常会在终端上显示如下信息:" +"相关信息包括:日志信息级别(例如 :code:`INFO`、:code:`DEBUG`)、时间戳、日志" +"记录的行以及日志信息本身。这样,日志记录器通常会在终端上显示如下信息:" #: ../../source/how-to-configure-logging.rst:34 msgid "Saving log to file" @@ -4630,31 +4865,30 @@ msgstr "将日志保存到文件" msgid "" "By default, the Flower log is outputted to the terminal where you launch " "your Federated Learning workload from. This applies for both gRPC-based " -"federation (i.e. when you do :code:`fl.server.start_server`) and when " -"using the :code:`VirtualClientEngine` (i.e. when you do " -":code:`fl.simulation.start_simulation`). In some situations you might " -"want to save this log to disk. You can do so by calling the " -"`fl.common.logger.configure() " -"`_" -" function. For example:" -msgstr "" -"默认情况下,Flower 日志会输出到启动联邦学习工作负载的终端。这既适用于基于 gRPC 的联邦学习(即执行 " -":code:`fl.server.start_server` 时),也适用于使用 :code:`VirtualClientEngine` " -"时(即执行 :code:`fl.simulation.start_simulation` " -"时)。在某些情况下,您可能希望将此日志保存到磁盘。为此,您可以调用 `fl.common.logger.configure() " -"`_" -" 函数。例如:" +"federation (i.e. when you do :code:`fl.server.start_server`) and when using " +"the :code:`VirtualClientEngine` (i.e. when you do :code:`fl.simulation." +"start_simulation`). In some situations you might want to save this log to " +"disk. You can do so by calling the `fl.common.logger.configure() `_ function. " +"For example:" +msgstr "" +"默认情况下,Flower 日志会输出到启动联邦学习工作负载的终端。这既适用于基于 " +"gRPC 的联邦学习(即执行 :code:`fl.server.start_server` 时),也适用于使用 :" +"code:`VirtualClientEngine` 时(即执行 :code:`fl.simulation.start_simulation` " +"时)。在某些情况下,您可能希望将此日志保存到磁盘。为此,您可以调用 `fl." +"common.logger.configure() `_ 函数。例如:" #: ../../source/how-to-configure-logging.rst:53 msgid "" -"With the above, Flower will record the log you see on your terminal to " -":code:`log.txt`. This file will be created in the same directory as were " -"you are running the code from. If we inspect we see the log above is also" -" recorded but prefixing with :code:`identifier` each line:" +"With the above, Flower will record the log you see on your terminal to :code:" +"`log.txt`. This file will be created in the same directory as were you are " +"running the code from. 
If we inspect we see the log above is also recorded " +"but prefixing with :code:`identifier` each line:" msgstr "" -"通过上述操作,Flower 会将您在终端上看到的日志记录到 " -":code:`log.txt`。该文件将创建在运行代码的同一目录下。如果我们检查一下,就会发现上面的日志也被记录了下来,但每一行都以 " -":code:`identifier` 作为前缀:" +"通过上述操作,Flower 会将您在终端上看到的日志记录到 :code:`log.txt`。该文件将" +"创建在运行代码的同一目录下。如果我们检查一下,就会发现上面的日志也被记录了下" +"来,但每一行都以 :code:`identifier` 作为前缀:" #: ../../source/how-to-configure-logging.rst:74 msgid "Log your own messages" @@ -4662,16 +4896,19 @@ msgstr "记录自己的信息" #: ../../source/how-to-configure-logging.rst:76 msgid "" -"You might expand the information shown by default with the Flower logger " -"by adding more messages relevant to your application. You can achieve " -"this easily as follows." -msgstr "您可以通过添加更多与应用程序相关的信息来扩展 Flower 日志记录器默认显示的信息。您可以通过以下方法轻松实现这一目标。" +"You might expand the information shown by default with the Flower logger by " +"adding more messages relevant to your application. You can achieve this " +"easily as follows." +msgstr "" +"您可以通过添加更多与应用程序相关的信息来扩展 Flower 日志记录器默认显示的信" +"息。您可以通过以下方法轻松实现这一目标。" #: ../../source/how-to-configure-logging.rst:102 msgid "" -"In this way your logger will show, in addition to the default messages, " -"the ones introduced by the clients as specified above." -msgstr "这样,除默认信息外,您的日志记录器还将显示由客户引入的信息,如上文所述。" +"In this way your logger will show, in addition to the default messages, the " +"ones introduced by the clients as specified above." +msgstr "" +"这样,除默认信息外,您的日志记录器还将显示由客户引入的信息,如上文所述。" #: ../../source/how-to-configure-logging.rst:128 msgid "Log to a remote service" @@ -4679,20 +4916,21 @@ msgstr "登录远程服务" #: ../../source/how-to-configure-logging.rst:130 msgid "" -"The :code:`fl.common.logger.configure` function, also allows specifying a" -" host to which logs can be pushed (via :code:`POST`) through a native " -"Python :code:`logging.handler.HTTPHandler`. This is a particularly useful" -" feature in :code:`gRPC`-based Federated Learning workloads where " -"otherwise gathering logs from all entities (i.e. the server and the " -"clients) might be cumbersome. Note that in Flower simulation, the server " -"automatically displays all logs. You can still specify a " -":code:`HTTPHandler` should you wish to backup or analyze the logs " -"somewhere else." -msgstr "" -"此外,:code:`fl.common.logger.configure`函数还允许指定主机,通过本地 Python " -":code:`logging.handler.HTTPHandler`,向该主机推送日志(通过 :code:`POST`)。在基于 " -":code:`gRPC` 的联邦学习工作负载中,这是一个特别有用的功能,否则从所有实体(即服务器和客户端)收集日志可能会很麻烦。请注意,在 " -"Flower 模拟器中,服务器会自动显示所有日志。如果希望在其他地方备份或分析日志,仍可指定 :code:`HTTPHandler`。" +"The :code:`fl.common.logger.configure` function, also allows specifying a " +"host to which logs can be pushed (via :code:`POST`) through a native Python :" +"code:`logging.handler.HTTPHandler`. This is a particularly useful feature " +"in :code:`gRPC`-based Federated Learning workloads where otherwise gathering " +"logs from all entities (i.e. the server and the clients) might be " +"cumbersome. Note that in Flower simulation, the server automatically " +"displays all logs. You can still specify a :code:`HTTPHandler` should you " +"wish to backup or analyze the logs somewhere else." 
+msgstr "" +"此外,:code:`fl.common.logger.configure`函数还允许指定主机,通过本地 Python :" +"code:`logging.handler.HTTPHandler`,向该主机推送日志(通过 :code:`POST`)。在" +"基于 :code:`gRPC` 的联邦学习工作负载中,这是一个特别有用的功能,否则从所有实" +"体(即服务器和客户端)收集日志可能会很麻烦。请注意,在 Flower 模拟器中,服务" +"器会自动显示所有日志。如果希望在其他地方备份或分析日志,仍可指定 :code:" +"`HTTPHandler`。" #: ../../source/how-to-enable-ssl-connections.rst:2 msgid "Enable SSL connections" @@ -4702,24 +4940,26 @@ msgstr "启用 SSL 连接" msgid "" "This guide describes how to a SSL-enabled secure Flower server can be " "started and how a Flower client can establish a secure connections to it." -msgstr "本指南介绍如何启动启用 SSL 的安全 Flower 服务器,以及 Flower 客户端如何与其建立安全连接。" +msgstr "" +"本指南介绍如何启动启用 SSL 的安全 Flower 服务器,以及 Flower 客户端如何与其建" +"立安全连接。" #: ../../source/how-to-enable-ssl-connections.rst:7 msgid "" -"A complete code example demonstrating a secure connection can be found " -"`here `_." +"A complete code example demonstrating a secure connection can be found `here " +"`_." msgstr "" -"有关安全连接的完整代码示例,请参见 `_ 。" +"有关安全连接的完整代码示例,请参见 `_ 。" #: ../../source/how-to-enable-ssl-connections.rst:10 msgid "" -"The code example comes with a README.md file which will explain how to " -"start it. Although it is already SSL-enabled, it might be less " -"descriptive on how. Stick to this guide for a deeper introduction to the " -"topic." -msgstr "代码示例附带的 README.md 文件将解释如何启动它。虽然它已经启用了 SSL,但对如何启用可能描述较少。请参考本指南,了解更深入的相关介绍。" +"The code example comes with a README.md file which will explain how to start " +"it. Although it is already SSL-enabled, it might be less descriptive on how. " +"Stick to this guide for a deeper introduction to the topic." +msgstr "" +"代码示例附带的 README.md 文件将解释如何启动它。虽然它已经启用了 SSL,但对如何" +"启用可能描述较少。请参考本指南,了解更深入的相关介绍。" #: ../../source/how-to-enable-ssl-connections.rst:16 msgid "Certificates" @@ -4728,13 +4968,14 @@ msgstr "证书" #: ../../source/how-to-enable-ssl-connections.rst:18 msgid "" "Using SSL-enabled connections requires certificates to be passed to the " -"server and client. For the purpose of this guide we are going to generate" -" self-signed certificates. As this can become quite complex we are going " -"to ask you to run the script in :code:`examples/advanced-" -"tensorflow/certificates/generate.sh`" +"server and client. For the purpose of this guide we are going to generate " +"self-signed certificates. As this can become quite complex we are going to " +"ask you to run the script in :code:`examples/advanced-tensorflow/" +"certificates/generate.sh`" msgstr "" -"使用支持 SSL 的连接需要向服务器和客户端传递证书。在本指南中,我们将生成自签名证书。由于这可能会变得相当复杂,我们将要求你运行 " -":code:`examples/advanced-tensorflow/certificates/generate.sh` 中的脚本" +"使用支持 SSL 的连接需要向服务器和客户端传递证书。在本指南中,我们将生成自签名" +"证书。由于这可能会变得相当复杂,我们将要求你运行 :code:`examples/advanced-" +"tensorflow/certificates/generate.sh` 中的脚本" #: ../../source/how-to-enable-ssl-connections.rst:23 msgid "with the following command sequence:" @@ -4742,9 +4983,10 @@ msgstr "使用以下命令序列:" #: ../../source/how-to-enable-ssl-connections.rst:30 msgid "" -"This will generate the certificates in :code:`examples/advanced-" -"tensorflow/.cache/certificates`." -msgstr "这将在 :code:`examples/advanced-tensorflow/.cache/certificates` 中生成证书。" +"This will generate the certificates in :code:`examples/advanced-tensorflow/." +"cache/certificates`." +msgstr "" +"这将在 :code:`examples/advanced-tensorflow/.cache/certificates` 中生成证书。" #: ../../source/how-to-enable-ssl-connections.rst:32 msgid "" @@ -4753,12 +4995,14 @@ msgid "" "complete for production environments. 
Please refer to other sources " "regarding the issue of correctly generating certificates for production " "environments." -msgstr "本示例中生成 SSL 证书的方法可作为启发和起点,但不应被视为生产环境的完整方法。有关在生产环境中正确生成证书的问题,请参考其他资料。" +msgstr "" +"本示例中生成 SSL 证书的方法可作为启发和起点,但不应被视为生产环境的完整方法。" +"有关在生产环境中正确生成证书的问题,请参考其他资料。" #: ../../source/how-to-enable-ssl-connections.rst:36 msgid "" -"In case you are a researcher you might be just fine using the self-signed" -" certificates generated using the scripts which are part of this guide." +"In case you are a researcher you might be just fine using the self-signed " +"certificates generated using the scripts which are part of this guide." msgstr "如果你是一名研究人员,使用本指南中的脚本生成的自签名证书就可以了。" #: ../../source/how-to-enable-ssl-connections.rst:41 @@ -4775,12 +5019,12 @@ msgstr "现在,我们将展示如何编写一个使用先前生成的脚本的 #: ../../source/how-to-enable-ssl-connections.rst:61 msgid "" "When providing certificates, the server expects a tuple of three " -"certificates. :code:`Path` can be used to easily read the contents of " -"those files into byte strings, which is the data type " -":code:`start_server` expects." +"certificates. :code:`Path` can be used to easily read the contents of those " +"files into byte strings, which is the data type :code:`start_server` expects." msgstr "" -"在提供证书时,服务器希望得到由三个证书组成的元组。 :code:`Path` 可用于轻松地将这些文件的内容读取为字节字符串,这就是 " -":code:`start_server` 期望的数据类型。" +"在提供证书时,服务器希望得到由三个证书组成的元组。 :code:`Path` 可用于轻松地" +"将这些文件的内容读取为字节字符串,这就是 :code:`start_server` 期望的数据类" +"型。" #: ../../source/how-to-enable-ssl-connections.rst:65 #: ../../source/how-to-upgrade-to-flower-1.0.rst:37 @@ -4796,12 +5040,12 @@ msgstr "现在我们将演示如何编写一个客户端,使用之前生成的 #: ../../source/how-to-enable-ssl-connections.rst:84 msgid "" -"When setting :code:`root_certificates`, the client expects the PEM-" -"encoded root certificates as a byte string. We are again using " -":code:`Path` to simplify reading those as byte strings." +"When setting :code:`root_certificates`, the client expects the PEM-encoded " +"root certificates as a byte string. We are again using :code:`Path` to " +"simplify reading those as byte strings." msgstr "" -"当设置 :code:`root_certificates` 时,客户端希望 PEM 编码的根证书是字节字符串。我们再次使用 " -":code:`Path` 来简化以字节字符串形式读取证书的过程。" +"当设置 :code:`root_certificates` 时,客户端希望 PEM 编码的根证书是字节字符" +"串。我们再次使用 :code:`Path` 来简化以字节字符串形式读取证书的过程。" #: ../../source/how-to-enable-ssl-connections.rst:89 #: ../../source/how-to-use-built-in-mods.rst:85 @@ -4811,10 +5055,12 @@ msgstr "总结" #: ../../source/how-to-enable-ssl-connections.rst:91 msgid "" -"You should now have learned how to generate self-signed certificates " -"using the given script, start a SSL-enabled server, and have a client " -"establish a secure connection to it." -msgstr "现在,你应该已经学会了如何使用给定的脚本生成自签名证书、启动启用 SSL 的服务器并让客户端与其建立安全连接。" +"You should now have learned how to generate self-signed certificates using " +"the given script, start a SSL-enabled server, and have a client establish a " +"secure connection to it." 
+msgstr "" +"现在,你应该已经学会了如何使用给定的脚本生成自签名证书、启动启用 SSL 的服务器" +"并让客户端与其建立安全连接。" #: ../../source/how-to-enable-ssl-connections.rst:96 msgid "Additional resources" @@ -4822,8 +5068,8 @@ msgstr "补充资源" #: ../../source/how-to-enable-ssl-connections.rst:98 msgid "" -"These additional sources might be relevant if you would like to dive " -"deeper into the topic of certificates:" +"These additional sources might be relevant if you would like to dive deeper " +"into the topic of certificates:" msgstr "如果您想更深入地了解证书主题,这些额外的资料来源可能有帮助:" #: ../../source/how-to-enable-ssl-connections.rst:100 @@ -4840,15 +5086,16 @@ msgstr "实施策略" #: ../../source/how-to-implement-strategies.rst:4 msgid "" -"The strategy abstraction enables implementation of fully custom " -"strategies. A strategy is basically the federated learning algorithm that" -" runs on the server. Strategies decide how to sample clients, how to " -"configure clients for training, how to aggregate updates, and how to " -"evaluate models. Flower provides a few built-in strategies which are " -"based on the same API described below." +"The strategy abstraction enables implementation of fully custom strategies. " +"A strategy is basically the federated learning algorithm that runs on the " +"server. Strategies decide how to sample clients, how to configure clients " +"for training, how to aggregate updates, and how to evaluate models. Flower " +"provides a few built-in strategies which are based on the same API described " +"below." msgstr "" -"策略抽象类可以实现完全定制的策略。策略基本上就是在服务器上运行的联邦学习算法。策略决定如何对客户端进行采样、如何配置客户端进行训练、如何聚合参数更新以及如何评估模型。Flower" -" 提供了一些内置策略,这些策略基于下文所述的相同 API。" +"策略抽象类可以实现完全定制的策略。策略基本上就是在服务器上运行的联邦学习算" +"法。策略决定如何对客户端进行采样、如何配置客户端进行训练、如何聚合参数更新以" +"及如何评估模型。Flower 提供了一些内置策略,这些策略基于下文所述的相同 API。" #: ../../source/how-to-implement-strategies.rst:11 msgid "The :code:`Strategy` abstraction" @@ -4856,14 +5103,13 @@ msgstr ":code:`策略 ` 抽象类" #: ../../source/how-to-implement-strategies.rst:13 msgid "" -"All strategy implementation are derived from the abstract base class " -":code:`flwr.server.strategy.Strategy`, both built-in implementations and " -"third party implementations. This means that custom strategy " -"implementations have the exact same capabilities at their disposal as " -"built-in ones." +"All strategy implementation are derived from the abstract base class :code:" +"`flwr.server.strategy.Strategy`, both built-in implementations and third " +"party implementations. This means that custom strategy implementations have " +"the exact same capabilities at their disposal as built-in ones." 
msgstr "" -"所有策略实现均源自抽象基类 " -":code:`flwr.server.strategy.Strategy`,包括内置实现和第三方实现。这意味着自定义策略实现与内置实现具有完全相同的功能。" +"所有策略实现均源自抽象基类 :code:`flwr.server.strategy.Strategy`,包括内置实" +"现和第三方实现。这意味着自定义策略实现与内置实现具有完全相同的功能。" #: ../../source/how-to-implement-strategies.rst:18 msgid "" @@ -4873,10 +5119,12 @@ msgstr "策略抽象定义了一些需要实现的抽象方法:" #: ../../source/how-to-implement-strategies.rst:75 msgid "" -"Creating a new strategy means implementing a new :code:`class` (derived " -"from the abstract base class :code:`Strategy`) that implements for the " -"previously shown abstract methods:" -msgstr "创建一个新策略意味着要实现一个新的 :code:`class`(从抽象基类 :code:`Strategy` 派生),该类要实现前面显示的抽象方法:" +"Creating a new strategy means implementing a new :code:`class` (derived from " +"the abstract base class :code:`Strategy`) that implements for the previously " +"shown abstract methods:" +msgstr "" +"创建一个新策略意味着要实现一个新的 :code:`class`(从抽象基类 :code:" +"`Strategy` 派生),该类要实现前面显示的抽象方法:" #: ../../source/how-to-implement-strategies.rst:100 msgid "The Flower server calls these methods in the following order:" @@ -4892,44 +5140,48 @@ msgstr ":code:`初始化参数` 方法" #: ../../source/how-to-implement-strategies.rst:182 msgid "" -":code:`initialize_parameters` is called only once, at the very beginning " -"of an execution. It is responsible for providing the initial global model" -" parameters in a serialized form (i.e., as a :code:`Parameters` object)." +":code:`initialize_parameters` is called only once, at the very beginning of " +"an execution. It is responsible for providing the initial global model " +"parameters in a serialized form (i.e., as a :code:`Parameters` object)." msgstr "" -":code:`initialize_parameters` 只调用一次,即在执行开始时。它负责以序列化形式(即 " -":code:`Parameters` 对象)提供初始全局模型参数。" +":code:`initialize_parameters` 只调用一次,即在执行开始时。它负责以序列化形式" +"(即 :code:`Parameters` 对象)提供初始全局模型参数。" #: ../../source/how-to-implement-strategies.rst:184 msgid "" -"Built-in strategies return user-provided initial parameters. The " -"following example shows how initial parameters can be passed to " -":code:`FedAvg`:" -msgstr "内置策略会返回用户提供的初始参数。下面的示例展示了如何将初始参数传递给 :code:`FedAvg`:" +"Built-in strategies return user-provided initial parameters. The following " +"example shows how initial parameters can be passed to :code:`FedAvg`:" +msgstr "" +"内置策略会返回用户提供的初始参数。下面的示例展示了如何将初始参数传递给 :code:" +"`FedAvg`:" #: ../../source/how-to-implement-strategies.rst:209 msgid "" "The Flower server will call :code:`initialize_parameters`, which either " -"returns the parameters that were passed to :code:`initial_parameters`, or" -" :code:`None`. If no parameters are returned from " -":code:`initialize_parameters` (i.e., :code:`None`), the server will " -"randomly select one client and ask it to provide its parameters. This is " -"a convenience feature and not recommended in practice, but it can be " -"useful for prototyping. In practice, it is recommended to always use " -"server-side parameter initialization." -msgstr "" -"Flower 服务器将调用 :code:`initialize_parameters`,返回传给 " -":code:`initial_parameters` 的参数或 :code:`None`。如果 " -":code:`initialize_parameters` 没有返回任何参数(即 " -":code:`None`),服务器将随机选择一个客户端并要求其提供参数。这只是一个便捷的功能,在实际应用中并不推荐使用,但在原型开发中可能很有用。在实践中,建议始终使用服务器端参数初始化。" +"returns the parameters that were passed to :code:`initial_parameters`, or :" +"code:`None`. If no parameters are returned from :code:" +"`initialize_parameters` (i.e., :code:`None`), the server will randomly " +"select one client and ask it to provide its parameters. 
This is a " +"convenience feature and not recommended in practice, but it can be useful " +"for prototyping. In practice, it is recommended to always use server-side " +"parameter initialization." +msgstr "" +"Flower 服务器将调用 :code:`initialize_parameters`,返回传给 :code:" +"`initial_parameters` 的参数或 :code:`None`。如果 :code:" +"`initialize_parameters` 没有返回任何参数(即 :code:`None`),服务器将随机选择" +"一个客户端并要求其提供参数。这只是一个便捷的功能,在实际应用中并不推荐使用," +"但在原型开发中可能很有用。在实践中,建议始终使用服务器端参数初始化。" #: ../../source/how-to-implement-strategies.rst:213 msgid "" "Server-side parameter initialization is a powerful mechanism. It can be " -"used, for example, to resume training from a previously saved checkpoint." -" It is also the fundamental capability needed to implement hybrid " -"approaches, for example, to fine-tune a pre-trained model using federated" -" learning." -msgstr "服务器端参数初始化是一种强大的机制。例如,它可以用来从先前保存的检查点恢复训练。它也是实现混合方法所需的基本能力,例如,使用联邦学习对预先训练好的模型进行微调。" +"used, for example, to resume training from a previously saved checkpoint. It " +"is also the fundamental capability needed to implement hybrid approaches, " +"for example, to fine-tune a pre-trained model using federated learning." +msgstr "" +"服务器端参数初始化是一种强大的机制。例如,它可以用来从先前保存的检查点恢复训" +"练。它也是实现混合方法所需的基本能力,例如,使用联邦学习对预先训练好的模型进" +"行微调。" #: ../../source/how-to-implement-strategies.rst:216 msgid "The :code:`configure_fit` method" @@ -4937,21 +5189,23 @@ msgstr ":code:`configure_fit`方法" #: ../../source/how-to-implement-strategies.rst:218 msgid "" -":code:`configure_fit` is responsible for configuring the upcoming round " -"of training. What does *configure* mean in this context? Configuring a " -"round means selecting clients and deciding what instructions to send to " -"these clients. The signature of :code:`configure_fit` makes this clear:" +":code:`configure_fit` is responsible for configuring the upcoming round of " +"training. What does *configure* mean in this context? Configuring a round " +"means selecting clients and deciding what instructions to send to these " +"clients. The signature of :code:`configure_fit` makes this clear:" msgstr "" -":code:`configure_fit` " -"负责配置即将开始的一轮训练。*配置*在这里是什么意思?配置一轮训练意味着选择客户并决定向这些客户发送什么指令。:code:`configure_fit`" -" 说明了这一点:" +":code:`configure_fit` 负责配置即将开始的一轮训练。*配置*在这里是什么意思?配" +"置一轮训练意味着选择客户并决定向这些客户发送什么指令。:code:`configure_fit` " +"说明了这一点:" #: ../../source/how-to-implement-strategies.rst:231 msgid "" "The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_fit`:" -msgstr "返回值是一个元组列表,每个元组代表将发送到特定客户端的指令。策略实现通常在 :code:`configure_fit` 中执行以下步骤:" +"that will be sent to a particular client. 
Strategy implementations usually " +"perform the following steps in :code:`configure_fit`:" +msgstr "" +"返回值是一个元组列表,每个元组代表将发送到特定客户端的指令。策略实现通常在 :" +"code:`configure_fit` 中执行以下步骤:" #: ../../source/how-to-implement-strategies.rst:233 #: ../../source/how-to-implement-strategies.rst:280 @@ -4959,39 +5213,40 @@ msgid "" "Use the :code:`client_manager` to randomly sample all (or a subset of) " "available clients (each represented as a :code:`ClientProxy` object)" msgstr "" -"使用 :code:`client_manager` 随机抽样所有(或部分)可用客户端(每个客户端都表示为 :code:`ClientProxy` " -"对象)" +"使用 :code:`client_manager` 随机抽样所有(或部分)可用客户端(每个客户端都表" +"示为 :code:`ClientProxy` 对象)" #: ../../source/how-to-implement-strategies.rst:234 msgid "" "Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the " "current global model :code:`parameters` and :code:`config` dict" msgstr "" -"将每个 :code:`ClientProxy` 与持有当前全局模型 :code:`parameters` 和 :code:`config` " -"dict 的 :code:`FitIns` 配对" +"将每个 :code:`ClientProxy` 与持有当前全局模型 :code:`parameters` 和 :code:" +"`config` dict 的 :code:`FitIns` 配对" #: ../../source/how-to-implement-strategies.rst:236 #, fuzzy msgid "" "More sophisticated implementations can use :code:`configure_fit` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_fit`." +"implement custom client selection logic. A client will only participate in a " +"round if the corresponding :code:`ClientProxy` is included in the list " +"returned from :code:`configure_fit`." msgstr "" -"更复杂的实现可以使用 :code:`configure_fit` 来实现自定义的客户端选择逻辑。只有当相应的 " -":code:`ClientProxy` 包含在 :code:`configure_fit` 返回的列表中时,客户端才会参与进来。" +"更复杂的实现可以使用 :code:`configure_fit` 来实现自定义的客户端选择逻辑。只有" +"当相应的 :code:`ClientProxy` 包含在 :code:`configure_fit` 返回的列表中时,客" +"户端才会参与进来。" #: ../../source/how-to-implement-strategies.rst:240 msgid "" "The structure of this return value provides a lot of flexibility to the " "user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. This enables custom strategies " -"to train, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." +"instructions can be sent to each client. This enables custom strategies to " +"train, for example, different models on different clients, or use different " +"hyperparameters on different clients (via the :code:`config` dict)." msgstr "" -"该返回值的结构为用户提供了很大的灵活性。由于指令是按客户端定义的,因此可以向每个客户端发送不同的指令。这使得自定义策略成为可能,例如在不同的客户端上训练不同的模型,或在不同的客户端上使用不同的超参数(通过" -" :code:`config` dict)。" +"该返回值的结构为用户提供了很大的灵活性。由于指令是按客户端定义的,因此可以向" +"每个客户端发送不同的指令。这使得自定义策略成为可能,例如在不同的客户端上训练" +"不同的模型,或在不同的客户端上使用不同的超参数(通过 :code:`config` dict)。" #: ../../source/how-to-implement-strategies.rst:243 msgid "The :code:`aggregate_fit` method" @@ -4999,31 +5254,33 @@ msgstr ":code:`aggregate_fit` 方法" #: ../../source/how-to-implement-strategies.rst:245 msgid "" -":code:`aggregate_fit` is responsible for aggregating the results returned" -" by the clients that were selected and asked to train in " -":code:`configure_fit`." -msgstr ":code:`aggregate_fit` 负责汇总在 :code:`configure_fit` 中选择并要求训练的客户端所返回的结果。" +":code:`aggregate_fit` is responsible for aggregating the results returned by " +"the clients that were selected and asked to train in :code:`configure_fit`." 
+msgstr "" +":code:`aggregate_fit` 负责汇总在 :code:`configure_fit` 中选择并要求训练的客户" +"端所返回的结果。" #: ../../source/how-to-implement-strategies.rst:258 msgid "" "Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via " -":code:`configure_fit`). :code:`aggregate_fit` therefore receives a list " -"of :code:`results`, but also a list of :code:`failures`." +"will get results from all the clients it sent instructions to (via :code:" +"`configure_fit`). :code:`aggregate_fit` therefore receives a list of :code:" +"`results`, but also a list of :code:`failures`." msgstr "" -"当然,失败是有可能发生的,因此无法保证服务器会从它发送指令(通过 :code:`configure_fit`)的所有客户端获得结果。因此 " -":code:`aggregate_fit` 会收到 :code:`results` 的列表,但也会收到 :code:`failures` 的列表。" +"当然,失败是有可能发生的,因此无法保证服务器会从它发送指令(通过 :code:" +"`configure_fit`)的所有客户端获得结果。因此 :code:`aggregate_fit` 会收到 :" +"code:`results` 的列表,但也会收到 :code:`failures` 的列表。" #: ../../source/how-to-implement-strategies.rst:260 msgid "" -":code:`aggregate_fit` returns an optional :code:`Parameters` object and a" -" dictionary of aggregated metrics. The :code:`Parameters` return value is" -" optional because :code:`aggregate_fit` might decide that the results " +":code:`aggregate_fit` returns an optional :code:`Parameters` object and a " +"dictionary of aggregated metrics. The :code:`Parameters` return value is " +"optional because :code:`aggregate_fit` might decide that the results " "provided are not sufficient for aggregation (e.g., too many failures)." msgstr "" -":code:`aggregate_fit` 返回一个可选的 :code:`Parameters` " -"对象和一个聚合度量的字典。:code:`Parameters` 返回值是可选的,因为 :code:`aggregate_fit` " -"可能会认为所提供的结果不足以进行聚合(例如,失败次数过多)。" +":code:`aggregate_fit` 返回一个可选的 :code:`Parameters` 对象和一个聚合度量的" +"字典。:code:`Parameters` 返回值是可选的,因为 :code:`aggregate_fit` 可能会认" +"为所提供的结果不足以进行聚合(例如,失败次数过多)。" #: ../../source/how-to-implement-strategies.rst:263 msgid "The :code:`configure_evaluate` method" @@ -5031,53 +5288,55 @@ msgstr ":code:`configure_evaluate`方法" #: ../../source/how-to-implement-strategies.rst:265 msgid "" -":code:`configure_evaluate` is responsible for configuring the upcoming " -"round of evaluation. What does *configure* mean in this context? " -"Configuring a round means selecting clients and deciding what " -"instructions to send to these clients. The signature of " -":code:`configure_evaluate` makes this clear:" +":code:`configure_evaluate` is responsible for configuring the upcoming round " +"of evaluation. What does *configure* mean in this context? Configuring a " +"round means selecting clients and deciding what instructions to send to " +"these clients. The signature of :code:`configure_evaluate` makes this clear:" msgstr "" -":code:`configure_evaluate` " -"负责配置下一轮评估。*配置*在这里是什么意思?配置一轮评估意味着选择客户端并决定向这些客户端发送什么指令。:code:`configure_evaluate`" -" 说明了这一点:" +":code:`configure_evaluate` 负责配置下一轮评估。*配置*在这里是什么意思?配置一" +"轮评估意味着选择客户端并决定向这些客户端发送什么指令。:code:" +"`configure_evaluate` 说明了这一点:" #: ../../source/how-to-implement-strategies.rst:278 msgid "" "The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_evaluate`:" -msgstr "返回值是一个元组列表,每个元组代表将发送到特定客户端的指令。策略实现通常在 :code:`configure_evaluate` 中执行以下步骤:" +"that will be sent to a particular client. 
Strategy implementations usually " +"perform the following steps in :code:`configure_evaluate`:" +msgstr "" +"返回值是一个元组列表,每个元组代表将发送到特定客户端的指令。策略实现通常在 :" +"code:`configure_evaluate` 中执行以下步骤:" #: ../../source/how-to-implement-strategies.rst:281 msgid "" -"Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding " -"the current global model :code:`parameters` and :code:`config` dict" +"Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding the " +"current global model :code:`parameters` and :code:`config` dict" msgstr "" -"将每个 :code:`ClientProxy` 与持有当前全局模型 :code:`parameters` 和 :code:`config` " -"dict 的 :code:`EvaluateIns` 配对" +"将每个 :code:`ClientProxy` 与持有当前全局模型 :code:`parameters` 和 :code:" +"`config` dict 的 :code:`EvaluateIns` 配对" #: ../../source/how-to-implement-strategies.rst:283 #, fuzzy msgid "" "More sophisticated implementations can use :code:`configure_evaluate` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_evaluate`." +"implement custom client selection logic. A client will only participate in a " +"round if the corresponding :code:`ClientProxy` is included in the list " +"returned from :code:`configure_evaluate`." msgstr "" -"更复杂的实现可以使用 :code:`configure_evaluate` 来实现自定义的客户端选择逻辑。只有当相应的 " -":code:`ClientProxy` 包含在 :code:`configure_evaluate` 返回的列表中时,客户端才会参与进来。" +"更复杂的实现可以使用 :code:`configure_evaluate` 来实现自定义的客户端选择逻" +"辑。只有当相应的 :code:`ClientProxy` 包含在 :code:`configure_evaluate` 返回的" +"列表中时,客户端才会参与进来。" #: ../../source/how-to-implement-strategies.rst:287 msgid "" "The structure of this return value provides a lot of flexibility to the " "user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. This enables custom strategies " -"to evaluate, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." +"instructions can be sent to each client. This enables custom strategies to " +"evaluate, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the :code:`config` dict)." msgstr "" -"该返回值的结构为用户提供了很大的灵活性。由于指令是按客户端定义的,因此可以向每个客户端发送不同的指令。这使得自定义策略可以在不同客户端上评估不同的模型,或在不同客户端上使用不同的超参数(通过" -" :code:`config` dict)。" +"该返回值的结构为用户提供了很大的灵活性。由于指令是按客户端定义的,因此可以向" +"每个客户端发送不同的指令。这使得自定义策略可以在不同客户端上评估不同的模型," +"或在不同客户端上使用不同的超参数(通过 :code:`config` dict)。" #: ../../source/how-to-implement-strategies.rst:291 msgid "The :code:`aggregate_evaluate` method" @@ -5086,33 +5345,34 @@ msgstr ":code:`aggregate_evaluate` 方法" #: ../../source/how-to-implement-strategies.rst:293 msgid "" ":code:`aggregate_evaluate` is responsible for aggregating the results " -"returned by the clients that were selected and asked to evaluate in " -":code:`configure_evaluate`." +"returned by the clients that were selected and asked to evaluate in :code:" +"`configure_evaluate`." msgstr "" -":code:`aggregate_evaluate` 负责汇总在 :code:`configure_evaluate` " -"中选择并要求评估的客户端返回的结果。" +":code:`aggregate_evaluate` 负责汇总在 :code:`configure_evaluate` 中选择并要求" +"评估的客户端返回的结果。" #: ../../source/how-to-implement-strategies.rst:306 msgid "" "Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via " -":code:`configure_evaluate`). 
:code:`aggregate_evaluate` therefore " -"receives a list of :code:`results`, but also a list of :code:`failures`." +"will get results from all the clients it sent instructions to (via :code:" +"`configure_evaluate`). :code:`aggregate_evaluate` therefore receives a list " +"of :code:`results`, but also a list of :code:`failures`." msgstr "" -"当然,失败是有可能发生的,因此无法保证服务器会从它发送指令(通过 " -":code:`configure_evaluate`)的所有客户端获得结果。因此, :code:`aggregate_evaluate` 会接收 " -":code:`results` 的列表,但也会接收 :code:`failures` 的列表。" +"当然,失败是有可能发生的,因此无法保证服务器会从它发送指令(通过 :code:" +"`configure_evaluate`)的所有客户端获得结果。因此, :code:" +"`aggregate_evaluate` 会接收 :code:`results` 的列表,但也会接收 :code:" +"`failures` 的列表。" #: ../../source/how-to-implement-strategies.rst:308 msgid "" -":code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a" -" dictionary of aggregated metrics. The :code:`float` return value is " -"optional because :code:`aggregate_evaluate` might decide that the results" -" provided are not sufficient for aggregation (e.g., too many failures)." +":code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a " +"dictionary of aggregated metrics. The :code:`float` return value is optional " +"because :code:`aggregate_evaluate` might decide that the results provided " +"are not sufficient for aggregation (e.g., too many failures)." msgstr "" -":code:`aggregate_evaluate` 返回一个可选的 " -":code:`float`(损失值)和一个聚合指标字典。:code:`float` 返回值是可选的,因为 " -":code:`aggregate_evaluate` 可能会认为所提供的结果不足以进行聚合(例如,失败次数过多)。" +":code:`aggregate_evaluate` 返回一个可选的 :code:`float`(损失值)和一个聚合指" +"标字典。:code:`float` 返回值是可选的,因为 :code:`aggregate_evaluate` 可能会" +"认为所提供的结果不足以进行聚合(例如,失败次数过多)。" #: ../../source/how-to-implement-strategies.rst:311 msgid "The :code:`evaluate` method" @@ -5121,23 +5381,24 @@ msgstr ":code:`evaluate`方法" #: ../../source/how-to-implement-strategies.rst:313 msgid "" ":code:`evaluate` is responsible for evaluating model parameters on the " -"server-side. Having :code:`evaluate` in addition to " -":code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies " -"to perform both servers-side and client-side (federated) evaluation." +"server-side. Having :code:`evaluate` in addition to :code:" +"`configure_evaluate`/:code:`aggregate_evaluate` enables strategies to " +"perform both servers-side and client-side (federated) evaluation." msgstr "" -":code:`evaluate` 负责在服务器端评估模型参数。除了 " -":code:`configure_evaluate`/:code:`aggregate_evaluate` 之外,:code:`evaluate`" -" 可以使策略同时执行服务器端和客户端(联邦)评估。" +":code:`evaluate` 负责在服务器端评估模型参数。除了 :code:" +"`configure_evaluate`/:code:`aggregate_evaluate` 之外,:code:`evaluate` 可以使" +"策略同时执行服务器端和客户端(联邦)评估。" #: ../../source/how-to-implement-strategies.rst:323 msgid "" -"The return value is again optional because the strategy might not need to" -" implement server-side evaluation or because the user-defined " -":code:`evaluate` method might not complete successfully (e.g., it might " -"fail to load the server-side evaluation data)." +"The return value is again optional because the strategy might not need to " +"implement server-side evaluation or because the user-defined :code:" +"`evaluate` method might not complete successfully (e.g., it might fail to " +"load the server-side evaluation data)." 
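As a minimal sketch (not taken from the Flower codebase), a strategy that performs server-side evaluation by overriding :code:`evaluate` could look roughly as follows; :code:`centralized_evaluate` is a hypothetical helper standing in for whatever routine rebuilds the model and scores it on server-side data::

    from typing import Dict, Optional, Tuple

    import numpy as np
    import flwr as fl
    from flwr.common import Parameters, Scalar, parameters_to_ndarrays


    def centralized_evaluate(ndarrays) -> Tuple[float, Dict[str, Scalar]]:
        # Hypothetical helper: a real one would rebuild the model from
        # `ndarrays`, load a server-side dataset and compute loss/metrics.
        loss = float(sum(float(np.abs(a).mean()) for a in ndarrays))
        return loss, {"num_arrays": len(ndarrays)}


    class FedAvgWithServerEval(fl.server.strategy.FedAvg):
        """FedAvg variant that also evaluates the global model server-side."""

        def evaluate(
            self, server_round: int, parameters: Parameters
        ) -> Optional[Tuple[float, Dict[str, Scalar]]]:
            ndarrays = parameters_to_ndarrays(parameters)
            if not ndarrays:
                # A real strategy would likewise return None when server-side
                # evaluation is impossible (e.g. the data failed to load);
                # None simply skips server-side evaluation for this round.
                return None
            return centralized_evaluate(ndarrays)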
msgstr "" -"返回值也是可选的,因为策略可能不需要执行服务器端评估,或者因为用户定义的 :code:`evaluate` " -"方法可能无法成功完成(例如,它可能无法加载服务器端评估数据)。" +"返回值也是可选的,因为策略可能不需要执行服务器端评估,或者因为用户定义的 :" +"code:`evaluate` 方法可能无法成功完成(例如,它可能无法加载服务器端评估数" +"据)。" #: ../../source/how-to-install-flower.rst:2 msgid "Install Flower" @@ -5158,8 +5419,7 @@ msgstr "使用 pip" #: ../../source/how-to-install-flower.rst:17 msgid "" -"Stable releases are available on `PyPI " -"`_::" +"Stable releases are available on `PyPI `_::" msgstr "稳定版本可在 `PyPI `_::" #: ../../source/how-to-install-flower.rst:21 @@ -5181,15 +5441,15 @@ msgstr "Flower 也可以从 ``conda-forge`` 频道安装。" #: ../../source/how-to-install-flower.rst:31 #, fuzzy msgid "" -"If you have not added ``conda-forge`` to your channels, you will first " -"need to run the following::" +"If you have not added ``conda-forge`` to your channels, you will first need " +"to run the following::" msgstr "如果您尚未在频道中添加 ``conda-forge``,则首先需要运行以下程序::" #: ../../source/how-to-install-flower.rst:36 #, fuzzy msgid "" -"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " -"installed with ``conda``::" +"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be installed " +"with ``conda``::" msgstr "一旦启用了 ``conda-forge`` 频道,就可以使用 ``conda``: 安装 ``flwr``:" #: ../../source/how-to-install-flower.rst:40 @@ -5205,9 +5465,11 @@ msgstr "验证安装" #, fuzzy msgid "" "The following command can be used to verify if Flower was successfully " -"installed. If everything worked, it should print the version of Flower to" -" the command line::" -msgstr "可以使用以下命令来验证 Flower 是否安装成功。如果一切正常,它将在命令行中打印 Flower 的版本::" +"installed. If everything worked, it should print the version of Flower to " +"the command line::" +msgstr "" +"可以使用以下命令来验证 Flower 是否安装成功。如果一切正常,它将在命令行中打印 " +"Flower 的版本::" #: ../../source/how-to-install-flower.rst:55 msgid "Advanced installation options" @@ -5231,16 +5493,19 @@ msgstr "安装预发布版本" #: ../../source/how-to-install-flower.rst:65 msgid "" -"New (possibly unstable) versions of Flower are sometimes available as " -"pre-release versions (alpha, beta, release candidate) before the stable " -"release happens::" -msgstr "在稳定版发布之前,Flower 的新版本(可能是不稳定版)有时会作为预发布版本(alpha、beta、候选发布版本)提供::" +"New (possibly unstable) versions of Flower are sometimes available as pre-" +"release versions (alpha, beta, release candidate) before the stable release " +"happens::" +msgstr "" +"在稳定版发布之前,Flower 的新版本(可能是不稳定版)有时会作为预发布版本" +"(alpha、beta、候选发布版本)提供::" #: ../../source/how-to-install-flower.rst:69 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" -" should be installed with the ``simulation`` extra::" -msgstr "对于使用虚拟客户端引擎的模拟,`flwr``预发行版应与`simulation``一起安装:" +"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases " +"should be installed with the ``simulation`` extra::" +msgstr "" +"对于使用虚拟客户端引擎的模拟,`flwr``预发行版应与`simulation``一起安装:" #: ../../source/how-to-install-flower.rst:74 msgid "Install nightly release" @@ -5248,14 +5513,14 @@ msgstr "安装隔夜版本" #: ../../source/how-to-install-flower.rst:76 msgid "" -"The latest (potentially unstable) changes in Flower are available as " -"nightly releases::" +"The latest (potentially unstable) changes in Flower are available as nightly " +"releases::" msgstr "Flower 中最新(可能不稳定)的更改以隔夜发布的形式提供::" #: ../../source/how-to-install-flower.rst:80 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr-nightly`` " -"should be installed with the ``simulation`` extra::" +"For simulations that use the Virtual Client Engine, ``flwr-nightly`` should " +"be 
installed with the ``simulation`` extra::" msgstr "对于使用虚拟客户端引擎的模拟,`flwr-nightly`应与`simulation`一起安装:" #: ../../source/how-to-monitor-simulation.rst:2 @@ -5264,20 +5529,23 @@ msgstr "监控模拟" #: ../../source/how-to-monitor-simulation.rst:4 msgid "" -"Flower allows you to monitor system resources while running your " -"simulation. Moreover, the Flower simulation engine is powerful and " -"enables you to decide how to allocate resources per client manner and " -"constrain the total usage. Insights from resource consumption can help " -"you make smarter decisions and speed up the execution time." +"Flower allows you to monitor system resources while running your simulation. " +"Moreover, the Flower simulation engine is powerful and enables you to decide " +"how to allocate resources per client manner and constrain the total usage. " +"Insights from resource consumption can help you make smarter decisions and " +"speed up the execution time." msgstr "" -"Flower 允许您在运行模拟时监控系统资源。此外,Flower " -"仿真引擎功能强大,能让您决定如何按客户端方式分配资源并限制总使用量。从资源消耗中获得的观察可以帮助您做出更明智的决策,并加快执行时间。" +"Flower 允许您在运行模拟时监控系统资源。此外,Flower 仿真引擎功能强大,能让您" +"决定如何按客户端方式分配资源并限制总使用量。从资源消耗中获得的观察可以帮助您" +"做出更明智的决策,并加快执行时间。" #: ../../source/how-to-monitor-simulation.rst:6 msgid "" -"The specific instructions assume you are using macOS and have the " -"`Homebrew `_ package manager installed." -msgstr "具体说明假定你使用的是 macOS,并且安装了 `Homebrew `_ 软件包管理器。" +"The specific instructions assume you are using macOS and have the `Homebrew " +"`_ package manager installed." +msgstr "" +"具体说明假定你使用的是 macOS,并且安装了 `Homebrew `_ 软件" +"包管理器。" #: ../../source/how-to-monitor-simulation.rst:10 msgid "Downloads" @@ -5285,14 +5553,14 @@ msgstr "下载" #: ../../source/how-to-monitor-simulation.rst:16 msgid "" -"`Prometheus `_ is used for data collection, while" -" `Grafana `_ will enable you to visualize the " -"collected data. They are both well integrated with `Ray " -"`_ which Flower uses under the hood." +"`Prometheus `_ is used for data collection, while " +"`Grafana `_ will enable you to visualize the collected " +"data. They are both well integrated with `Ray `_ which " +"Flower uses under the hood." msgstr "" -"`Prometheus `_ 用于收集数据,而 `Grafana " -"`_ 则能让你将收集到的数据可视化。它们都与 Flower 在引擎下使用的 `Ray " -"`_ 紧密集成。" +"`Prometheus `_ 用于收集数据,而 `Grafana `_ 则能让你将收集到的数据可视化。它们都与 Flower 在引擎下使用的 " +"`Ray `_ 紧密集成。" #: ../../source/how-to-monitor-simulation.rst:18 msgid "" @@ -5310,23 +5578,26 @@ msgstr "在上一代英特尔 Mac 设备上,应该是这样:" #: ../../source/how-to-monitor-simulation.rst:34 msgid "" -"Open the respective configuration files and change them. Depending on " -"your device, use one of the two following commands:" +"Open the respective configuration files and change them. Depending on your " +"device, use one of the two following commands:" msgstr "打开相应的配置文件并修改它们。根据设备情况,使用以下两个命令之一:" #: ../../source/how-to-monitor-simulation.rst:44 msgid "" -"and then delete all the text in the file and paste a new Prometheus " -"config you see below. You may adjust the time intervals to your " -"requirements:" -msgstr "然后删除文件中的所有文本,粘贴一个新的 Prometheus 配置文件,如下所示。您可以根据需要调整时间间隔:" +"and then delete all the text in the file and paste a new Prometheus config " +"you see below. You may adjust the time intervals to your requirements:" +msgstr "" +"然后删除文件中的所有文本,粘贴一个新的 Prometheus 配置文件,如下所示。您可以" +"根据需要调整时间间隔:" #: ../../source/how-to-monitor-simulation.rst:59 msgid "" -"Now after you have edited the Prometheus configuration, do the same with " -"the Grafana configuration files. 
Open those using one of the following " -"commands as before:" -msgstr "编辑完 Prometheus 配置后,请对 Grafana 配置文件执行同样的操作。与之前一样,使用以下命令之一打开这些文件:" +"Now after you have edited the Prometheus configuration, do the same with the " +"Grafana configuration files. Open those using one of the following commands " +"as before:" +msgstr "" +"编辑完 Prometheus 配置后,请对 Grafana 配置文件执行同样的操作。与之前一样,使" +"用以下命令之一打开这些文件:" #: ../../source/how-to-monitor-simulation.rst:69 msgid "" @@ -5336,8 +5607,8 @@ msgstr "您的终端编辑器应该会打开,并允许您像之前一样应用 #: ../../source/how-to-monitor-simulation.rst:84 msgid "" -"Congratulations, you just downloaded all the necessary software needed " -"for metrics tracking. Now, let’s start it." +"Congratulations, you just downloaded all the necessary software needed for " +"metrics tracking. Now, let’s start it." msgstr "恭喜您,您刚刚下载了指标跟踪所需的所有软件。现在,让我们开始吧。" #: ../../source/how-to-monitor-simulation.rst:88 @@ -5352,8 +5623,8 @@ msgstr "在运行 Flower 模拟之前,您必须启动刚刚安装和配置的 #: ../../source/how-to-monitor-simulation.rst:97 msgid "" -"Please include the following argument in your Python code when starting a" -" simulation." +"Please include the following argument in your Python code when starting a " +"simulation." msgstr "开始模拟时,请在 Python 代码中加入以下参数。" #: ../../source/how-to-monitor-simulation.rst:108 @@ -5362,8 +5633,8 @@ msgstr "现在,您可以开始工作了。" #: ../../source/how-to-monitor-simulation.rst:110 msgid "" -"Shortly after the simulation starts, you should see the following logs in" -" your terminal:" +"Shortly after the simulation starts, you should see the following logs in " +"your terminal:" msgstr "模拟启动后不久,您就会在终端中看到以下日志:" #: ../../source/how-to-monitor-simulation.rst:117 @@ -5372,28 +5643,31 @@ msgstr "您可以在 ``_ 查看所有内容。" #: ../../source/how-to-monitor-simulation.rst:119 msgid "" -"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" -" lowest option)." -msgstr "这是一个 Ray Dashboard。您可以导航到 \"度量标准\"(左侧面板,最低选项)。" +"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the " +"lowest option)." +msgstr "" +"这是一个 Ray Dashboard。您可以导航到 \"度量标准\"(左侧面板,最低选项)。" #: ../../source/how-to-monitor-simulation.rst:121 msgid "" -"Or alternatively, you can just see them in Grafana by clicking on the " -"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" -" only accessible during the simulation. After the simulation ends, you " -"can only use Grafana to explore the metrics. You can start Grafana by " -"going to ``http://localhost:3000/``." +"Or alternatively, you can just see them in Grafana by clicking on the right-" +"up corner, “View in Grafana”. Please note that the Ray dashboard is only " +"accessible during the simulation. After the simulation ends, you can only " +"use Grafana to explore the metrics. You can start Grafana by going to " +"``http://localhost:3000/``." msgstr "" -"或者,您也可以点击右上角的 \"在 Grafana 中查看\",在 Grafana 中查看它们。请注意,Ray " -"仪表盘只能在模拟期间访问。模拟结束后,您只能使用 Grafana 浏览指标。您可以访问 ``http://localhost:3000/``启动 " -"Grafana。" +"或者,您也可以点击右上角的 \"在 Grafana 中查看\",在 Grafana 中查看它们。请注" +"意,Ray 仪表盘只能在模拟期间访问。模拟结束后,您只能使用 Grafana 浏览指标。您" +"可以访问 ``http://localhost:3000/``启动 Grafana。" #: ../../source/how-to-monitor-simulation.rst:123 msgid "" "After you finish the visualization, stop Prometheus and Grafana. This is " "important as they will otherwise block, for example port :code:`3000` on " "your machine as long as they are running." 
-msgstr "完成可视化后,请停止 Prometheus 和 Grafana。这一点很重要,否则只要它们在运行,就会阻塞机器上的端口 :code:`3000`。" +msgstr "" +"完成可视化后,请停止 Prometheus 和 Grafana。这一点很重要,否则只要它们在运" +"行,就会阻塞机器上的端口 :code:`3000`。" #: ../../source/how-to-monitor-simulation.rst:132 msgid "Resource allocation" @@ -5401,21 +5675,24 @@ msgstr "资源分配" #: ../../source/how-to-monitor-simulation.rst:134 msgid "" -"You must understand how the Ray library works to efficiently allocate " -"system resources to simulation clients on your own." -msgstr "您必须了解 Ray 库是如何工作的,才能有效地为自己的仿真客户端分配系统资源。" +"You must understand how the Ray library works to efficiently allocate system " +"resources to simulation clients on your own." +msgstr "" +"您必须了解 Ray 库是如何工作的,才能有效地为自己的仿真客户端分配系统资源。" #: ../../source/how-to-monitor-simulation.rst:136 msgid "" "Initially, the simulation (which Ray handles under the hood) starts by " "default with all the available resources on the system, which it shares " -"among the clients. It doesn't mean it divides it equally among all of " -"them, nor that the model training happens at all of them simultaneously. " -"You will learn more about that in the later part of this blog. You can " -"check the system resources by running the following:" +"among the clients. It doesn't mean it divides it equally among all of them, " +"nor that the model training happens at all of them simultaneously. You will " +"learn more about that in the later part of this blog. You can check the " +"system resources by running the following:" msgstr "" -"最初,模拟(由 Ray " -"在引擎下处理)默认使用系统上的所有可用资源启动,并在客户端之间共享。但这并不意味着它会将资源平均分配给所有客户端,也不意味着模型训练会在所有客户端同时进行。您将在本博客的后半部分了解到更多相关信息。您可以运行以下命令检查系统资源:" +"最初,模拟(由 Ray 在引擎下处理)默认使用系统上的所有可用资源启动,并在客户端" +"之间共享。但这并不意味着它会将资源平均分配给所有客户端,也不意味着模型训练会" +"在所有客户端同时进行。您将在本博客的后半部分了解到更多相关信息。您可以运行以" +"下命令检查系统资源:" #: ../../source/how-to-monitor-simulation.rst:143 msgid "In Google Colab, the result you see might be similar to this:" @@ -5423,8 +5700,8 @@ msgstr "在 Google Colab 中,您看到的结果可能与此类似:" #: ../../source/how-to-monitor-simulation.rst:155 msgid "" -"However, you can overwrite the defaults. When starting a simulation, do " -"the following (you don't need to overwrite all of them):" +"However, you can overwrite the defaults. When starting a simulation, do the " +"following (you don't need to overwrite all of them):" msgstr "不过,您可以覆盖默认值。开始模拟时,请执行以下操作(不必全部覆盖):" #: ../../source/how-to-monitor-simulation.rst:175 @@ -5433,23 +5710,26 @@ msgstr "我们还可以为单个客户指定资源。" #: ../../source/how-to-monitor-simulation.rst:205 msgid "" -"Now comes the crucial part. Ray will start a new client only when it has " -"all the required resources (such that they run in parallel) when the " -"resources allow." -msgstr "现在到了关键部分。只有在资源允许的情况下,Ray 才会在拥有所有所需资源(如并行运行)时启动新客户端。" +"Now comes the crucial part. Ray will start a new client only when it has all " +"the required resources (such that they run in parallel) when the resources " +"allow." +msgstr "" +"现在到了关键部分。只有在资源允许的情况下,Ray 才会在拥有所有所需资源(如并行" +"运行)时启动新客户端。" #: ../../source/how-to-monitor-simulation.rst:207 msgid "" -"In the example above, only one client will be run, so your clients won't " -"run concurrently. Setting :code:`client_num_gpus = 0.5` would allow " -"running two clients and therefore enable them to run concurrently. Be " -"careful not to require more resources than available. If you specified " -":code:`client_num_gpus = 2`, the simulation wouldn't start (even if you " -"had 2 GPUs but decided to set 1 in :code:`ray_init_args`)." +"In the example above, only one client will be run, so your clients won't run " +"concurrently. 
Setting :code:`client_num_gpus = 0.5` would allow running two " +"clients and therefore enable them to run concurrently. Be careful not to " +"require more resources than available. If you specified :code:" +"`client_num_gpus = 2`, the simulation wouldn't start (even if you had 2 GPUs " +"but decided to set 1 in :code:`ray_init_args`)." msgstr "" -"在上面的示例中,将只运行一个客户端,因此您的客户端不会并发运行。设置 :code:`client_num_gpus = 0.5` " -"将允许运行两个客户端,从而使它们能够并发运行。请注意,所需的资源不要超过可用资源。如果您指定 :code:`client_num_gpus = " -"2`,模拟将无法启动(即使您有 2 个 GPU,但决定在 :code:`ray_init_args` 中设置为 1)。" +"在上面的示例中,将只运行一个客户端,因此您的客户端不会并发运行。设置 :code:" +"`client_num_gpus = 0.5` 将允许运行两个客户端,从而使它们能够并发运行。请注" +"意,所需的资源不要超过可用资源。如果您指定 :code:`client_num_gpus = 2`,模拟" +"将无法启动(即使您有 2 个 GPU,但决定在 :code:`ray_init_args` 中设置为 1)。" #: ../../source/how-to-monitor-simulation.rst:212 ../../source/ref-faq.rst:2 msgid "FAQ" @@ -5461,22 +5741,25 @@ msgstr "问:我没有看到任何指标记录。" #: ../../source/how-to-monitor-simulation.rst:216 msgid "" -"A: The timeframe might not be properly set. The setting is in the top " -"right corner (\"Last 30 minutes\" by default). Please change the " -"timeframe to reflect the period when the simulation was running." -msgstr "答:时间范围可能没有正确设置。设置在右上角(默认为 \"最后 30 分钟\")。请更改时间框架,以反映模拟运行的时间段。" +"A: The timeframe might not be properly set. The setting is in the top right " +"corner (\"Last 30 minutes\" by default). Please change the timeframe to " +"reflect the period when the simulation was running." +msgstr "" +"答:时间范围可能没有正确设置。设置在右上角(默认为 \"最后 30 分钟\")。请更改" +"时间框架,以反映模拟运行的时间段。" #: ../../source/how-to-monitor-simulation.rst:218 msgid "" -"Q: I see “Grafana server not detected. Please make sure the Grafana " -"server is running and refresh this page” after going to the Metrics tab " -"in Ray Dashboard." -msgstr "问:我看到 \"未检测到 Grafana 服务器。请确保 Grafana 服务器正在运行并刷新此页面\"。" +"Q: I see “Grafana server not detected. Please make sure the Grafana server " +"is running and refresh this page” after going to the Metrics tab in Ray " +"Dashboard." +msgstr "" +"问:我看到 \"未检测到 Grafana 服务器。请确保 Grafana 服务器正在运行并刷新此页" +"面\"。" #: ../../source/how-to-monitor-simulation.rst:220 msgid "" -"A: You probably don't have Grafana running. Please check the running " -"services" +"A: You probably don't have Grafana running. Please check the running services" msgstr "答:您可能没有运行 Grafana。请检查正在运行的服务" #: ../../source/how-to-monitor-simulation.rst:226 @@ -5487,8 +5770,8 @@ msgstr "问:在访问 ``_时,我看到 \"无法访问 #: ../../source/how-to-monitor-simulation.rst:228 msgid "" -"A: Either the simulation has already finished, or you still need to start" -" Prometheus." +"A: Either the simulation has already finished, or you still need to start " +"Prometheus." msgstr "答:要么模拟已经完成,要么您还需要启动Prometheus。" #: ../../source/how-to-monitor-simulation.rst:232 @@ -5500,14 +5783,15 @@ msgstr "资源" msgid "" "Ray Dashboard: ``_" -msgstr "Ray 仪表盘: ``_" +msgstr "" +"Ray 仪表盘: ``_" #: ../../source/how-to-monitor-simulation.rst:236 #, fuzzy msgid "Ray Metrics: ``_" msgstr "" -"Ray 指标: ``_" +"Ray 指标: ``_" #: ../../source/how-to-run-flower-using-docker.rst:2 #, fuzzy @@ -5517,9 +5801,9 @@ msgstr "使用 Docker 运行 Flower" #: ../../source/how-to-run-flower-using-docker.rst:4 #, fuzzy msgid "" -"The simplest way to get started with Flower is by using the pre-made " -"Docker images, which you can find on `Docker Hub " -"`_." +"The simplest way to get started with Flower is by using the pre-made Docker " +"images, which you can find on `Docker Hub `_." 
msgstr "" "开始使用 Flower 的最简单方法是使用预制的 Docker 镜像,您可以在 `Docker Hub " "`_ 上找到这些镜像。" @@ -5532,25 +5816,24 @@ msgstr "开始之前,请确保 Docker 守护进程正在运行:" #: ../../source/how-to-run-flower-using-docker.rst:14 #, fuzzy msgid "" -"If you do not see the version of Docker but instead get an error saying " -"that the command was not found, you will need to install Docker first. " -"You can find installation instruction `here `_." +"If you do not see the version of Docker but instead get an error saying that " +"the command was not found, you will need to install Docker first. You can " +"find installation instruction `here `_." msgstr "" -"如果没有看到 Docker 的版本,而是出现找不到命令的错误,则需要先安装 Docker。" -"你可以在 `_ 找到安装说明。" +"如果没有看到 Docker 的版本,而是出现找不到命令的错误,则需要先安装 Docker。你" +"可以在 `_ 找到安装说明。" #: ../../source/how-to-run-flower-using-docker.rst:20 #, fuzzy msgid "" -"On Linux, Docker commands require ``sudo`` privilege. If you want to " -"avoid using ``sudo``, you can follow the `Post-installation steps " -"`_ on the " -"official Docker website." +"On Linux, Docker commands require ``sudo`` privilege. If you want to avoid " +"using ``sudo``, you can follow the `Post-installation steps `_ on the official Docker " +"website." msgstr "" -"在 Linux 上,Docker 命令需要 ``sudo`` 权限。如果你想避免使用 ``sudo``," -"可以按照 Docker 官方网站上的 `安装后步骤 `_进行操作。" +"在 Linux 上,Docker 命令需要 ``sudo`` 权限。如果你想避免使用 ``sudo``,可以按" +"照 Docker 官方网站上的 `安装后步骤 `_进行操作。" #: ../../source/how-to-run-flower-using-docker.rst:25 #, fuzzy @@ -5570,23 +5853,22 @@ msgstr "如果您想试用 Flower,可以使用以下命令:" #: ../../source/how-to-run-flower-using-docker.rst:37 #, fuzzy msgid "" -"The command will pull the Docker image with the tag " -"``1.7.0-py3.11-ubuntu22.04`` from Docker Hub. The tag contains the " -"information which Flower, Python and Ubuntu is used. In this case, it " -"uses Flower 1.7.0, Python 3.11 and Ubuntu 22.04. The ``--rm`` flag tells " -"Docker to remove the container after it exits." +"The command will pull the Docker image with the tag ``1.7.0-py3.11-" +"ubuntu22.04`` from Docker Hub. The tag contains the information which " +"Flower, Python and Ubuntu is used. In this case, it uses Flower 1.7.0, " +"Python 3.11 and Ubuntu 22.04. The ``--rm`` flag tells Docker to remove the " +"container after it exits." msgstr "" "该命令将从 Docker Hub 提取标签为``1.7.0-py3.11-ubuntu22.04``的 Docker 镜像。" -"标签包含使用 Flower、Python 和 Ubuntu 的信息。在本例中,它使用了 Flower 1.7." -"0、Python 3.11 和 Ubuntu 22.04。rm \"标记告诉 Docker 在退出后移除容器。" +"标签包含使用 Flower、Python 和 Ubuntu 的信息。在本例中,它使用了 Flower " +"1.7.0、Python 3.11 和 Ubuntu 22.04。rm \"标记告诉 Docker 在退出后移除容器。" #: ../../source/how-to-run-flower-using-docker.rst:44 #, fuzzy msgid "" -"By default, the Flower server keeps state in-memory. When using the " -"Docker flag ``--rm``, the state is not persisted between container " -"starts. We will show below how to save the state in a file on your host " -"system." +"By default, the Flower server keeps state in-memory. When using the Docker " +"flag ``--rm``, the state is not persisted between container starts. We will " +"show below how to save the state in a file on your host system." msgstr "" "默认情况下,Flower 服务器会将状态保存在内存中。使用 Docker 标志 ``--rm`` 时," "状态不会在容器启动之间持久化。下面我们将展示如何将状态保存到主机系统上的文件" @@ -5595,37 +5877,35 @@ msgstr "" #: ../../source/how-to-run-flower-using-docker.rst:48 #, fuzzy msgid "" -"The ``-p :`` flag tells Docker to map the ports " -"``9091``/``9092`` of the host to ``9091``/``9092`` of the container, " -"allowing you to access the Driver API on ``http://localhost:9091`` and " -"the Fleet API on ``http://localhost:9092``. 
Lastly, any flag that comes " -"after the tag is passed to the Flower server. Here, we are passing the " -"flag ``--insecure``." +"The ``-p :`` flag tells Docker to map the ports ``9091``/" +"``9092`` of the host to ``9091``/``9092`` of the container, allowing you to " +"access the Driver API on ``http://localhost:9091`` and the Fleet API on " +"``http://localhost:9092``. Lastly, any flag that comes after the tag is " +"passed to the Flower server. Here, we are passing the flag ``--insecure``." msgstr "" -"``-p :`` 标记会告诉 Docker 将主机的端口 ``9091``/``9092`` " -"映射到容器的端口 ``9091``/`9092``,这样你就可以在 ``http://localhost:9091`` " -"上访问 Driver API,在 ``http://localhost:9092`` 上访问 Fleet API。最后," -"标签后面的任何标志都会传递给 Flower 服务器。在这里,我们传递的标志是 " -"``--insecure`` 。" +"``-p :`` 标记会告诉 Docker 将主机的端口 ``9091``/``9092`` 映" +"射到容器的端口 ``9091``/`9092``,这样你就可以在 ``http://localhost:9091`` 上" +"访问 Driver API,在 ``http://localhost:9092`` 上访问 Fleet API。最后,标签后" +"面的任何标志都会传递给 Flower 服务器。在这里,我们传递的标志是 ``--" +"insecure`` 。" #: ../../source/how-to-run-flower-using-docker.rst:55 #, fuzzy msgid "" "The ``--insecure`` flag enables insecure communication (using HTTP, not " -"HTTPS) and should only be used for testing purposes. We strongly " -"recommend enabling `SSL `_ when " -"deploying to a production environment." +"HTTPS) and should only be used for testing purposes. We strongly recommend " +"enabling `SSL `_ when deploying to a " +"production environment." msgstr "" -"不安全 \"标志启用不安全通信(使用 HTTP,而非 HTTPS),只能用于测试目的。" -"我们强烈建议在部署到生产环境时启用 `SSL `_。" +"不安全 \"标志启用不安全通信(使用 HTTP,而非 HTTPS),只能用于测试目的。我们" +"强烈建议在部署到生产环境时启用 `SSL `_。" #: ../../source/how-to-run-flower-using-docker.rst:60 #, fuzzy msgid "" -"You can use ``--help`` to view all available flags that the server " -"supports:" +"You can use ``--help`` to view all available flags that the server supports:" msgstr "您可以使用 ``--help`` 查看服务器支持的所有可用标记:" #: ../../source/how-to-run-flower-using-docker.rst:67 @@ -5636,26 +5916,25 @@ msgstr "在主机系统上挂载卷以存储状态" #: ../../source/how-to-run-flower-using-docker.rst:69 #, fuzzy msgid "" -"If you want to persist the state of the server on your host system, all " -"you need to do is specify a path where you want to save the file on your " -"host system and a name for the database file. In the example below, we " -"tell Docker via the flag ``-v`` to mount the user's home directory " -"(``~/`` on your host) into the ``/app/`` directory of the container. " -"Furthermore, we use the flag ``--database`` to specify the name of the " -"database file." +"If you want to persist the state of the server on your host system, all you " +"need to do is specify a path where you want to save the file on your host " +"system and a name for the database file. In the example below, we tell " +"Docker via the flag ``-v`` to mount the user's home directory (``~/`` on " +"your host) into the ``/app/`` directory of the container. Furthermore, we " +"use the flag ``--database`` to specify the name of the database file." msgstr "" "如果想在主机系统上持久保存服务器的状态,只需在主机系统上指定保存文件的路径和" -"数据库文件的名称即可。在下面的示例中,我们通过标志 ``-v`` 告诉 Docker " -"将用户的主目录(主机上的 ``~/``)挂载到容器的 ``/app/`` 目录中。此外," -"我们使用标志 ``--database`` 来指定数据库文件的名称。" +"数据库文件的名称即可。在下面的示例中,我们通过标志 ``-v`` 告诉 Docker 将用户" +"的主目录(主机上的 ``~/``)挂载到容器的 ``/app/`` 目录中。此外,我们使用标志 " +"``--database`` 来指定数据库文件的名称。" #: ../../source/how-to-run-flower-using-docker.rst:82 #, fuzzy msgid "" -"As soon as the server starts, the file ``state.db`` is created in the " -"user's home directory on your host system. 
If the file already exists, " -"the server tries to restore the state from the file. To start the server " -"with an empty database, simply remove the ``state.db`` file." +"As soon as the server starts, the file ``state.db`` is created in the user's " +"home directory on your host system. If the file already exists, the server " +"tries to restore the state from the file. To start the server with an empty " +"database, simply remove the ``state.db`` file." msgstr "" "服务器一启动,就会在主机系统的用户主目录下创建文件 ``state.db``。如果该文件已" "经存在,服务器会尝试从该文件恢复状态。要以空数据库启动服务器,只需删除 " @@ -5669,35 +5948,34 @@ msgstr "启用 SSL 连接" #: ../../source/how-to-run-flower-using-docker.rst:89 #, fuzzy msgid "" -"To enable SSL, you will need a CA certificate, a server certificate and a" -" server private key." +"To enable SSL, you will need a CA certificate, a server certificate and a " +"server private key." msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" #: ../../source/how-to-run-flower-using-docker.rst:92 #, fuzzy msgid "" -"For testing purposes, you can generate your own self-signed certificates." -" The `Enable SSL connections `_ page contains a section that " -"will guide you through the process." +"For testing purposes, you can generate your own self-signed certificates. " +"The `Enable SSL connections `_ page contains a section that will guide " +"you through the process." msgstr "" "出于测试目的,你可以生成自己的自签名证书。启用 SSL 连接 `_ " -"页面中有一个部分将指导你完成这一过程。" +"docs/framework/how-to-enable-ssl-connections.html#certificates>`_ 页面中有一" +"个部分将指导你完成这一过程。" #: ../../source/how-to-run-flower-using-docker.rst:96 #, fuzzy msgid "" -"Assuming all files we need are in the local ``certificates`` directory, " -"we can use the flag ``-v`` to mount the local directory into the " -"``/app/`` directory of the container. This allows the server to access " -"the files within the container. Finally, we pass the names of the " -"certificates to the server with the ``--certificates`` flag." +"Assuming all files we need are in the local ``certificates`` directory, we " +"can use the flag ``-v`` to mount the local directory into the ``/app/`` " +"directory of the container. This allows the server to access the files " +"within the container. Finally, we pass the names of the certificates to the " +"server with the ``--certificates`` flag." msgstr "" "假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 " -"``-v`` 将本地目录挂载到容器的 ``/app/`` " -"目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates``" -" 标志将证书名称传递给服务器。" +"``-v`` 将本地目录挂载到容器的 ``/app/`` 目录中。这样,服务器就可以访问容器内" +"的文件。最后,我们使用 ``--certificates`` 标志将证书名称传递给服务器。" #: ../../source/how-to-run-flower-using-docker.rst:108 #, fuzzy @@ -5707,13 +5985,13 @@ msgstr "使用不同的 Flower 或 Python 版本" #: ../../source/how-to-run-flower-using-docker.rst:110 #, fuzzy msgid "" -"If you want to use a different version of Flower or Python, you can do so" -" by changing the tag. All versions we provide are available on `Docker " -"Hub `_." +"If you want to use a different version of Flower or Python, you can do so by " +"changing the tag. All versions we provide are available on `Docker Hub " +"`_." msgstr "" -"如果您想使用不同版本的 Flower 或 Python,可以通过更改标签来实现。" -"我们提供的所有版本都可以在 `Docker Hub `_ 上找到。" +"如果您想使用不同版本的 Flower 或 Python,可以通过更改标签来实现。我们提供的所" +"有版本都可以在 `Docker Hub `_ 上找" +"到。" #: ../../source/how-to-run-flower-using-docker.rst:114 #, fuzzy @@ -5725,9 +6003,9 @@ msgstr "将 Docker 映像固定到特定版本" msgid "" "It may happen that we update the images behind the tags. 
Such updates " "usually include security updates of system dependencies that should not " -"change the functionality of Flower. However, if you want to ensure that " -"you always use the same image, you can specify the hash of the image " -"instead of the tag." +"change the functionality of Flower. However, if you want to ensure that you " +"always use the same image, you can specify the hash of the image instead of " +"the tag." msgstr "" "我们可能会更新标签后面的图像。此类更新通常包括系统依赖项的安全更新,不会改变 " "Flower 的功能。不过,如果您想确保始终使用同一张图片,可以指定图片的哈希值而不" @@ -5738,8 +6016,9 @@ msgstr "" msgid "" "The following command returns the current image hash referenced by the " "``server:1.7.0-py3.11-ubuntu22.04`` tag:" -msgstr "下面的命令将返回由 ``server:1.7.0-py3.11-ubuntu22.04`` " -"标记引用的当前图像哈希值:" +msgstr "" +"下面的命令将返回由 ``server:1.7.0-py3.11-ubuntu22.04`` 标记引用的当前图像哈希" +"值:" #: ../../source/how-to-run-flower-using-docker.rst:128 #, fuzzy @@ -5765,81 +6044,85 @@ msgstr "运行模拟" #: ../../source/how-to-run-simulations.rst:8 msgid "" "Simulating Federated Learning workloads is useful for a multitude of use-" -"cases: you might want to run your workload on a large cohort of clients " -"but without having to source, configure and mange a large number of " -"physical devices; you might want to run your FL workloads as fast as " -"possible on the compute systems you have access to without having to go " -"through a complex setup process; you might want to validate your " -"algorithm on different scenarios at varying levels of data and system " -"heterogeneity, client availability, privacy budgets, etc. These are among" -" some of the use-cases where simulating FL workloads makes sense. Flower " -"can accommodate these scenarios by means of its `VirtualClientEngine " -"`_ or " -"VCE." -msgstr "" -"模拟联邦学习工作负载可用于多种案例:您可能希望在大量客户端上运行您的工作负载,但无需采购、配置和管理大量物理设备;您可能希望在您可以访问的计算系统上尽可能快地运行您的" -" FL 工作负载,而无需经过复杂的设置过程;您可能希望在不同数据和系统异构性、客户端可用性、隐私预算等不同水平的场景中验证您的算法。这些都是模拟 " -"FL 工作负载的一些案例。Flower 可以通过其 \"虚拟客户端引擎\"(VirtualClientEngine)_或 VCE 来匹配这些情况。" +"cases: you might want to run your workload on a large cohort of clients but " +"without having to source, configure and mange a large number of physical " +"devices; you might want to run your FL workloads as fast as possible on the " +"compute systems you have access to without having to go through a complex " +"setup process; you might want to validate your algorithm on different " +"scenarios at varying levels of data and system heterogeneity, client " +"availability, privacy budgets, etc. These are among some of the use-cases " +"where simulating FL workloads makes sense. Flower can accommodate these " +"scenarios by means of its `VirtualClientEngine `_ or VCE." +msgstr "" +"模拟联邦学习工作负载可用于多种案例:您可能希望在大量客户端上运行您的工作负" +"载,但无需采购、配置和管理大量物理设备;您可能希望在您可以访问的计算系统上尽" +"可能快地运行您的 FL 工作负载,而无需经过复杂的设置过程;您可能希望在不同数据" +"和系统异构性、客户端可用性、隐私预算等不同水平的场景中验证您的算法。这些都是" +"模拟 FL 工作负载的一些案例。Flower 可以通过其 \"虚拟客户端引擎" +"\"(VirtualClientEngine)_或 VCE 来匹配这些情况。" #: ../../source/how-to-run-simulations.rst:10 msgid "" -"The :code:`VirtualClientEngine` schedules, launches and manages `virtual`" -" clients. These clients are identical to `non-virtual` clients (i.e. the " -"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " -"creating a class inheriting, for example, from `flwr.client.NumPyClient " -"`_ and therefore behave in an " -"identical way. 
In addition to that, clients managed by the " -":code:`VirtualClientEngine` are:" +"The :code:`VirtualClientEngine` schedules, launches and manages `virtual` " +"clients. These clients are identical to `non-virtual` clients (i.e. the ones " +"you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by creating a " +"class inheriting, for example, from `flwr.client.NumPyClient `_ and therefore behave in an identical way. In " +"addition to that, clients managed by the :code:`VirtualClientEngine` are:" msgstr "" -":code:`VirtualClientEngine`用来规划,启动和管理`虚拟`客户端。这些客户端跟`非虚拟`客户端是一样的(即为您通过`flwr.client.start_client" -" `_启动的客户端),因为它们可以通过创建一个继承自 " -"`flwr.client.NumPyClient `_ " -"的类进行配置,因此其行为方式相同。另外,由 `VirtualClientEngine` 管理的客户端有:" +":code:`VirtualClientEngine`用来规划,启动和管理`虚拟`客户端。这些客户端跟`非" +"虚拟`客户端是一样的(即为您通过`flwr.client.start_client `_启动的客户端),因为它们可以通过创建一个继承自 `flwr." +"client.NumPyClient `_ 的类进行配" +"置,因此其行为方式相同。另外,由 `VirtualClientEngine` 管理的客户端有:" #: ../../source/how-to-run-simulations.rst:12 msgid "" -"resource-aware: this means that each client gets assigned a portion of " -"the compute and memory on your system. You as a user can control this at " -"the beginning of the simulation and allows you to control the degree of " +"resource-aware: this means that each client gets assigned a portion of the " +"compute and memory on your system. You as a user can control this at the " +"beginning of the simulation and allows you to control the degree of " "parallelism of your Flower FL simulation. The fewer the resources per " "client, the more clients can run concurrently on the same hardware." msgstr "" -"资源感知:这意味着每个客户端都会分配到系统中的一部分计算和内存。作为用户,您可以在模拟开始时对其进行控制,从而控制 Flower FL " -"模拟的并行程度。每个客户端的资源越少,在同一硬件上并发运行的客户端就越多。" +"资源感知:这意味着每个客户端都会分配到系统中的一部分计算和内存。作为用户,您" +"可以在模拟开始时对其进行控制,从而控制 Flower FL 模拟的并行程度。每个客户端的" +"资源越少,在同一硬件上并发运行的客户端就越多。" #: ../../source/how-to-run-simulations.rst:13 msgid "" -"self-managed: this means that you as a user do not need to launch clients" -" manually, instead this gets delegated to :code:`VirtualClientEngine`'s " +"self-managed: this means that you as a user do not need to launch clients " +"manually, instead this gets delegated to :code:`VirtualClientEngine`'s " "internals." -msgstr "自管理:这意味着用户无需手动启动客户端,而是由 :code:`VirtualClientEngine` 负责。" +msgstr "" +"自管理:这意味着用户无需手动启动客户端,而是由 :code:`VirtualClientEngine` 负" +"责。" #: ../../source/how-to-run-simulations.rst:14 msgid "" -"ephemeral: this means that a client is only materialized when it is " -"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," -" releasing the resources it was assigned and allowing in this way other " -"clients to participate." +"ephemeral: this means that a client is only materialized when it is required " +"in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards, releasing the resources it was " +"assigned and allowing in this way other clients to participate." msgstr "" -"即时性:这意味着客户端只有在 FL 进程中需要它时才会被实体化(例如执行 `fit() `_ " -")。之后该对象将被销毁,释放分配给它的资源,并允许其他客户端以这种方式参与。" +"即时性:这意味着客户端只有在 FL 进程中需要它时才会被实体化(例如执行 `fit() " +"`_ )。之后该对象将被销毁,释放分配" +"给它的资源,并允许其他客户端以这种方式参与。" #: ../../source/how-to-run-simulations.rst:16 msgid "" "The :code:`VirtualClientEngine` implements `virtual` clients using `Ray " "`_, an open-source framework for scalable Python " -"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use " -"of `Actors `_ to " -"spawn `virtual` clients and run their workload." +"workloads. 
In particular, Flower's :code:`VirtualClientEngine` makes use of " +"`Actors `_ to spawn " +"`virtual` clients and run their workload." msgstr "" -":code:`VirtualClientEngine`使用`Ray " -"`_来实现`虚拟`客户端,这是一个用于可扩展 Python 工作负载的开源框架。特别地,Flower 的" -" :code:`VirtualClientEngine` 使用 `Actors `_ 来生成 `virtual` 客户端并运行它们的工作负载。" +":code:`VirtualClientEngine`使用`Ray `_来实现`虚拟`客户" +"端,这是一个用于可扩展 Python 工作负载的开源框架。特别地,Flower 的 :code:" +"`VirtualClientEngine` 使用 `Actors `_ 来生成 `virtual` 客户端并运行它们的工作负载。" #: ../../source/how-to-run-simulations.rst:20 msgid "Launch your Flower simulation" @@ -5847,16 +6130,16 @@ msgstr "启动 Flower 模拟" #: ../../source/how-to-run-simulations.rst:22 msgid "" -"Running Flower simulations still require you to define your client class," -" a strategy, and utility functions to download and load (and potentially " -"partition) your dataset. With that out of the way, launching your " -"simulation is done with `start_simulation `_ and a minimal example looks" -" as follows:" +"Running Flower simulations still require you to define your client class, a " +"strategy, and utility functions to download and load (and potentially " +"partition) your dataset. With that out of the way, launching your simulation " +"is done with `start_simulation `_ and a minimal example looks as follows:" msgstr "" -"运行 Flower 模拟器仍然需要定义客户端类、策略以及下载和加载(可能还需要分割)数据集的实用程序。在完成这些工作后,就可以使用 " -"\"start_simulation `_\" 来启动模拟了,一个最简单的示例如下:" +"运行 Flower 模拟器仍然需要定义客户端类、策略以及下载和加载(可能还需要分割)" +"数据集的实用程序。在完成这些工作后,就可以使用 \"start_simulation `_\" 来启动模拟了,一个最简单的示" +"例如下:" #: ../../source/how-to-run-simulations.rst:44 msgid "VirtualClientEngine resources" @@ -5864,23 +6147,24 @@ msgstr "虚拟客户端引擎资源" #: ../../source/how-to-run-simulations.rst:45 msgid "" -"By default the VCE has access to all system resources (i.e. all CPUs, all" -" GPUs, etc) since that is also the default behavior when starting Ray. " -"However, in some settings you might want to limit how many of your system" -" resources are used for simulation. You can do this via the " -":code:`ray_init_args` input argument to :code:`start_simulation` which " -"the VCE internally passes to Ray's :code:`ray.init` command. For a " -"complete list of settings you can configure check the `ray.init " -"`_" -" documentation. Do not set :code:`ray_init_args` if you want the VCE to " -"use all your system's CPUs and GPUs." -msgstr "" -"默认情况下,VCE 可以访问所有系统资源(即所有 CPU、所有 GPU 等),因为这也是启动 Ray " -"时的默认行为。不过,在某些设置中,您可能希望限制有多少系统资源用于模拟。您可以通过 :code:`ray_init_args` 输入到 " -":code:`start_simulation` 的参数来做到这一点,VCE 会在内部将该参数传递给 Ray 的 :code:`ray.init`" -" 命令。有关您可以配置的设置的完整列表,请查看 `ray.init `_ 文档。如果希望 VCE 使用系统中所有的 CPU 和 " -"GPU,请不要设置 :code:`ray_init_args`。" +"By default the VCE has access to all system resources (i.e. all CPUs, all " +"GPUs, etc) since that is also the default behavior when starting Ray. " +"However, in some settings you might want to limit how many of your system " +"resources are used for simulation. You can do this via the :code:" +"`ray_init_args` input argument to :code:`start_simulation` which the VCE " +"internally passes to Ray's :code:`ray.init` command. For a complete list of " +"settings you can configure check the `ray.init `_ documentation. Do not set :" +"code:`ray_init_args` if you want the VCE to use all your system's CPUs and " +"GPUs." 
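As a rough, self-contained sketch (the toy client and the resource numbers are purely illustrative, not a recommended configuration), capping the simulation at 8 CPU cores and a single GPU could look like this::

    import numpy as np
    import flwr as fl


    class ToyClient(fl.client.NumPyClient):
        """Toy client holding a single weight vector, for illustration only."""

        def __init__(self) -> None:
            self.weights = [np.zeros(10, dtype=np.float32)]

        def get_parameters(self, config):
            return self.weights

        def fit(self, parameters, config):
            updated = [w + 0.01 for w in parameters]  # pretend to train
            return updated, 10, {}

        def evaluate(self, parameters, config):
            return 0.5, 10, {}


    def client_fn(cid: str) -> fl.client.NumPyClient:
        return ToyClient()


    if __name__ == "__main__":
        # Cap the VirtualClientEngine at 8 CPU cores and one GPU instead of
        # letting Ray claim every resource it can see (values illustrative).
        fl.simulation.start_simulation(
            client_fn=client_fn,
            num_clients=100,
            config=fl.server.ServerConfig(num_rounds=3),
            ray_init_args={"num_cpus": 8, "num_gpus": 1},
        )

Leaving :code:`ray_init_args` unset keeps the default behaviour described above, i.e. Ray claims every CPU and GPU it can see.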
+msgstr "" +"默认情况下,VCE 可以访问所有系统资源(即所有 CPU、所有 GPU 等),因为这也是启" +"动 Ray 时的默认行为。不过,在某些设置中,您可能希望限制有多少系统资源用于模" +"拟。您可以通过 :code:`ray_init_args` 输入到 :code:`start_simulation` 的参数来" +"做到这一点,VCE 会在内部将该参数传递给 Ray 的 :code:`ray.init` 命令。有关您可" +"以配置的设置的完整列表,请查看 `ray.init `_ 文档。如果希望 VCE 使用系统中所有的 " +"CPU 和 GPU,请不要设置 :code:`ray_init_args`。" #: ../../source/how-to-run-simulations.rst:62 msgid "Assigning client resources" @@ -5888,27 +6172,27 @@ msgstr "分配客户端资源" #: ../../source/how-to-run-simulations.rst:63 msgid "" -"By default the :code:`VirtualClientEngine` assigns a single CPU core (and" -" nothing else) to each virtual client. This means that if your system has" -" 10 cores, that many virtual clients can be concurrently running." +"By default the :code:`VirtualClientEngine` assigns a single CPU core (and " +"nothing else) to each virtual client. This means that if your system has 10 " +"cores, that many virtual clients can be concurrently running." msgstr "" -"默认情况下,:code:`VirtualClientEngine` 会为每个虚拟客户端分配一个 CPU " -"内核(不分配其他任何内核)。这意味着,如果系统有 10 个内核,那么可以同时运行这么多虚拟客户端。" +"默认情况下,:code:`VirtualClientEngine` 会为每个虚拟客户端分配一个 CPU 内核" +"(不分配其他任何内核)。这意味着,如果系统有 10 个内核,那么可以同时运行这么" +"多虚拟客户端。" #: ../../source/how-to-run-simulations.rst:65 msgid "" -"More often than not, you would probably like to adjust the resources your" -" clients get assigned based on the complexity (i.e. compute and memory " -"footprint) of your FL workload. You can do so when starting your " -"simulation by setting the argument `client_resources` to " -"`start_simulation `_." -" Two keys are internally used by Ray to schedule and spawn workloads (in " -"our case Flower clients):" +"More often than not, you would probably like to adjust the resources your " +"clients get assigned based on the complexity (i.e. compute and memory " +"footprint) of your FL workload. You can do so when starting your simulation " +"by setting the argument `client_resources` to `start_simulation `_. Two keys are internally used " +"by Ray to schedule and spawn workloads (in our case Flower clients):" msgstr "" -"通常情况下,您可能希望根据 FL 工作负载的复杂性(即计算和内存占用)来调整分配给客户端的资源。您可以在启动模拟时将参数 " -"`client_resources` 设置为 `start_simulation `_ 。Ray " -"内部使用两个键来调度和生成工作负载(在我们的例子中是 Flower 客户端):" +"通常情况下,您可能希望根据 FL 工作负载的复杂性(即计算和内存占用)来调整分配" +"给客户端的资源。您可以在启动模拟时将参数 `client_resources` 设置为 " +"`start_simulation `_ 。" +"Ray 内部使用两个键来调度和生成工作负载(在我们的例子中是 Flower 客户端):" #: ../../source/how-to-run-simulations.rst:67 msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." @@ -5928,29 +6212,30 @@ msgstr "让我们来看几个例子:" msgid "" "While the :code:`client_resources` can be used to control the degree of " "concurrency in your FL simulation, this does not stop you from running " -"dozens, hundreds or even thousands of clients in the same round and " -"having orders of magnitude more `dormant` (i.e. not participating in a " -"round) clients. Let's say you want to have 100 clients per round but your" -" system can only accommodate 8 clients concurrently. The " -":code:`VirtualClientEngine` will schedule 100 jobs to run (each " -"simulating a client sampled by the strategy) and then will execute them " -"in a resource-aware manner in batches of 8." 
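For example, assuming a :code:`client_fn` like the toy one sketched earlier and 8 CPU cores available to Ray, the following illustrative call lets at most 8 of the 100 sampled clients run concurrently::

    import flwr as fl

    # One CPU core and no GPU per virtual client: with 8 cores available to
    # Ray, the 100 sampled clients are executed in batches of (at most) 8.
    fl.simulation.start_simulation(
        client_fn=client_fn,  # assumed to be defined as in the earlier sketch
        num_clients=100,
        config=fl.server.ServerConfig(num_rounds=3),
        client_resources={"num_cpus": 1, "num_gpus": 0.0},
    )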
-msgstr "" -"虽然 :code:`client_resources` 可用来控制 FL " -"模拟的并发程度,但这并不能阻止您在同一轮模拟中运行几十、几百甚至上千个客户端,并拥有数量级更多的 " -"\"休眠\"(即不参与一轮模拟)客户端。比方说,您希望每轮有 100 个客户端,但您的系统只能同时容纳 8 " -"个客户端。:code:`VirtualClientEngine` 将安排运行 100 " -"个工作(每个工作模拟策略采样的一个客户端),然后以资源感知的方式分批执行。" +"dozens, hundreds or even thousands of clients in the same round and having " +"orders of magnitude more `dormant` (i.e. not participating in a round) " +"clients. Let's say you want to have 100 clients per round but your system " +"can only accommodate 8 clients concurrently. The :code:`VirtualClientEngine` " +"will schedule 100 jobs to run (each simulating a client sampled by the " +"strategy) and then will execute them in a resource-aware manner in batches " +"of 8." +msgstr "" +"虽然 :code:`client_resources` 可用来控制 FL 模拟的并发程度,但这并不能阻止您" +"在同一轮模拟中运行几十、几百甚至上千个客户端,并拥有数量级更多的 \"休眠\"(即" +"不参与一轮模拟)客户端。比方说,您希望每轮有 100 个客户端,但您的系统只能同时" +"容纳 8 个客户端。:code:`VirtualClientEngine` 将安排运行 100 个工作(每个工作" +"模拟策略采样的一个客户端),然后以资源感知的方式分批执行。" #: ../../source/how-to-run-simulations.rst:91 msgid "" "To understand all the intricate details on how resources are used to " -"schedule FL clients and how to define custom resources, please take a " -"look at the `Ray documentation `_." +"schedule FL clients and how to define custom resources, please take a look " +"at the `Ray documentation `_." msgstr "" -"要了解资源如何用于调度 FL 客户端以及如何定义自定义资源的所有复杂细节,请查看 `Ray 文档 " -"`_。" +"要了解资源如何用于调度 FL 客户端以及如何定义自定义资源的所有复杂细节,请查看 " +"`Ray 文档 `_。" #: ../../source/how-to-run-simulations.rst:94 msgid "Simulation examples" @@ -5958,26 +6243,27 @@ msgstr "模拟示例" #: ../../source/how-to-run-simulations.rst:96 msgid "" -"A few ready-to-run complete examples for Flower simulation in " -"Tensorflow/Keras and PyTorch are provided in the `Flower repository " -"`_. You can run them on Google Colab too:" +"A few ready-to-run complete examples for Flower simulation in Tensorflow/" +"Keras and PyTorch are provided in the `Flower repository `_. You can run them on Google Colab too:" msgstr "" -"在 Tensorflow/Keras 和 PyTorch 中进行 Flower 模拟的几个可随时运行的完整示例已在 `Flower 库 " -"`_ 中提供。您也可以在 Google Colab 上运行它们:" +"在 Tensorflow/Keras 和 PyTorch 中进行 Flower 模拟的几个可随时运行的完整示例已" +"在 `Flower 库 `_ 中提供。您也可以在 Google " +"Colab 上运行它们:" #: ../../source/how-to-run-simulations.rst:98 msgid "" -"`Tensorflow/Keras Simulation " -"`_: 100 clients collaboratively train a MLP model on MNIST." +"`Tensorflow/Keras Simulation `_: 100 clients collaboratively train a MLP " +"model on MNIST." msgstr "" -"Tensorflow/Keras模拟 `_:100个客户端在MNIST上协作训练一个MLP模型。" +"Tensorflow/Keras模拟 `_:100个客户端在MNIST上协作训练一个MLP模型。" #: ../../source/how-to-run-simulations.rst:99 msgid "" -"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " +"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " "MNIST." msgstr "" "PyTorch 模拟 `)" -msgstr "在所有节点中都有一份数据集副本(更多相关信息请参阅 :ref:`模拟注意事项`)" +"Have a copy of your dataset in all nodes (more about this in :ref:" +"`simulation considerations `)" +msgstr "" +"在所有节点中都有一份数据集副本(更多相关信息请参阅 :ref:`模拟注意事项" +"`)" #: ../../source/how-to-run-simulations.rst:111 msgid "" -"Pass :code:`ray_init_args={\"address\"=\"auto\"}` to `start_simulation " -"`_ so the " -":code:`VirtualClientEngine` attaches to a running Ray instance." +"Pass :code:`ray_init_args={\"address\"=\"auto\"}` to `start_simulation `_ so the :code:" +"`VirtualClientEngine` attaches to a running Ray instance." 
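A minimal sketch of that call (again assuming :code:`client_fn` is defined elsewhere) could look like this::

    import flwr as fl

    # Attach to the Ray cluster that was started with `ray start --head`
    # (and joined by the worker nodes) instead of launching a new instance.
    fl.simulation.start_simulation(
        client_fn=client_fn,  # assumed to be defined elsewhere
        num_clients=100,
        config=fl.server.ServerConfig(num_rounds=3),
        ray_init_args={"address": "auto"},
    )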
msgstr "" "将 :code:`ray_init_args={\"address\"=\"auto\"}`传递给 `start_simulation `_ ,这样 " -":code:`VirtualClientEngine`就会连接到正在运行的 Ray 实例。" +"api-flwr.html#flwr.simulation.start_simulation>`_ ,这样 :code:" +"`VirtualClientEngine`就会连接到正在运行的 Ray 实例。" #: ../../source/how-to-run-simulations.rst:112 msgid "" -"Start Ray on you head node: on the terminal type :code:`ray start " -"--head`. This command will print a few lines, one of which indicates how " -"to attach other nodes to the head node." +"Start Ray on you head node: on the terminal type :code:`ray start --head`. " +"This command will print a few lines, one of which indicates how to attach " +"other nodes to the head node." msgstr "" -"在头部节点上启动 Ray:在终端上输入 :code:`raystart--" -"head`。该命令将打印几行输出,其中一行说明如何将其他节点连接到头部节点。" +"在头部节点上启动 Ray:在终端上输入 :code:`raystart--head`。该命令将打印几行输" +"出,其中一行说明如何将其他节点连接到头部节点。" #: ../../source/how-to-run-simulations.rst:113 msgid "" -"Attach other nodes to the head node: copy the command shown after " -"starting the head and execute it on terminal of a new node: for example " -":code:`ray start --address='192.168.1.132:6379'`" +"Attach other nodes to the head node: copy the command shown after starting " +"the head and execute it on terminal of a new node: for example :code:`ray " +"start --address='192.168.1.132:6379'`" msgstr "" -"将其他节点附加到头部节点:复制启动头部后显示的命令,并在新节点的终端上执行:例如 :code:`ray start " -"--address='192.168.1.132:6379'`" +"将其他节点附加到头部节点:复制启动头部后显示的命令,并在新节点的终端上执行:" +"例如 :code:`ray start --address='192.168.1.132:6379'`" #: ../../source/how-to-run-simulations.rst:115 msgid "" "With all the above done, you can run your code from the head node as you " "would if the simulation was running on a single node." -msgstr "完成上述所有操作后,您就可以在头部节点上运行代码了,就像在单个节点上运行模拟一样。" +msgstr "" +"完成上述所有操作后,您就可以在头部节点上运行代码了,就像在单个节点上运行模拟" +"一样。" #: ../../source/how-to-run-simulations.rst:117 msgid "" -"Once your simulation is finished, if you'd like to dismantle your cluster" -" you simply need to run the command :code:`ray stop` in each node's " -"terminal (including the head node)." -msgstr "模拟结束后,如果要拆除集群,只需在每个节点(包括头部节点)的终端运行 :code:`ray stop` 命令即可。" +"Once your simulation is finished, if you'd like to dismantle your cluster " +"you simply need to run the command :code:`ray stop` in each node's terminal " +"(including the head node)." +msgstr "" +"模拟结束后,如果要拆除集群,只需在每个节点(包括头部节点)的终端运行 :code:" +"`ray stop` 命令即可。" #: ../../source/how-to-run-simulations.rst:120 msgid "Multi-node simulation good-to-know" @@ -6061,28 +6355,27 @@ msgstr "在此,我们列举了运行多节点 FL 模拟时的一些有趣功 #: ../../source/how-to-run-simulations.rst:124 msgid "" -"User :code:`ray status` to check all nodes connected to your head node as" -" well as the total resources available to the " -":code:`VirtualClientEngine`." +"User :code:`ray status` to check all nodes connected to your head node as " +"well as the total resources available to the :code:`VirtualClientEngine`." msgstr "" -"使用 :code:`ray status` 查看连接到头部节点的所有节点,以及 :code:`VirtualClientEngine` " -"可用的总资源。" +"使用 :code:`ray status` 查看连接到头部节点的所有节点,以及 :code:" +"`VirtualClientEngine` 可用的总资源。" #: ../../source/how-to-run-simulations.rst:126 msgid "" -"When attaching a new node to the head, all its resources (i.e. all CPUs, " -"all GPUs) will be visible by the head node. This means that the " -":code:`VirtualClientEngine` can schedule as many `virtual` clients as " -"that node can possible run. In some settings you might want to exclude " -"certain resources from the simulation. 
You can do this by appending " -"`--num-cpus=` and/or `--num-" -"gpus=` in any :code:`ray start` command (including " -"when starting the head)" +"When attaching a new node to the head, all its resources (i.e. all CPUs, all " +"GPUs) will be visible by the head node. This means that the :code:" +"`VirtualClientEngine` can schedule as many `virtual` clients as that node " +"can possible run. In some settings you might want to exclude certain " +"resources from the simulation. You can do this by appending `--num-" +"cpus=` and/or `--num-gpus=` in any :" +"code:`ray start` command (including when starting the head)" msgstr "" -"将新节点附加到头部节点时,头部节点将可见其所有资源(即所有 CPU 和 GPU)。这意味着 :code:`VirtualClientEngine`" -" 可以调度尽可能多的 \"虚拟 \"客户端来运行该节点。在某些设置中,您可能希望将某些资源排除在模拟之外。为此,您可以在任何 :code:`ray" -" start` 命令(包括启动头部时)中添加 `--num-cpus=`和/或 `--num-" -"gpus=`" +"将新节点附加到头部节点时,头部节点将可见其所有资源(即所有 CPU 和 GPU)。这意" +"味着 :code:`VirtualClientEngine` 可以调度尽可能多的 \"虚拟 \"客户端来运行该节" +"点。在某些设置中,您可能希望将某些资源排除在模拟之外。为此,您可以在任何 :" +"code:`ray start` 命令(包括启动头部时)中添加 `--num-" +"cpus=`和/或 `--num-gpus=`" #: ../../source/how-to-run-simulations.rst:132 msgid "Considerations for simulations" @@ -6090,23 +6383,26 @@ msgstr "模拟的注意事项" #: ../../source/how-to-run-simulations.rst:135 msgid "" -"We are actively working on these fronts so to make it trivial to run any " -"FL workload with Flower simulation." -msgstr "我们正在积极开展这些方面的工作,以便使 FL 工作负载与 Flower 模拟的运行变得轻而易举。" +"We are actively working on these fronts so to make it trivial to run any FL " +"workload with Flower simulation." +msgstr "" +"我们正在积极开展这些方面的工作,以便使 FL 工作负载与 Flower 模拟的运行变得轻" +"而易举。" #: ../../source/how-to-run-simulations.rst:138 msgid "" -"The current VCE allows you to run Federated Learning workloads in " -"simulation mode whether you are prototyping simple scenarios on your " -"personal laptop or you want to train a complex FL pipeline across " -"multiple high-performance GPU nodes. While we add more capabilities to " -"the VCE, the points below highlight some of the considerations to keep in" -" mind when designing your FL pipeline with Flower. We also highlight a " -"couple of current limitations in our implementation." +"The current VCE allows you to run Federated Learning workloads in simulation " +"mode whether you are prototyping simple scenarios on your personal laptop or " +"you want to train a complex FL pipeline across multiple high-performance GPU " +"nodes. While we add more capabilities to the VCE, the points below highlight " +"some of the considerations to keep in mind when designing your FL pipeline " +"with Flower. We also highlight a couple of current limitations in our " +"implementation." msgstr "" -"当前的 VCE 允许您在模拟模式下运行联邦学习工作负载,无论您是在个人笔记本电脑上建立简单的场景原型,还是要在多个高性能 GPU 节点上训练复杂的" -" FL情景。虽然我们为 VCE 增加了更多的功能,但以下几点强调了在使用 Flower 设计 FL " -"时需要注意的一些事项。我们还强调了我们的实现中目前存在的一些局限性。" +"当前的 VCE 允许您在模拟模式下运行联邦学习工作负载,无论您是在个人笔记本电脑上" +"建立简单的场景原型,还是要在多个高性能 GPU 节点上训练复杂的 FL情景。虽然我们" +"为 VCE 增加了更多的功能,但以下几点强调了在使用 Flower 设计 FL 时需要注意的一" +"些事项。我们还强调了我们的实现中目前存在的一些局限性。" #: ../../source/how-to-run-simulations.rst:141 msgid "GPU resources" @@ -6114,28 +6410,29 @@ msgstr "GPU 资源" #: ../../source/how-to-run-simulations.rst:143 msgid "" -"The VCE assigns a share of GPU memory to a client that specifies the key " -":code:`num_gpus` in :code:`client_resources`. This being said, Ray (used " +"The VCE assigns a share of GPU memory to a client that specifies the key :" +"code:`num_gpus` in :code:`client_resources`. 
This being said, Ray (used " "internally by the VCE) is by default:" msgstr "" -"VCE 会为指定 :code:`client_resources` 中 :code:`num_gpus` 关键字的客户端分配 GPU " -"内存份额。也就是说,Ray(VCE 内部使用)是默认的:" +"VCE 会为指定 :code:`client_resources` 中 :code:`num_gpus` 关键字的客户端分配 " +"GPU 内存份额。也就是说,Ray(VCE 内部使用)是默认的:" #: ../../source/how-to-run-simulations.rst:146 msgid "" -"not aware of the total VRAM available on the GPUs. This means that if you" -" set :code:`num_gpus=0.5` and you have two GPUs in your system with " -"different (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients" -" concurrently." +"not aware of the total VRAM available on the GPUs. This means that if you " +"set :code:`num_gpus=0.5` and you have two GPUs in your system with different " +"(e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients concurrently." msgstr "" -"不知道 GPU 上可用的总 VRAM。这意味着,如果您设置 :code:`num_gpus=0.5`,而系统中有两个不同(如 32GB 和 " -"8GB)VRAM 的 GPU,它们都将同时运行 2 个客户端。" +"不知道 GPU 上可用的总 VRAM。这意味着,如果您设置 :code:`num_gpus=0.5`,而系统" +"中有两个不同(如 32GB 和 8GB)VRAM 的 GPU,它们都将同时运行 2 个客户端。" #: ../../source/how-to-run-simulations.rst:147 msgid "" "not aware of other unrelated (i.e. not created by the VCE) workloads are " "running on the GPU. Two takeaways from this are:" -msgstr "不知道 GPU 上正在运行其他无关(即不是由 VCE 创建)的工作负载。从中可以得到以下两点启示:" +msgstr "" +"不知道 GPU 上正在运行其他无关(即不是由 VCE 创建)的工作负载。从中可以得到以" +"下两点启示:" #: ../../source/how-to-run-simulations.rst:149 msgid "" @@ -6143,28 +6440,27 @@ msgid "" "aggregation (by instance when making use of the `evaluate method `_)" msgstr "" -"您的 Flower 服务器可能需要 GPU 来评估聚合后的 \"全局模型\"(例如在使用 \"评估方法\"`_时)" +"您的 Flower 服务器可能需要 GPU 来评估聚合后的 \"全局模型\"(例如在使用 \"评估" +"方法\"`_时)" #: ../../source/how-to-run-simulations.rst:150 msgid "" "If you want to run several independent Flower simulations on the same " -"machine you need to mask-out your GPUs with " -":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching your " -"experiment." +"machine you need to mask-out your GPUs with :code:" +"`CUDA_VISIBLE_DEVICES=\"\"` when launching your experiment." msgstr "" -"如果您想在同一台机器上运行多个独立的 Flower 模拟,则需要在启动实验时使用 " -":code:`CUDA_VISIBLE_DEVICES=\"\"` 屏蔽 GPU。" +"如果您想在同一台机器上运行多个独立的 Flower 模拟,则需要在启动实验时使用 :" +"code:`CUDA_VISIBLE_DEVICES=\"\"` 屏蔽 GPU。" #: ../../source/how-to-run-simulations.rst:153 msgid "" -"In addition, the GPU resource limits passed to :code:`client_resources` " -"are not `enforced` (i.e. they can be exceeded) which can result in the " -"situation of client using more VRAM than the ratio specified when " -"starting the simulation." +"In addition, the GPU resource limits passed to :code:`client_resources` are " +"not `enforced` (i.e. they can be exceeded) which can result in the situation " +"of client using more VRAM than the ratio specified when starting the " +"simulation." msgstr "" -"此外,传递给 :code:`client_resources` 的 GPU 资源限制并不是 \"强制 \"的(即可以超出),这可能导致客户端使用的" -" VRAM 超过启动模拟时指定的比例。" +"此外,传递给 :code:`client_resources` 的 GPU 资源限制并不是 \"强制 \"的(即可" +"以超出),这可能导致客户端使用的 VRAM 超过启动模拟时指定的比例。" #: ../../source/how-to-run-simulations.rst:156 msgid "TensorFlow with GPUs" @@ -6172,42 +6468,40 @@ msgstr "使用 GPU 的 TensorFlow" #: ../../source/how-to-run-simulations.rst:158 msgid "" -"When `using a GPU with TensorFlow " -"`_ nearly your entire GPU memory of" -" all your GPUs visible to the process will be mapped. This is done by " -"TensorFlow for optimization purposes. However, in settings such as FL " -"simulations where we want to split the GPU into multiple `virtual` " -"clients, this is not a desirable mechanism. 
Luckily we can disable this " -"default behavior by `enabling memory growth " -"`_." +"When `using a GPU with TensorFlow `_ " +"nearly your entire GPU memory of all your GPUs visible to the process will " +"be mapped. This is done by TensorFlow for optimization purposes. However, in " +"settings such as FL simulations where we want to split the GPU into multiple " +"`virtual` clients, this is not a desirable mechanism. Luckily we can disable " +"this default behavior by `enabling memory growth `_." msgstr "" -"在 TensorFlow `_ 中使用 GPU 时,几乎所有进程可见的" -" GPU 内存都将被映射。TensorFlow 这样做是出于优化目的。然而,在 FL 模拟等设置中,我们希望将 GPU 分割成多个 \"虚拟 " -"\"客户端,这并不是一个理想的机制。幸运的是,我们可以通过 `启用内存增长 " -"`_来禁用这一默认行为。" +"在 TensorFlow `_ 中使用 GPU 时,几乎所" +"有进程可见的 GPU 内存都将被映射。TensorFlow 这样做是出于优化目的。然而,在 " +"FL 模拟等设置中,我们希望将 GPU 分割成多个 \"虚拟 \"客户端,这并不是一个理想" +"的机制。幸运的是,我们可以通过 `启用内存增长 `_来禁用这一默认行为。" #: ../../source/how-to-run-simulations.rst:160 msgid "" -"This would need to be done in the main process (which is where the server" -" would run) and in each Actor created by the VCE. By means of " -":code:`actor_kwargs` we can pass the reserved key `\"on_actor_init_fn\"` " -"in order to specify a function to be executed upon actor initialization. " -"In this case, to enable GPU growth for TF workloads. It would look as " -"follows:" +"This would need to be done in the main process (which is where the server " +"would run) and in each Actor created by the VCE. By means of :code:" +"`actor_kwargs` we can pass the reserved key `\"on_actor_init_fn\"` in order " +"to specify a function to be executed upon actor initialization. In this " +"case, to enable GPU growth for TF workloads. It would look as follows:" msgstr "" -"这需要在主进程(也就是服务器运行的地方)和 VCE 创建的每个角色中完成。通过 " -":code:`actor_kwargs`,我们可以传递保留关键字`\"on_actor_init_fn\"`,以指定在角色初始化时执行的函数。在本例中,为了使" -" TF 工作负载的 GPU 增长,它看起来如下:" +"这需要在主进程(也就是服务器运行的地方)和 VCE 创建的每个角色中完成。通过 :" +"code:`actor_kwargs`,我们可以传递保留关键字`\"on_actor_init_fn\"`,以指定在角" +"色初始化时执行的函数。在本例中,为了使 TF 工作负载的 GPU 增长,它看起来如下:" #: ../../source/how-to-run-simulations.rst:179 msgid "" "This is precisely the mechanism used in `Tensorflow/Keras Simulation " -"`_ example." +"`_ " +"example." msgstr "" -"这正是 \"Tensorflow/Keras 模拟 " -"`_\"示例中使用的机制。" +"这正是 \"Tensorflow/Keras 模拟 `_\"示例中使用的机制。" #: ../../source/how-to-run-simulations.rst:183 msgid "Multi-node setups" @@ -6215,35 +6509,35 @@ msgstr "多节点设置" #: ../../source/how-to-run-simulations.rst:185 msgid "" -"The VCE does not currently offer a way to control on which node a " -"particular `virtual` client is executed. In other words, if more than a " -"single node have the resources needed by a client to run, then any of " -"those nodes could get the client workload scheduled onto. Later in the FL" -" process (i.e. in a different round) the same client could be executed by" -" a different node. Depending on how your clients access their datasets, " -"this might require either having a copy of all dataset partitions on all " -"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " -"circumvent data duplication." -msgstr "" -"VCE 目前不提供控制特定 \"虚拟 " -"\"客户端在哪个节点上执行的方法。换句话说,如果不止一个节点拥有客户端运行所需的资源,那么这些节点中的任何一个都可能被调度到客户端工作负载上。在 " -"FL " -"进程的稍后阶段(即在另一轮中),同一客户端可以由不同的节点执行。根据客户访问数据集的方式,这可能需要在所有节点上复制所有数据集分区,或采用数据集服务机制(如使用" -" nfs 或数据库)来避免数据重复。" +"The VCE does not currently offer a way to control on which node a particular " +"`virtual` client is executed. 
In other words, if more than a single node " +"have the resources needed by a client to run, then any of those nodes could " +"get the client workload scheduled onto. Later in the FL process (i.e. in a " +"different round) the same client could be executed by a different node. " +"Depending on how your clients access their datasets, this might require " +"either having a copy of all dataset partitions on all nodes or a dataset " +"serving mechanism (e.g. using nfs, a database) to circumvent data " +"duplication." +msgstr "" +"VCE 目前不提供控制特定 \"虚拟 \"客户端在哪个节点上执行的方法。换句话说,如果" +"不止一个节点拥有客户端运行所需的资源,那么这些节点中的任何一个都可能被调度到" +"客户端工作负载上。在 FL 进程的稍后阶段(即在另一轮中),同一客户端可以由不同" +"的节点执行。根据客户访问数据集的方式,这可能需要在所有节点上复制所有数据集分" +"区,或采用数据集服务机制(如使用 nfs 或数据库)来避免数据重复。" #: ../../source/how-to-run-simulations.rst:187 msgid "" -"By definition virtual clients are `stateless` due to their ephemeral " -"nature. A client state can be implemented as part of the Flower client " -"class but users need to ensure this saved to persistent storage (e.g. a " -"database, disk) and that can be retrieve later by the same client " -"regardless on which node it is running from. This is related to the point" -" above also since, in some way, the client's dataset could be seen as a " -"type of `state`." +"By definition virtual clients are `stateless` due to their ephemeral nature. " +"A client state can be implemented as part of the Flower client class but " +"users need to ensure this saved to persistent storage (e.g. a database, " +"disk) and that can be retrieve later by the same client regardless on which " +"node it is running from. This is related to the point above also since, in " +"some way, the client's dataset could be seen as a type of `state`." msgstr "" -"根据定义,虚拟客户端是 \"无状态 \"的,因为它们具有即时性。客户机状态可以作为 Flower " -"客户机类的一部分来实现,但用户需要确保将其保存到持久存储(如数据库、磁盘)中,而且无论客户机在哪个节点上运行,都能在以后检索到。这也与上述观点有关,因为在某种程度上,客户端的数据集可以被视为一种" -" \"状态\"。" +"根据定义,虚拟客户端是 \"无状态 \"的,因为它们具有即时性。客户机状态可以作为 " +"Flower 客户机类的一部分来实现,但用户需要确保将其保存到持久存储(如数据库、磁" +"盘)中,而且无论客户机在哪个节点上运行,都能在以后检索到。这也与上述观点有" +"关,因为在某种程度上,客户端的数据集可以被视为一种 \"状态\"。" #: ../../source/how-to-save-and-load-model-checkpoints.rst:2 msgid "Save and load model checkpoints" @@ -6251,10 +6545,12 @@ msgstr "保存和加载模型检查点" #: ../../source/how-to-save-and-load-model-checkpoints.rst:4 msgid "" -"Flower does not automatically save model updates on the server-side. This" -" how-to guide describes the steps to save (and load) model checkpoints in" -" Flower." -msgstr "Flower 不会在服务器端自动保存模型更新。本指南将介绍在 Flower 中保存(和加载)模型检查点的步骤。" +"Flower does not automatically save model updates on the server-side. This " +"how-to guide describes the steps to save (and load) model checkpoints in " +"Flower." +msgstr "" +"Flower 不会在服务器端自动保存模型更新。本指南将介绍在 Flower 中保存(和加载)" +"模型检查点的步骤。" #: ../../source/how-to-save-and-load-model-checkpoints.rst:8 msgid "Model checkpointing" @@ -6262,22 +6558,22 @@ msgstr "模型检查点" #: ../../source/how-to-save-and-load-model-checkpoints.rst:10 msgid "" -"Model updates can be persisted on the server-side by customizing " -":code:`Strategy` methods. Implementing custom strategies is always an " -"option, but for many cases it may be more convenient to simply customize " -"an existing strategy. The following code example defines a new " -":code:`SaveModelStrategy` which customized the existing built-in " -":code:`FedAvg` strategy. In particular, it customizes " -":code:`aggregate_fit` by calling :code:`aggregate_fit` in the base class " -"(:code:`FedAvg`). 
It then continues to save returned (aggregated) weights" -" before it returns those aggregated weights to the caller (i.e., the " -"server):" -msgstr "" -"模型更新可通过自定义 :code:`Strategy` " -"方法在服务器端持久化。实现自定义策略始终是一种选择,但在许多情况下,简单地自定义现有策略可能更方便。下面的代码示例定义了一个新的 " -":code:`SaveModelStrategy`,它自定义了现有的内置 :code:`FedAvg` " -"策略。特别是,它通过调用基类(:code:`FedAvg`)中的 :code:`aggregate_fit` 来定制 " -":code:`aggregate_fit`。然后继续保存返回的(聚合)参数,然后再将这些聚合参数返回给调用者(即服务器):" +"Model updates can be persisted on the server-side by customizing :code:" +"`Strategy` methods. Implementing custom strategies is always an option, but " +"for many cases it may be more convenient to simply customize an existing " +"strategy. The following code example defines a new :code:`SaveModelStrategy` " +"which customized the existing built-in :code:`FedAvg` strategy. In " +"particular, it customizes :code:`aggregate_fit` by calling :code:" +"`aggregate_fit` in the base class (:code:`FedAvg`). It then continues to " +"save returned (aggregated) weights before it returns those aggregated " +"weights to the caller (i.e., the server):" +msgstr "" +"模型更新可通过自定义 :code:`Strategy` 方法在服务器端持久化。实现自定义策略始" +"终是一种选择,但在许多情况下,简单地自定义现有策略可能更方便。下面的代码示例" +"定义了一个新的 :code:`SaveModelStrategy`,它自定义了现有的内置 :code:" +"`FedAvg` 策略。特别是,它通过调用基类(:code:`FedAvg`)中的 :code:" +"`aggregate_fit` 来定制 :code:`aggregate_fit`。然后继续保存返回的(聚合)参" +"数,然后再将这些聚合参数返回给调用者(即服务器):" #: ../../source/how-to-save-and-load-model-checkpoints.rst:47 msgid "Save and load PyTorch checkpoints" @@ -6285,29 +6581,32 @@ msgstr "保存和加载 PyTorch 检查点" #: ../../source/how-to-save-and-load-model-checkpoints.rst:49 msgid "" -"Similar to the previous example but with a few extra steps, we'll show " -"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " -"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" -" transformed into a list of NumPy ``ndarray``'s, then those are " -"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" -" class structure." +"Similar to the previous example but with a few extra steps, we'll show how " +"to store a PyTorch checkpoint we'll use the ``torch.save`` function. " +"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be " +"transformed into a list of NumPy ``ndarray``'s, then those are transformed " +"into the PyTorch ``state_dict`` following the ``OrderedDict`` class " +"structure." msgstr "" -"与前面的例子类似,但多了几个步骤,我们将展示如何存储一个 PyTorch 检查点,我们将使用 ``torch.save`` " -"函数。首先,``aggregate_fit`` 返回一个 ``Parameters`` 对象,它必须被转换成一个 NumPy " -"``ndarray`` 的列表,然后这些对象按照 ``OrderedDict`` 类结构被转换成 PyTorch `state_dict` 对象。" +"与前面的例子类似,但多了几个步骤,我们将展示如何存储一个 PyTorch 检查点,我们" +"将使用 ``torch.save`` 函数。首先,``aggregate_fit`` 返回一个 ``Parameters`` " +"对象,它必须被转换成一个 NumPy ``ndarray`` 的列表,然后这些对象按照 " +"``OrderedDict`` 类结构被转换成 PyTorch `state_dict` 对象。" #: ../../source/how-to-save-and-load-model-checkpoints.rst:85 msgid "" -"To load your progress, you simply append the following lines to your " -"code. Note that this will iterate over all saved checkpoints and load the" -" latest one:" -msgstr "要加载进度,只需在代码中添加以下几行。请注意,这将遍历所有已保存的检查点,并加载最新的检查点:" +"To load your progress, you simply append the following lines to your code. 
" +"Note that this will iterate over all saved checkpoints and load the latest " +"one:" +msgstr "" +"要加载进度,只需在代码中添加以下几行。请注意,这将遍历所有已保存的检查点,并" +"加载最新的检查点:" #: ../../source/how-to-save-and-load-model-checkpoints.rst:97 #, fuzzy msgid "" -"Return/use this object of type ``Parameters`` wherever necessary, such as" -" in the ``initial_parameters`` when defining a ``Strategy``." +"Return/use this object of type ``Parameters`` wherever necessary, such as in " +"the ``initial_parameters`` when defining a ``Strategy``." msgstr "" "在必要时返回/使用此 ``Parameters`` 类型的对象,例如在定义 ``Strategy` 时的 " "``initial_parameters` 中。" @@ -6318,13 +6617,14 @@ msgstr "升级至 Flower 1.0" #: ../../source/how-to-upgrade-to-flower-1.0.rst:4 msgid "" -"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" -" foundation for future growth. Compared to Flower 0.19 (and other 0.x " -"series releases), there are a few breaking changes that make it necessary" -" to change the code of existing 0.x-series projects." +"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable " +"foundation for future growth. Compared to Flower 0.19 (and other 0.x series " +"releases), there are a few breaking changes that make it necessary to change " +"the code of existing 0.x-series projects." msgstr "" -"Flower 1.0 正式发布。除了新功能,Flower 1.0 还为未来的发展奠定了稳定的基础。与 Flower 0.19(以及其他 0.x " -"系列版本)相比,有一些破坏性改动需要修改现有 0.x 系列项目的代码。" +"Flower 1.0 正式发布。除了新功能,Flower 1.0 还为未来的发展奠定了稳定的基础。" +"与 Flower 0.19(以及其他 0.x 系列版本)相比,有一些破坏性改动需要修改现有 0." +"x 系列项目的代码。" #: ../../source/how-to-upgrade-to-flower-1.0.rst:8 msgid "Install update" @@ -6332,8 +6632,8 @@ msgstr "安装更新" #: ../../source/how-to-upgrade-to-flower-1.0.rst:10 msgid "" -"Here's how to update an existing installation to Flower 1.0 using either " -"pip or Poetry:" +"Here's how to update an existing installation to Flower 1.0 using either pip " +"or Poetry:" msgstr "下面介绍如何使用 pip 或 Poetry 将现有安装更新到 Flower 1.0:" #: ../../source/how-to-upgrade-to-flower-1.0.rst:12 @@ -6344,13 +6644,15 @@ msgstr "pip: 安装时添加 ``-U``." msgid "" "``python -m pip install -U flwr`` (when using ``start_server`` and " "``start_client``)" -msgstr "`python -m pip install -U flwr``(当使用`start_server`和`start_client`时)" +msgstr "" +"`python -m pip install -U flwr``(当使用`start_server`和`start_client`时)" #: ../../source/how-to-upgrade-to-flower-1.0.rst:15 msgid "" "``python -m pip install -U flwr[simulation]`` (when using " "``start_simulation``)" -msgstr "`python -m pip install -U flwr[simulation]``(当使用`start_simulation``时)" +msgstr "" +"`python -m pip install -U flwr[simulation]``(当使用`start_simulation``时)" #: ../../source/how-to-upgrade-to-flower-1.0.rst:17 msgid "" @@ -6358,20 +6660,21 @@ msgid "" "reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " "before running ``poetry install``)." 
msgstr "" -"Poetry:更新 ``pyproject.toml`` 中的 ``flwr`` 依赖包,然后重新安装(运行 ``poetry install``" -" 前,别忘了通过 ``rm poetry.lock` 删除 ``poetry.lock`)。" +"Poetry:更新 ``pyproject.toml`` 中的 ``flwr`` 依赖包,然后重新安装(运行 " +"``poetry install`` 前,别忘了通过 ``rm poetry.lock` 删除 ``poetry.lock`)。" #: ../../source/how-to-upgrade-to-flower-1.0.rst:19 -msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" +msgid "" +"``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" msgstr "``flwr = \"^1.0.0\"`` (当使用 ``start_server` 和 ``start_client` 时)" #: ../../source/how-to-upgrade-to-flower-1.0.rst:20 msgid "" -"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " -"using ``start_simulation``)" +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when using " +"``start_simulation``)" msgstr "" -"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] " -"}``(当使用``start_simulation``时)" +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }``(当使用" +"``start_simulation``时)" #: ../../source/how-to-upgrade-to-flower-1.0.rst:24 msgid "Required changes" @@ -6395,13 +6698,13 @@ msgstr "将所有参数作为关键字参数传递(而不是位置参数)。 msgid "" "Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " "FlowerClient())``" -msgstr "Flower 0.19 (位置参数): ``start_client(\"127.0.0.1:8080\", FlowerClient())``" +msgstr "" +"Flower 0.19 (位置参数): ``start_client(\"127.0.0.1:8080\", FlowerClient())``" #: ../../source/how-to-upgrade-to-flower-1.0.rst:34 msgid "" "Flower 1.0 (keyword arguments): " -"``start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())``" +"``start_client(server_address=\"127.0.0.1:8080\", client=FlowerClient())``" msgstr "" "Flower 1.0(关键字参数): ``start_client(server_address=\"127.0.0.1:8080\", " "client=FlowerClient())``" @@ -6419,8 +6722,8 @@ msgid "" "Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " "get_parameters(self, ins: GetParametersIns):``" msgstr "" -"客户端 \"的子类:将 \"get_parameters(self): \"改为 \"get_parameters(self, ins: " -"GetParametersIns):\"" +"客户端 \"的子类:将 \"get_parameters(self): \"改为 \"get_parameters(self, " +"ins: GetParametersIns):\"" #: ../../source/how-to-upgrade-to-flower-1.0.rst:43 msgid "Strategies / ``start_server`` / ``start_simulation``" @@ -6444,29 +6747,30 @@ msgstr "" #: ../../source/how-to-upgrade-to-flower-1.0.rst:48 msgid "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +"Flower 1.0: ``start_server(..., config=flwr.server." +"ServerConfig(num_rounds=3, round_timeout=600.0), ...)``" msgstr "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +"Flower 1.0: ``start_server(..., config=flwr.server." +"ServerConfig(num_rounds=3, round_timeout=600.0), ...)``" #: ../../source/how-to-upgrade-to-flower-1.0.rst:50 msgid "" "Replace ``num_rounds=1`` in ``start_simulation`` with the new " "``config=ServerConfig(...)`` (see previous item)" -msgstr "将`start_simulation``中的`num_rounds=1``替换为新的`config=ServerConfig(...)`(参见前一项)" +msgstr "" +"将`start_simulation``中的`num_rounds=1``替换为新的`config=ServerConfig(...)`" +"(参见前一项)" #: ../../source/how-to-upgrade-to-flower-1.0.rst:51 msgid "" "Remove ``force_final_distributed_eval`` parameter from calls to " -"``start_server``. Distributed evaluation on all clients can be enabled by" -" configuring the strategy to sample all clients for evaluation after the " -"last round of training." +"``start_server``. 
Distributed evaluation on all clients can be enabled by " +"configuring the strategy to sample all clients for evaluation after the last " +"round of training." msgstr "" -"删除调用 ``start_server`` 时的 ``force_final_distributed_eval` " -"参数。可以通过配置策略,在最后一轮训练后对所有客户端进行抽样评估,从而启用对所有客户端的分布式评估。" +"删除调用 ``start_server`` 时的 ``force_final_distributed_eval` 参数。可以通过" +"配置策略,在最后一轮训练后对所有客户端进行抽样评估,从而启用对所有客户端的分" +"布式评估。" #: ../../source/how-to-upgrade-to-flower-1.0.rst:52 msgid "Rename parameter/ndarray conversion functions:" @@ -6482,17 +6786,18 @@ msgstr "``weights_to_parameters`` --> ``ndarrays_to_parameters``" #: ../../source/how-to-upgrade-to-flower-1.0.rst:57 msgid "" -"Strategy initialization: if the strategy relies on the default values for" -" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " +"Strategy initialization: if the strategy relies on the default values for " +"``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " "``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " "create a strategy (by calling ``start_server`` or ``start_simulation`` " -"without passing a strategy instance) should now manually initialize " -"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." +"without passing a strategy instance) should now manually initialize FedAvg " +"with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." msgstr "" -"策略初始化:如果策略依赖于 ``fraction_fit`` 和 ``fraction_evaluate`` 的默认值,请手动将 " -"``fraction_fit`` 和 ``fraction_evaluate`` 设置为 ``0.1``。未手动创建策略的项目(调用 " -"``start_server` 或 ``start_simulation` 时未传递策略实例)现在应手动初始化 FedAvg,并将 " -"`fraction_fit` 和 `fraction_evaluate` 设为 `0.1``。" +"策略初始化:如果策略依赖于 ``fraction_fit`` 和 ``fraction_evaluate`` 的默认" +"值,请手动将 ``fraction_fit`` 和 ``fraction_evaluate`` 设置为 ``0.1``。未手动" +"创建策略的项目(调用 ``start_server` 或 ``start_simulation` 时未传递策略实" +"例)现在应手动初始化 FedAvg,并将 `fraction_fit` 和 `fraction_evaluate` 设为 " +"`0.1``。" #: ../../source/how-to-upgrade-to-flower-1.0.rst:58 msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" @@ -6517,8 +6822,8 @@ msgid "" "``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." 
msgstr "" "将 `rnd` 更名为 `server_round`。这会影响多个方法和函数,例如 " -"``configure_fit``、``aggregate_fit``、``configure_evaluate``、`aggregate_evaluate``" -" 和 ``evaluate_fn``。" +"``configure_fit``、``aggregate_fit``、``configure_evaluate``、" +"`aggregate_evaluate`` 和 ``evaluate_fn``。" #: ../../source/how-to-upgrade-to-flower-1.0.rst:65 msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" @@ -6526,21 +6831,19 @@ msgstr "在 ``evaluate_fn` 中添加 ``server_round` 和 ``config`:" #: ../../source/how-to-upgrade-to-flower-1.0.rst:67 msgid "" -"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" +"Flower 0.19: ``def evaluate(parameters: NDArrays) -> Optional[Tuple[float, " +"Dict[str, Scalar]]]:``" msgstr "" -"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" +"Flower 0.19: ``def evaluate(parameters: NDArrays) -> Optional[Tuple[float, " +"Dict[str, Scalar]]]:``" #: ../../source/how-to-upgrade-to-flower-1.0.rst:68 msgid "" -"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " -"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " -"Scalar]]]:``" +"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, config: " +"Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" -"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " -"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " -"Scalar]]]:``" +"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, config: " +"Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" #: ../../source/how-to-upgrade-to-flower-1.0.rst:71 msgid "Custom strategies" @@ -6548,22 +6851,23 @@ msgstr "定制策略" #: ../../source/how-to-upgrade-to-flower-1.0.rst:73 msgid "" -"The type of parameter ``failures`` has changed from " -"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " -"BaseException]]`` (in ``aggregate_fit``) and " -"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " -"``aggregate_evaluate``)" +"The type of parameter ``failures`` has changed from ``List[BaseException]`` " +"to ``List[Union[Tuple[ClientProxy, FitRes], BaseException]]`` (in " +"``aggregate_fit``) and ``List[Union[Tuple[ClientProxy, EvaluateRes], " +"BaseException]]`` (in ``aggregate_evaluate``)" msgstr "" -"参数``failures``的类型已从``List[BaseException]``变为``List[Union[Tuple[ClientProxy," -" FitRes], " -"BaseException]]``(在``agregate_fit``中)和``List[Union[Tuple[ClientProxy, " -"EvaluateRes], BaseException]]``(在``agregate_evaluate``中)" +"参数``failures``的类型已从``List[BaseException]``变为" +"``List[Union[Tuple[ClientProxy, FitRes], BaseException]]``(在" +"``agregate_fit``中)和``List[Union[Tuple[ClientProxy, EvaluateRes], " +"BaseException]]``(在``agregate_evaluate``中)" #: ../../source/how-to-upgrade-to-flower-1.0.rst:74 msgid "" "The ``Strategy`` method ``evaluate`` now receives the current round of " "federated learning/evaluation as the first parameter:" -msgstr "``Strategy``方法 的``evaluate``现在会接收当前一轮联邦学习/评估作为第一个参数:" +msgstr "" +"``Strategy``方法 的``evaluate``现在会接收当前一轮联邦学习/评估作为第一个参" +"数:" #: ../../source/how-to-upgrade-to-flower-1.0.rst:76 msgid "" @@ -6575,11 +6879,11 @@ msgstr "" #: ../../source/how-to-upgrade-to-flower-1.0.rst:77 msgid "" -"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " -"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" +"Flower 1.0: ``def evaluate(self, server_round: int, parameters: Parameters) -" +"> Optional[Tuple[float, 
Dict[str, Scalar]]]:``" msgstr "" -"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " -"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" +"Flower 1.0: ``def evaluate(self, server_round: int, parameters: Parameters) -" +"> Optional[Tuple[float, Dict[str, Scalar]]]:``" #: ../../source/how-to-upgrade-to-flower-1.0.rst:80 msgid "Optional improvements" @@ -6594,12 +6898,11 @@ msgstr "除了上述必要的改动之外,还有一些潜在的改进措施: #: ../../source/how-to-upgrade-to-flower-1.0.rst:84 msgid "" "Remove \"placeholder\" methods from subclasses of ``Client`` or " -"``NumPyClient``. If you, for example, use server-side evaluation, then " -"empty placeholder implementations of ``evaluate`` are no longer " -"necessary." +"``NumPyClient``. If you, for example, use server-side evaluation, then empty " +"placeholder implementations of ``evaluate`` are no longer necessary." msgstr "" -"删除 ``Client`` 或 ``NumPyClient`` 子类中的 \"占位符 " -"\"方法。例如,如果你使用服务器端评估,那么就不再需要``evaluate``的 \"空占位符 \"实现。" +"删除 ``Client`` 或 ``NumPyClient`` 子类中的 \"占位符 \"方法。例如,如果你使用" +"服务器端评估,那么就不再需要``evaluate``的 \"空占位符 \"实现。" #: ../../source/how-to-upgrade-to-flower-1.0.rst:85 msgid "" @@ -6607,9 +6910,8 @@ msgid "" "``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " "round_timeout=600.0), ...)``" msgstr "" -"通过 ``start_simulation`` 配置循环超时: ``start_simulation(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +"通过 ``start_simulation`` 配置循环超时: ``start_simulation(..., config=flwr." +"server.ServerConfig(num_rounds=3, round_timeout=600.0), ...)``" #: ../../source/how-to-upgrade-to-flower-1.0.rst:89 msgid "Further help" @@ -6617,15 +6919,16 @@ msgstr "更多帮助" #: ../../source/how-to-upgrade-to-flower-1.0.rst:91 msgid "" -"Most official `Flower code examples " -"`_ are already updated" -" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " -"API. If there are further questions, `join the Flower Slack " -"`_ and use the channel ``#questions``." +"Most official `Flower code examples `_ are already updated to Flower 1.0, they can serve as a " +"reference for using the Flower 1.0 API. If there are further questions, " +"`join the Flower Slack `_ and use the channel " +"``#questions``." msgstr "" -"大多数官方的 `Flower 代码示例 `_" -" 已经更新到 Flower 1.0,它们可以作为使用 Flower 1.0 API 的参考。如果还有其他问题,请加入 Flower Slack " -"`_ 并使用 \"#questions``\"。" +"大多数官方的 `Flower 代码示例 `_ 已经更新到 Flower 1.0,它们可以作为使用 Flower 1.0 API 的参考。如" +"果还有其他问题,请加入 Flower Slack `_ 并使用 " +"\"#questions``\"。" #: ../../source/how-to-use-built-in-mods.rst:2 #, fuzzy @@ -6635,21 +6938,20 @@ msgstr "使用内置调制器" #: ../../source/how-to-use-built-in-mods.rst:4 #, fuzzy msgid "" -"**Note: This tutorial covers experimental features. The functionality and" -" interfaces may change in future versions.**" +"**Note: This tutorial covers experimental features. The functionality and " +"interfaces may change in future versions.**" msgstr "**注:本教程涵盖实验性功能。功能和界面可能会在未来版本中发生变化。" #: ../../source/how-to-use-built-in-mods.rst:6 #, fuzzy msgid "" -"In this tutorial, we will learn how to utilize built-in mods to augment " -"the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " -"allow us to perform operations before and after a task is processed in " -"the ``ClientApp``." +"In this tutorial, we will learn how to utilize built-in mods to augment the " +"behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) allow us " +"to perform operations before and after a task is processed in the " +"``ClientApp``." 
msgstr "" -"在本教程中,我们将学习如何利用内置模块来增强 ``ClientApp`` " -"的行为。修改器(有时也称为修改器)允许我们在 ``ClientApp`` " -"处理任务之前和之后执行操作。" +"在本教程中,我们将学习如何利用内置模块来增强 ``ClientApp`` 的行为。修改器(有" +"时也称为修改器)允许我们在 ``ClientApp`` 处理任务之前和之后执行操作。" #: ../../source/how-to-use-built-in-mods.rst:9 #, fuzzy @@ -6659,9 +6961,9 @@ msgstr "什么是 Mods?" #: ../../source/how-to-use-built-in-mods.rst:11 #, fuzzy msgid "" -"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " -"or inspect the incoming ``Message`` and the resulting outgoing " -"``Message``. The signature for a ``Mod`` is as follows:" +"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate or " +"inspect the incoming ``Message`` and the resulting outgoing ``Message``. The " +"signature for a ``Mod`` is as follows:" msgstr "" "Mod 是包裹在 ``ClientApp`` 周围的可调用程序。它可以操作或检查传入的 " "``Message`` 和由此产生的传出的 ``Message`` 。一个 ``Mod`` 的签名如下:" @@ -6713,8 +7015,9 @@ msgstr "3. 用模块创建 ``ClientApp``" msgid "" "Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " "argument. The order in which you provide the mods matters:" -msgstr "创建您的 ``ClientApp`` 并将 mods 作为列表传递给 ``mods`` 参数。提供 mod " -"的顺序很重要:" +msgstr "" +"创建您的 ``ClientApp`` 并将 mods 作为列表传递给 ``mods`` 参数。提供 mod 的顺" +"序很重要:" #: ../../source/how-to-use-built-in-mods.rst:72 #, fuzzy @@ -6758,21 +7061,22 @@ msgstr "``example_mod_1`` (返回途中最外层的模式)" #: ../../source/how-to-use-built-in-mods.rst:82 #, fuzzy msgid "" -"Each mod has a chance to inspect and modify the incoming ``Message`` " -"before passing it to the next mod, and likewise with the outgoing " -"``Message`` before returning it up the stack." -msgstr "每个模块都有机会检查和修改传入的 \"信息\",然后再将其传递给下一个模块,同样," +"Each mod has a chance to inspect and modify the incoming ``Message`` before " +"passing it to the next mod, and likewise with the outgoing ``Message`` " +"before returning it up the stack." +msgstr "" +"每个模块都有机会检查和修改传入的 \"信息\",然后再将其传递给下一个模块,同样," "也有机会检查和修改传出的 \"信息\",然后再将其返回堆栈。" #: ../../source/how-to-use-built-in-mods.rst:87 #, fuzzy msgid "" "By following this guide, you have learned how to effectively use mods to " -"enhance your ``ClientApp``'s functionality. Remember that the order of " -"mods is crucial and affects how the input and output are processed." +"enhance your ``ClientApp``'s functionality. Remember that the order of mods " +"is crucial and affects how the input and output are processed." msgstr "" -"通过本指南,您已学会如何有效地使用 mod 来增强您的 ``ClientApp`` " -"的功能。请记住,mod 的顺序至关重要,它会影响输入和输出的处理方式。" +"通过本指南,您已学会如何有效地使用 mod 来增强您的 ``ClientApp`` 的功能。请记" +"住,mod 的顺序至关重要,它会影响输入和输出的处理方式。" #: ../../source/how-to-use-built-in-mods.rst:89 #, fuzzy @@ -6787,20 +7091,20 @@ msgstr "差分隐私" #: ../../source/how-to-use-differential-privacy.rst:3 #, fuzzy msgid "" -"This guide explains how you can utilize differential privacy in the " -"Flower framework. If you are not yet familiar with differential privacy, " -"you can refer to :doc:`explanation-differential-privacy`." +"This guide explains how you can utilize differential privacy in the Flower " +"framework. If you are not yet familiar with differential privacy, you can " +"refer to :doc:`explanation-differential-privacy`." msgstr "" -"本指南解释了如何在 Flower 框架中使用差分隐私。如果您还不熟悉差分隐私," -"可以参考 :doc:`explanation-differential-privacy` 。" +"本指南解释了如何在 Flower 框架中使用差分隐私。如果您还不熟悉差分隐私,可以参" +"考 :doc:`explanation-differential-privacy` 。" #: ../../source/how-to-use-differential-privacy.rst:7 #, fuzzy msgid "" "Differential Privacy in Flower is in a preview phase. 
If you plan to use " -"these features in a production environment with sensitive data, feel free" -" contact us to discuss your requirements and to receive guidance on how " -"to best use these features." +"these features in a production environment with sensitive data, feel free " +"contact us to discuss your requirements and to receive guidance on how to " +"best use these features." msgstr "" "Flower 中的差异隐私处于预览阶段。如果您计划在生产环境中使用这些敏感数据功能," "请随时联系我们,讨论您的需求,并获得如何最好地使用这些功能的指导。" @@ -6808,12 +7112,13 @@ msgstr "" #: ../../source/how-to-use-differential-privacy.rst:12 #, fuzzy msgid "" -"This approach consists of two seprate phases: clipping of the updates and" -" adding noise to the aggregated model. For the clipping phase, Flower " -"framework has made it possible to decide whether to perform clipping on " -"the server side or the client side." -msgstr "这种方法包括两个独立的阶段:对更新进行剪切和在聚合模型中添加噪声。在剪切阶段" -",Flower 框架可以决定是在服务器端还是在客户端执行剪切。" +"This approach consists of two seprate phases: clipping of the updates and " +"adding noise to the aggregated model. For the clipping phase, Flower " +"framework has made it possible to decide whether to perform clipping on the " +"server side or the client side." +msgstr "" +"这种方法包括两个独立的阶段:对更新进行剪切和在聚合模型中添加噪声。在剪切阶" +"段,Flower 框架可以决定是在服务器端还是在客户端执行剪切。" #: ../../source/how-to-use-differential-privacy.rst:15 #, fuzzy @@ -6821,21 +7126,22 @@ msgid "" "**Server-side Clipping**: This approach has the advantage of the server " "enforcing uniform clipping across all clients' updates and reducing the " "communication overhead for clipping values. However, it also has the " -"disadvantage of increasing the computational load on the server due to " -"the need to perform the clipping operation for all clients." +"disadvantage of increasing the computational load on the server due to the " +"need to perform the clipping operation for all clients." msgstr "" -"** 服务器端剪切**: 这种方法的优点是服务器可对所有客户端的更新执行统一的剪切" -",并减少剪切值的通信开销。不过,这种方法也有缺点,那就是需要为所有客户端执行" -"剪切操作,从而增加了服务器的计算负荷。" +"** 服务器端剪切**: 这种方法的优点是服务器可对所有客户端的更新执行统一的剪" +"切,并减少剪切值的通信开销。不过,这种方法也有缺点,那就是需要为所有客户端执" +"行剪切操作,从而增加了服务器的计算负荷。" #: ../../source/how-to-use-differential-privacy.rst:16 #, fuzzy msgid "" -"**Client-side Clipping**: This approach has the advantage of reducing the" -" computational overhead on the server. However, it also has the " -"disadvantage of lacking centralized control, as the server has less " -"control over the clipping process." -msgstr "**客户端剪切**: 这种方法的优点是可以减少服务器的计算开销。不过,它也有缺乏集" +"**Client-side Clipping**: This approach has the advantage of reducing the " +"computational overhead on the server. However, it also has the disadvantage " +"of lacking centralized control, as the server has less control over the " +"clipping process." +msgstr "" +"**客户端剪切**: 这种方法的优点是可以减少服务器的计算开销。不过,它也有缺乏集" "中控制的缺点,因为服务器对剪切过程的控制较少。" #: ../../source/how-to-use-differential-privacy.rst:21 @@ -6848,16 +7154,15 @@ msgstr "服务器端逻辑" msgid "" "For central DP with server-side clipping, there are two :code:`Strategy` " "classes that act as wrappers around the actual :code:`Strategy` instance " -"(for example, :code:`FedAvg`). The two wrapper classes are " -":code:`DifferentialPrivacyServerSideFixedClipping` and " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " -"adaptive clipping." +"(for example, :code:`FedAvg`). The two wrapper classes are :code:" +"`DifferentialPrivacyServerSideFixedClipping` and :code:" +"`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and adaptive " +"clipping." 
msgstr "" -"对于具有服务器端剪裁功能的中央 DP,有两个 :code:`Strategy` 类作为实际 " -":code:`Strategy` 实例(例如 :code:`FedAvg`)的包装器。这两个封装类分别是 " -":code:`DifferentialPrivacyServerSideFixedClipping` 和 " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` " -",用于固定剪辑和自适应剪辑。" +"对于具有服务器端剪裁功能的中央 DP,有两个 :code:`Strategy` 类作为实际 :code:" +"`Strategy` 实例(例如 :code:`FedAvg`)的包装器。这两个封装类分别是 :code:" +"`DifferentialPrivacyServerSideFixedClipping` 和 :code:" +"`DifferentialPrivacyServerSideAdaptiveClipping` ,用于固定剪辑和自适应剪辑。" #: ../../source/how-to-use-differential-privacy.rst:-1 #, fuzzy @@ -6867,16 +7172,15 @@ msgstr "服务器端逻辑" #: ../../source/how-to-use-differential-privacy.rst:31 #, fuzzy msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use server-" -"side fixed clipping using the " -":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " -"same approach can be used with " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " +"The code sample below enables the :code:`FedAvg` strategy to use server-side " +"fixed clipping using the :code:`DifferentialPrivacyServerSideFixedClipping` " +"wrapper class. The same approach can be used with :code:" +"`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " "corresponding input parameters." msgstr "" -"下面的代码示例使用 :code:`DifferentialPrivacyServerSideFixedClipping` " -"封装类使 :code:`FedAvg` 策略使用服务器端固定剪辑。通过调整相应的输入参数," -"同样的方法也可用于 :code:`DifferentialPrivacyServerSideAdaptiveClipping`。" +"下面的代码示例使用 :code:`DifferentialPrivacyServerSideFixedClipping` 封装类" +"使 :code:`FedAvg` 策略使用服务器端固定剪辑。通过调整相应的输入参数,同样的方" +"法也可用于 :code:`DifferentialPrivacyServerSideAdaptiveClipping`。" #: ../../source/how-to-use-differential-privacy.rst:52 #, fuzzy @@ -6887,18 +7191,18 @@ msgstr "客户端逻辑" #, fuzzy msgid "" "For central DP with client-side clipping, the server sends the clipping " -"value to selected clients on each round. Clients can use existing Flower " -":code:`Mods` to perform the clipping. Two mods are available for fixed " -"and adaptive client-side clipping: :code:`fixedclipping_mod` and " -":code:`adaptiveclipping_mod` with corresponding server-side wrappers " -":code:`DifferentialPrivacyClientSideFixedClipping` and " -":code:`DifferentialPrivacyClientSideAdaptiveClipping`." -msgstr "" -"对于带有客户端剪裁功能的中央 DP,服务器会在每一轮向选定的客户端发送剪裁值。" -"客户端可以使用现有的 Flower :code:`Mods`来执行剪裁。有两种模式可用于固定和自" -"适应客户端剪辑::code:`fixedclipping_mod` 和 :code:`adaptiveclipping_mod`," -"以及相应的服务器端封装 :code:`DifferentialPrivacyClientSideFixedClipping` 和 " -":code:`DifferentialPrivacyClientSideAdaptiveClipping`。" +"value to selected clients on each round. Clients can use existing Flower :" +"code:`Mods` to perform the clipping. Two mods are available for fixed and " +"adaptive client-side clipping: :code:`fixedclipping_mod` and :code:" +"`adaptiveclipping_mod` with corresponding server-side wrappers :code:" +"`DifferentialPrivacyClientSideFixedClipping` and :code:" +"`DifferentialPrivacyClientSideAdaptiveClipping`." 
+msgstr "" +"对于带有客户端剪裁功能的中央 DP,服务器会在每一轮向选定的客户端发送剪裁值。客" +"户端可以使用现有的 Flower :code:`Mods`来执行剪裁。有两种模式可用于固定和自适" +"应客户端剪辑::code:`fixedclipping_mod` 和 :code:`adaptiveclipping_mod`,以及" +"相应的服务器端封装 :code:`DifferentialPrivacyClientSideFixedClipping` 和 :" +"code:`DifferentialPrivacyClientSideAdaptiveClipping`。" #: ../../source/how-to-use-differential-privacy.rst:-1 #, fuzzy @@ -6909,35 +7213,35 @@ msgstr "客户端逻辑" #, fuzzy msgid "" "The code sample below enables the :code:`FedAvg` strategy to use " -"differential privacy with client-side fixed clipping using both the " -":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " -"the client, :code:`fixedclipping_mod`:" +"differential privacy with client-side fixed clipping using both the :code:" +"`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on the " +"client, :code:`fixedclipping_mod`:" msgstr "" -"下面的代码示例使用 :code:`DifferentialPrivacyClientSideFixedClipping` " -"封装类和客户端的 :code:`fixedclipping_mod` 使 :code:`FedAvg` " -"策略在客户端固定剪辑的情况下使用差分隐私:" +"下面的代码示例使用 :code:`DifferentialPrivacyClientSideFixedClipping` 封装类" +"和客户端的 :code:`fixedclipping_mod` 使 :code:`FedAvg` 策略在客户端固定剪辑的" +"情况下使用差分隐私:" #: ../../source/how-to-use-differential-privacy.rst:80 #, fuzzy msgid "" -"In addition to the server-side strategy wrapper, the :code:`ClientApp` " -"needs to configure the matching :code:`fixedclipping_mod` to perform the " -"client-side clipping:" +"In addition to the server-side strategy wrapper, the :code:`ClientApp` needs " +"to configure the matching :code:`fixedclipping_mod` to perform the client-" +"side clipping:" msgstr "" -"除了服务器端策略包装器外,:code:`ClientApp` 还需要配置匹配的 " -":code:`fixedclipping_mod` 以执行客户端剪切:" +"除了服务器端策略包装器外,:code:`ClientApp` 还需要配置匹配的 :code:" +"`fixedclipping_mod` 以执行客户端剪切:" #: ../../source/how-to-use-differential-privacy.rst:97 #, fuzzy msgid "" -"To utilize local differential privacy (DP) and add noise to the client " -"model parameters before transmitting them to the server in Flower, you " -"can use the `LocalDpMod`. The following hyperparameters need to be set: " -"clipping norm value, sensitivity, epsilon, and delta." +"To utilize local differential privacy (DP) and add noise to the client model " +"parameters before transmitting them to the server in Flower, you can use the " +"`LocalDpMod`. The following hyperparameters need to be set: clipping norm " +"value, sensitivity, epsilon, and delta." msgstr "" -"要利用本地差分隐私(DP)并在将客户端模型参数传输到 Flower " -"服务器之前为其添加噪声,可以使用 " -"`LocalDpMod`。需要设置以下超参数:剪切规范值、灵敏度、ε 和 delta。" +"要利用本地差分隐私(DP)并在将客户端模型参数传输到 Flower 服务器之前为其添加" +"噪声,可以使用 `LocalDpMod`。需要设置以下超参数:剪切规范值、灵敏度、ε 和 " +"delta。" #: ../../source/how-to-use-differential-privacy.rst:-1 #, fuzzy @@ -6952,11 +7256,11 @@ msgstr "下面的代码示例展示了如何使用 :code:`LocalDpMod`:" #: ../../source/how-to-use-differential-privacy.rst:122 #, fuzzy msgid "" -"Please note that the order of mods, especially those that modify " -"parameters, is important when using multiple modifiers. Typically, " -"differential privacy (DP) modifiers should be the last to operate on " -"parameters." -msgstr "请注意,在使用多个修改器时,修改器(尤其是修改参数的修改器)的顺序非常重要。" +"Please note that the order of mods, especially those that modify parameters, " +"is important when using multiple modifiers. Typically, differential privacy " +"(DP) modifiers should be the last to operate on parameters." 
+msgstr "" +"请注意,在使用多个修改器时,修改器(尤其是修改参数的修改器)的顺序非常重要。" "通常情况下,差分隐私 (DP) 修改器应最后对参数进行操作。" #: ../../source/how-to-use-differential-privacy.rst:125 @@ -6967,17 +7271,16 @@ msgstr "使用隐私引擎进行本地培训" #: ../../source/how-to-use-differential-privacy.rst:126 #, fuzzy msgid "" -"For ensuring data instance-level privacy during local model training on " -"the client side, consider leveraging privacy engines such as Opacus and " -"TensorFlow Privacy. For examples of using Flower with these engines, " -"please refer to the Flower examples directory (`Opacus " -"`_, `Tensorflow" -" Privacy `_)." +"For ensuring data instance-level privacy during local model training on the " +"client side, consider leveraging privacy engines such as Opacus and " +"TensorFlow Privacy. For examples of using Flower with these engines, please " +"refer to the Flower examples directory (`Opacus `_, `Tensorflow Privacy `_)." msgstr "" "要在客户端本地模型训练期间确保数据实例级隐私,可考虑利用 Opacus 和 " -"TensorFlow Privacy 等隐私引擎。有关将 Flower 与这些引擎结合使用的示例," -"请参阅 Flower 示例目录(`Opacus `_, `Tensorflow Privacy `_)。" @@ -6987,15 +7290,17 @@ msgstr "使用策略" #: ../../source/how-to-use-strategies.rst:4 msgid "" -"Flower allows full customization of the learning process through the " -":code:`Strategy` abstraction. A number of built-in strategies are " -"provided in the core framework." -msgstr "Flower 允许通过 :code:`Strategy` 抽象类对学习过程进行完全定制。核心框架中提供了许多内置策略。" +"Flower allows full customization of the learning process through the :code:" +"`Strategy` abstraction. A number of built-in strategies are provided in the " +"core framework." +msgstr "" +"Flower 允许通过 :code:`Strategy` 抽象类对学习过程进行完全定制。核心框架中提供" +"了许多内置策略。" #: ../../source/how-to-use-strategies.rst:6 msgid "" -"There are three ways to customize the way Flower orchestrates the " -"learning process on the server side:" +"There are three ways to customize the way Flower orchestrates the learning " +"process on the server side:" msgstr "有三种方法可以自定义 Flower 在服务器端协调学习过程的方式:" #: ../../source/how-to-use-strategies.rst:8 @@ -7018,23 +7323,27 @@ msgstr "使用现有策略" #: ../../source/how-to-use-strategies.rst:16 msgid "" -"Flower comes with a number of popular federated learning strategies " -"built-in. A built-in strategy can be instantiated as follows:" +"Flower comes with a number of popular federated learning strategies built-" +"in. A built-in strategy can be instantiated as follows:" msgstr "Flower 内置了许多流行的联邦学习策略。内置策略的实例化方法如下:" #: ../../source/how-to-use-strategies.rst:25 msgid "" -"This creates a strategy with all parameters left at their default values " -"and passes it to the :code:`start_server` function. It is usually " -"recommended to adjust a few parameters during instantiation:" -msgstr "这会创建一个所有参数都保持默认值的策略,并将其传递给 :code:`start_server` 函数。通常建议在实例化过程中调整一些参数:" +"This creates a strategy with all parameters left at their default values and " +"passes it to the :code:`start_server` function. It is usually recommended to " +"adjust a few parameters during instantiation:" +msgstr "" +"这会创建一个所有参数都保持默认值的策略,并将其传递给 :code:`start_server` 函" +"数。通常建议在实例化过程中调整一些参数:" #: ../../source/how-to-use-strategies.rst:42 msgid "" "Existing strategies provide several ways to customize their behaviour. " "Callback functions allow strategies to call user-provided code during " "execution." 
-msgstr "现有的策略提供了多种自定义行为的方法。回调函数允许策略在执行过程中调用用户提供的代码。" +msgstr "" +"现有的策略提供了多种自定义行为的方法。回调函数允许策略在执行过程中调用用户提" +"供的代码。" #: ../../source/how-to-use-strategies.rst:45 msgid "Configuring client fit and client evaluate" @@ -7043,36 +7352,37 @@ msgstr "配置客户匹配和客户评估" #: ../../source/how-to-use-strategies.rst:47 msgid "" "The server can pass new configuration values to the client each round by " -"providing a function to :code:`on_fit_config_fn`. The provided function " -"will be called by the strategy and must return a dictionary of " -"configuration key values pairs that will be sent to the client. It must " -"return a dictionary of arbitrary configuration values :code:`client.fit`" -" and :code:`client.evaluate` functions during each round of federated " -"learning." +"providing a function to :code:`on_fit_config_fn`. The provided function will " +"be called by the strategy and must return a dictionary of configuration key " +"values pairs that will be sent to the client. It must return a dictionary of " +"arbitrary configuration values :code:`client.fit` and :code:`client." +"evaluate` functions during each round of federated learning." msgstr "" -"服务器可以通过向 :code:`on_fit_config_fn` " -"提供一个函数,在每一轮向客户端传递新的配置值。提供的函数将被策略调用,并且必须返回一个配置键值对的字典,该字典将被发送到客户端。在每一轮联邦学习期间,它必须返回一个任意配置值" -" dictionary :code:`client.fit`和 :code:`client.evaluate`函数。" +"服务器可以通过向 :code:`on_fit_config_fn` 提供一个函数,在每一轮向客户端传递" +"新的配置值。提供的函数将被策略调用,并且必须返回一个配置键值对的字典,该字典" +"将被发送到客户端。在每一轮联邦学习期间,它必须返回一个任意配置值 dictionary :" +"code:`client.fit`和 :code:`client.evaluate`函数。" #: ../../source/how-to-use-strategies.rst:75 msgid "" "The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " "values from server to client, and poetentially change these values each " -"round, for example, to adjust the learning rate. The client will receive " -"the dictionary returned by the :code:`on_fit_config_fn` in its own " -":code:`client.fit()` function." +"round, for example, to adjust the learning rate. The client will receive the " +"dictionary returned by the :code:`on_fit_config_fn` in its own :code:`client." +"fit()` function." msgstr "" -":code:`on_fit_config_fn`可用于将任意配置值从服务器传递到客户端,并在每一轮改变这些值,例如,调整学习率。客户端将在自己的 " -":code:`client.fit()` 函数中接收 :code:`on_fit_config_fn` 返回的字典。" +":code:`on_fit_config_fn`可用于将任意配置值从服务器传递到客户端,并在每一轮改" +"变这些值,例如,调整学习率。客户端将在自己的 :code:`client.fit()` 函数中接" +"收 :code:`on_fit_config_fn` 返回的字典。" #: ../../source/how-to-use-strategies.rst:78 msgid "" -"Similar to :code:`on_fit_config_fn`, there is also " -":code:`on_evaluate_config_fn` to customize the configuration sent to " -":code:`client.evaluate()`" +"Similar to :code:`on_fit_config_fn`, there is also :code:" +"`on_evaluate_config_fn` to customize the configuration sent to :code:`client." +"evaluate()`" msgstr "" -"与 :code:`on_fit_config_fn` 类似,还有 :code:`on_evaluate_config_fn` 用于定制发送到 " -":code:`client.evaluate()` 的配置" +"与 :code:`on_fit_config_fn` 类似,还有 :code:`on_evaluate_config_fn` 用于定制" +"发送到 :code:`client.evaluate()` 的配置" #: ../../source/how-to-use-strategies.rst:81 msgid "Configuring server-side evaluation" @@ -7080,18 +7390,18 @@ msgstr "配置服务器端评估" #: ../../source/how-to-use-strategies.rst:83 msgid "" -"Server-side evaluation can be enabled by passing an evaluation function " -"to :code:`evaluate_fn`." +"Server-side evaluation can be enabled by passing an evaluation function to :" +"code:`evaluate_fn`." 
msgstr "服务器端评估可通过向 :code:`evaluate_fn` 传递评估函数来启用。" #: ../../source/how-to-use-strategies.rst:89 msgid "" -"Writing a fully custom strategy is a bit more involved, but it provides " -"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." +"Writing a fully custom strategy is a bit more involved, but it provides the " +"most flexibility. Read the `Implementing Strategies `_ guide to learn more." msgstr "" -"编写完全自定义的策略涉及的内容较多,但灵活性最高。阅读 `实施策略 _ " -"指南,了解更多信息。" +"编写完全自定义的策略涉及的内容较多,但灵活性最高。阅读 `实施策略 _ 指南,了解更多信息。" #: ../../source/index.rst:34 msgid "Tutorial" @@ -7151,7 +7461,9 @@ msgstr "Flower 框架文档" msgid "" "Welcome to Flower's documentation. `Flower `_ is a " "friendly federated learning framework." -msgstr "欢迎访问 Flower 文档。`Flower `_ 是一个友好的联邦学习框架。" +msgstr "" +"欢迎访问 Flower 文档。`Flower `_ 是一个友好的联邦学习框" +"架。" #: ../../source/index.rst:11 msgid "Join the Flower Community" @@ -7162,7 +7474,9 @@ msgid "" "The Flower Community is growing quickly - we're a friendly group of " "researchers, engineers, students, professionals, academics, and other " "enthusiasts." -msgstr "Flower 社区发展迅速--我们是一个由研究人员、工程师、学生、专业人士、学者和其他爱好者组成的友好团体。" +msgstr "" +"Flower 社区发展迅速--我们是一个由研究人员、工程师、学生、专业人士、学者和其他" +"爱好者组成的友好团体。" #: ../../source/index.rst:15 msgid "Join us on Slack" @@ -7176,11 +7490,12 @@ msgstr "Flower 框架" msgid "" "The user guide is targeted at researchers and developers who want to use " "Flower to bring existing machine learning workloads into a federated " -"setting. One of Flower's design goals was to make this simple. Read on to" -" learn more." +"setting. One of Flower's design goals was to make this simple. Read on to " +"learn more." msgstr "" -"该用户指南面向希望使用 Flower 将现有机器学习工作负载引入联邦环境的研究人员和开发人员。Flower " -"的设计目标之一就是让这一切变得简单。请继续阅读,了解更多信息。" +"该用户指南面向希望使用 Flower 将现有机器学习工作负载引入联邦环境的研究人员和" +"开发人员。Flower 的设计目标之一就是让这一切变得简单。请继续阅读,了解更多信" +"息。" #: ../../source/index.rst:30 msgid "Tutorials" @@ -7188,31 +7503,31 @@ msgstr "教程" #: ../../source/index.rst:32 msgid "" -"A learning-oriented series of federated learning tutorials, the best " -"place to start." +"A learning-oriented series of federated learning tutorials, the best place " +"to start." 
msgstr "以学习为导向的联邦学习教程系列,最好的起点。" #: ../../source/index.rst:62 msgid "" -"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " -":doc:`TensorFlow ` | :doc:`🤗 Transformers" -" ` | :doc:`JAX ` | :doc:`Pandas ` | :doc:`fastai " -"` | :doc:`PyTorch Lightning ` | :doc:`MXNet `" -" | :doc:`scikit-learn ` | :doc:`XGBoost " -"` | :doc:`Android ` | :doc:`iOS `" -msgstr "" -"快速入门教程: :doc:`PyTorch ` | :doc:`TensorFlow " -"` | :doc:`🤗 Transformers ` | :doc:`JAX ` | " -":doc:`Pandas ` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`MXNet ` | :doc" -":`scikit-learn ` | :doc:`XGBoost " -"` | :doc:`Android ` | :doc:`iOS `" +"QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:" +"`TensorFlow ` | :doc:`🤗 Transformers " +"` | :doc:`JAX ` | :" +"doc:`Pandas ` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`MXNet ` | :doc:`scikit-learn " +"` | :doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS " +"`" +msgstr "" +"快速入门教程: :doc:`PyTorch ` | :doc:" +"`TensorFlow ` | :doc:`🤗 Transformers " +"` | :doc:`JAX ` | :" +"doc:`Pandas ` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`MXNet ` | :doc:`scikit-learn " +"` | :doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS " +"`" #: ../../source/index.rst:64 msgid "We also made video tutorials for PyTorch:" @@ -7224,15 +7539,17 @@ msgstr "还有 TensorFlow:" #: ../../source/index.rst:77 msgid "" -"Problem-oriented how-to guides show step-by-step how to achieve a " -"specific goal." +"Problem-oriented how-to guides show step-by-step how to achieve a specific " +"goal." msgstr "以问题为导向的 \"如何做 \"指南逐步展示如何实现特定目标。" #: ../../source/index.rst:111 msgid "" "Understanding-oriented concept guides explain and discuss key topics and " "underlying ideas behind Flower and collaborative AI." -msgstr "以理解为导向的概念指南解释并讨论了Flower和协作式人工智能背后的关键主题和基本思想。" +msgstr "" +"以理解为导向的概念指南解释并讨论了Flower和协作式人工智能背后的关键主题和基本" +"思想。" #: ../../source/index.rst:121 msgid "References" @@ -7258,8 +7575,8 @@ msgstr "贡献者文档" #: ../../source/index.rst:151 msgid "" -"The Flower community welcomes contributions. The following docs are " -"intended to help along the way." +"The Flower community welcomes contributions. The following docs are intended " +"to help along the way." 
msgstr "Flower 社区欢迎您的贡献。以下文档旨在为您提供帮助。" #: ../../source/ref-api-cli.rst:2 @@ -7377,8 +7694,8 @@ msgstr "启动一个 Flower 客户节点,连接到 Flower 服务器。" #: ../../source/ref-api/flwr.client.rst:24::1 #, fuzzy msgid "" -":py:obj:`start_numpy_client `\\ \\(\\*\\," -" server\\_address\\, client\\)" +":py:obj:`start_numpy_client `\\ \\(\\*\\, " +"server\\_address\\, client\\)" msgstr "" ":py:obj:`start_numpy_client `\\ \\(\\*\\, " "server\\_address\\, client\\)" @@ -7410,8 +7727,7 @@ msgstr "Flower 客户端的抽象基类。" #: ../../source/ref-api/flwr.client.rst:33::1 #, fuzzy msgid "" -":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " -"mods\\]\\)" +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, mods\\]\\)" msgstr "" ":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, mods\\]\\)" @@ -7542,8 +7858,9 @@ msgstr "评估客户端的反应。" #: ../../source/ref-api/flwr.client.Client.rst:44::1 #, fuzzy -msgid ":py:obj:`get_parameters `\\ \\(ins\\)" -msgstr "" +msgid "" +":py:obj:`get_parameters `\\ \\(ins\\)" +msgstr "" ":py:obj:`get_parameters `\\ \\(ins\\)" #: ../../source/ref-api/flwr.client.Client.rst:44::1 @@ -7555,7 +7872,8 @@ msgstr "返回当前本地模型参数。" #: ../../source/ref-api/flwr.client.Client.rst:44::1 #, fuzzy -msgid ":py:obj:`get_properties `\\ \\(ins\\)" +msgid "" +":py:obj:`get_properties `\\ \\(ins\\)" msgstr "" ":py:obj:`get_properties `\\ \\(ins\\)" @@ -7676,10 +7994,12 @@ msgstr "参数" #: flwr.client.client.Client.evaluate:3 of msgid "" -"The evaluation instructions containing (global) model parameters received" -" from the server and a dictionary of configuration values used to " -"customize the local evaluation process." -msgstr "评估指令包含从服务器接收的(全局)模型参数,以及用于定制本地评估流程的配置值字典。" +"The evaluation instructions containing (global) model parameters received " +"from the server and a dictionary of configuration values used to customize " +"the local evaluation process." +msgstr "" +"评估指令包含从服务器接收的(全局)模型参数,以及用于定制本地评估流程的配置值" +"字典。" #: flwr.client.client.Client.evaluate flwr.client.client.Client.fit #: flwr.client.client.Client.get_parameters @@ -7715,7 +8035,8 @@ msgstr "返回" msgid "" "The evaluation result containing the loss on the local dataset and other " "details such as the number of local data examples used for evaluation." -msgstr "评估结果包含本地数据集上的损失值和其他详细信息,如用于评估的本地数据的数量。" +msgstr "" +"评估结果包含本地数据集上的损失值和其他详细信息,如用于评估的本地数据的数量。" #: flwr.client.client.Client.evaluate flwr.client.client.Client.fit #: flwr.client.client.Client.get_parameters @@ -7747,15 +8068,17 @@ msgstr "返回类型" #: flwr.client.client.Client.fit:3 of msgid "" -"The training instructions containing (global) model parameters received " -"from the server and a dictionary of configuration values used to " -"customize the local training process." -msgstr "训练指令,包含从服务器接收的(全局)模型参数,以及用于定制本地训练过程的配置值字典。" +"The training instructions containing (global) model parameters received from " +"the server and a dictionary of configuration values used to customize the " +"local training process." +msgstr "" +"训练指令,包含从服务器接收的(全局)模型参数,以及用于定制本地训练过程的配置" +"值字典。" #: flwr.client.client.Client.fit:8 of msgid "" -"The training result containing updated parameters and other details such " -"as the number of local training examples used for training." +"The training result containing updated parameters and other details such as " +"the number of local training examples used for training." 
msgstr "训练结果包含更新的参数和其他详细信息,如用于训练的本地训练示例的数量。" #: flwr.client.client.Client.get_parameters:3 of @@ -7824,26 +8147,28 @@ msgstr "实例" msgid "" "Assuming a typical `Client` implementation named `FlowerClient`, you can " "wrap it in a `ClientApp` as follows:" -msgstr "假定有一个名为 `FlowerClient` 的典型 `Client` 实现,可以将其封装在一个 " +msgstr "" +"假定有一个名为 `FlowerClient` 的典型 `Client` 实现,可以将其封装在一个 " "`ClientApp` 中,如下所示:" #: flwr.client.client_app.ClientApp:16 of #, fuzzy msgid "" -"If the above code is in a Python module called `client`, it can be " -"started as follows:" -msgstr "如果上述代码位于一个名为 \"客户端 \"的 Python " -"模块中,则可以按如下方式启动它:" +"If the above code is in a Python module called `client`, it can be started " +"as follows:" +msgstr "" +"如果上述代码位于一个名为 \"客户端 \"的 Python 模块中,则可以按如下方式启动" +"它:" #: flwr.client.client_app.ClientApp:21 of #, fuzzy msgid "" -"In this `client:app` example, `client` refers to the Python module " -"`client.py` in which the previous code lives in and `app` refers to the " -"global attribute `app` that points to an object of type `ClientApp`." +"In this `client:app` example, `client` refers to the Python module `client." +"py` in which the previous code lives in and `app` refers to the global " +"attribute `app` that points to an object of type `ClientApp`." msgstr "" -"在这个 `client:app` 例子中,`client` 指的是前面代码所在的 Python 模块 `client" -".py`,而 `app` 指的是指向 `ClientApp` 类型对象的全局属性 `app` 。" +"在这个 `client:app` 例子中,`client` 指的是前面代码所在的 Python 模块 " +"`client.py`,而 `app` 指的是指向 `ClientApp` 类型对象的全局属性 `app` 。" #: flwr.client.client_app.ClientApp.evaluate:1::1 of #, fuzzy @@ -7893,7 +8218,8 @@ msgstr "" #: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 #, fuzzy -msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" +msgid "" +":py:obj:`fit `\\ \\(parameters\\, config\\)" msgstr "" ":py:obj:`fit `\\ \\(parameters\\, config\\)" @@ -7913,8 +8239,8 @@ msgid "" ":py:obj:`get_parameters `\\ " "\\(config\\)" msgstr "" -":py:obj:`get_parameters `\\ \\(" -"config\\)" +":py:obj:`get_parameters `\\ " +"\\(config\\)" #: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 #, fuzzy @@ -7922,8 +8248,8 @@ msgid "" ":py:obj:`get_properties `\\ " "\\(config\\)" msgstr "" -":py:obj:`get_properties `\\ \\(" -"config\\)" +":py:obj:`get_properties `\\ " +"\\(config\\)" #: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 #: flwr.client.numpy_client.NumPyClient.get_properties:1 of @@ -7933,8 +8259,7 @@ msgstr "返回客户端的属性集。" #: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 #, fuzzy msgid "" -":py:obj:`set_context `\\ " -"\\(context\\)" +":py:obj:`set_context `\\ \\(context\\)" msgstr "" ":py:obj:`set_context `\\ \\(context\\)" @@ -7965,28 +8290,30 @@ msgstr "当前(全局)模型参数。" #: flwr.client.numpy_client.NumPyClient.evaluate:5 of msgid "" -"Configuration parameters which allow the server to influence evaluation " -"on the client. It can be used to communicate arbitrary values from the " -"server to the client, for example, to influence the number of examples " -"used for evaluation." -msgstr "允许服务器影响客户端评估的配置参数。它可用于将任意值从服务器传送到客户端,例如,影响用于评估的示例数量。" +"Configuration parameters which allow the server to influence evaluation on " +"the client. It can be used to communicate arbitrary values from the server " +"to the client, for example, to influence the number of examples used for " +"evaluation." +msgstr "" +"允许服务器影响客户端评估的配置参数。它可用于将任意值从服务器传送到客户端,例" +"如,影响用于评估的示例数量。" #: flwr.client.numpy_client.NumPyClient.evaluate:11 of msgid "" "* **loss** (*float*) -- The evaluation loss of the model on the local " "dataset. 
* **num_examples** (*int*) -- The number of examples used for " "evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " -"arbitrary string keys to values of type bool, bytes, float, int, or " -"str. It can be used to communicate arbitrary values back to the server." +"arbitrary string keys to values of type bool, bytes, float, int, or str. " +"It can be used to communicate arbitrary values back to the server." msgstr "" -"**loss** (*float*) -- 模型在本地数据集上的评估损失值。**num_examples** (*int*) -- " -"用于评估的示例数量。**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 " -"bool、bytes、float、int 或 str 类型值的字典。它可用于将任意值传回服务器。" +"**loss** (*float*) -- 模型在本地数据集上的评估损失值。**num_examples** " +"(*int*) -- 用于评估的示例数量。**metrics** (*Dict[str, Scalar]*) -- 将任意字" +"符串键映射到 bool、bytes、float、int 或 str 类型值的字典。它可用于将任意值传" +"回服务器。" #: flwr.client.numpy_client.NumPyClient.evaluate:11 of msgid "" -"**loss** (*float*) -- The evaluation loss of the model on the local " -"dataset." +"**loss** (*float*) -- The evaluation loss of the model on the local dataset." msgstr "**loss** (*float*) -- 模型在本地数据集上的评估损失值。" #: flwr.client.numpy_client.NumPyClient.evaluate:12 of @@ -7996,41 +8323,43 @@ msgstr "**num_examples** (*int*) -- 用于评估的示例数量。" #: flwr.client.numpy_client.NumPyClient.evaluate:13 #: flwr.client.numpy_client.NumPyClient.fit:13 of msgid "" -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " -"string keys to values of type bool, bytes, float, int, or str. It can be " -"used to communicate arbitrary values back to the server." +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary string " +"keys to values of type bool, bytes, float, int, or str. It can be used to " +"communicate arbitrary values back to the server." msgstr "" -"**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 bool、bytes、float、int 或 " -"str 类型值的字典。它可用于将任意值传回服务器。" +"**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 bool、bytes、" +"float、int 或 str 类型值的字典。它可用于将任意值传回服务器。" #: flwr.client.numpy_client.NumPyClient.evaluate:19 of msgid "" -"The previous return type format (int, float, float) and the extended " -"format (int, float, float, Dict[str, Scalar]) have been deprecated and " -"removed since Flower 0.19." +"The previous return type format (int, float, float) and the extended format " +"(int, float, float, Dict[str, Scalar]) have been deprecated and removed " +"since Flower 0.19." msgstr "" -"自 Flower 0.19 起,之前的返回类型格式(int、float、float)和扩展格式(int、float、float、Dict[str," -" Scalar])已被弃用和移除。" +"自 Flower 0.19 起,之前的返回类型格式(int、float、float)和扩展格式(int、" +"float、float、Dict[str, Scalar])已被弃用和移除。" #: flwr.client.numpy_client.NumPyClient.fit:5 of msgid "" -"Configuration parameters which allow the server to influence training on " -"the client. It can be used to communicate arbitrary values from the " -"server to the client, for example, to set the number of (local) training " -"epochs." -msgstr "允许服务器影响客户端训练的配置参数。它可用于将任意值从服务器传送到客户端,例如设置(本地)训练遍历数。" +"Configuration parameters which allow the server to influence training on the " +"client. It can be used to communicate arbitrary values from the server to " +"the client, for example, to set the number of (local) training epochs." +msgstr "" +"允许服务器影响客户端训练的配置参数。它可用于将任意值从服务器传送到客户端,例" +"如设置(本地)训练遍历数。" #: flwr.client.numpy_client.NumPyClient.fit:11 of msgid "" "* **parameters** (*NDArrays*) -- The locally updated model parameters. * " "**num_examples** (*int*) -- The number of examples used for training. 
* " -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " -"string keys to values of type bool, bytes, float, int, or str. It can " -"be used to communicate arbitrary values back to the server." +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary string " +"keys to values of type bool, bytes, float, int, or str. It can be used to " +"communicate arbitrary values back to the server." msgstr "" -"**parameters** (*NDArrays*) -- 本地更新的模型参数。**num_examples** (*int*) -- " -"用于训练的示例数量。**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 " -"bool、bytes、float、int 或 str 类型值的字典。它可用于将任意值传回服务器。" +"**parameters** (*NDArrays*) -- 本地更新的模型参数。**num_examples** (*int*) " +"-- 用于训练的示例数量。**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映" +"射到 bool、bytes、float、int 或 str 类型值的字典。它可用于将任意值传回服务" +"器。" #: flwr.client.numpy_client.NumPyClient.fit:11 of msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." @@ -8042,30 +8371,31 @@ msgstr "**num_examples** (*int*) -- 用于训练的数据数量。" #: flwr.client.numpy_client.NumPyClient.get_parameters:3 of msgid "" -"Configuration parameters requested by the server. This can be used to " -"tell the client which parameters are needed along with some Scalar " -"attributes." -msgstr "服务器请求的配置参数。这可以用来告诉客户端需要哪些参数以及一些标量属性。" +"Configuration parameters requested by the server. This can be used to tell " +"the client which parameters are needed along with some Scalar attributes." +msgstr "" +"服务器请求的配置参数。这可以用来告诉客户端需要哪些参数以及一些标量属性。" #: flwr.client.numpy_client.NumPyClient.get_parameters:8 of -msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." +msgid "" +"**parameters** -- The local model parameters as a list of NumPy ndarrays." msgstr "**parameters** -- NumPy ndarrays 的本地模型参数列表。" #: flwr.client.numpy_client.NumPyClient.get_properties:3 of msgid "" -"Configuration parameters requested by the server. This can be used to " -"tell the client which properties are needed along with some Scalar " -"attributes." -msgstr "服务器请求的配置参数。这可以用来告诉客户端需要哪些属性以及一些标量属性。" +"Configuration parameters requested by the server. This can be used to tell " +"the client which properties are needed along with some Scalar attributes." +msgstr "" +"服务器请求的配置参数。这可以用来告诉客户端需要哪些属性以及一些标量属性。" #: flwr.client.numpy_client.NumPyClient.get_properties:8 of msgid "" -"**properties** -- A dictionary mapping arbitrary string keys to values of" -" type bool, bytes, float, int, or str. It can be used to communicate " +"**properties** -- A dictionary mapping arbitrary string keys to values of " +"type bool, bytes, float, int, or str. It can be used to communicate " "arbitrary property values back to the server." msgstr "" -"**properties** -- 将任意字符串键映射到 bool、bytes、float、int 或 str " -"类型值的字典。它可用于将任意属性值传回服务器。" +"**properties** -- 将任意字符串键映射到 bool、bytes、float、int 或 str 类型值" +"的字典。它可用于将任意属性值传回服务器。" #: ../../source/ref-api/flwr.client.run_client_app.rst:2 #, fuzzy @@ -8080,8 +8410,7 @@ msgstr "启动客户端" #: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of msgid "" "The IPv4 or IPv6 address of the server. If the Flower server runs on the " -"same machine on port 8080, then `server_address` would be " -"`\"[::]:8080\"`." +"same machine on port 8080, then `server_address` would be `\"[::]:8080\"`." 
msgstr "" "服务器的 IPv4 或 IPv6 地址:如果 Flower 服务器在同一台机器上运行,端口为 " "8080,则`server_address`应为`\"[::]:8080\"`。" @@ -8092,35 +8421,38 @@ msgstr "用于实例化客户端的可调用程序。(默认值:无)" #: flwr.client.app.start_client:9 of msgid "" -"An implementation of the abstract base class `flwr.client.Client` " -"(default: None)" +"An implementation of the abstract base class `flwr.client.Client` (default: " +"None)" msgstr "抽象基类 `flwr.client.Client` 的实现(默认值:无)" #: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" server. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower server needs to be started with the same value (see " -"`flwr.server.start_server`), otherwise it will not know about the " -"increased limit and block larger messages." +"The maximum length of gRPC messages that can be exchanged with the Flower " +"server. The default should be sufficient for most models. Users who train " +"very large models might need to increase this value. Note that the Flower " +"server needs to be started with the same value (see `flwr.server." +"start_server`), otherwise it will not know about the increased limit and " +"block larger messages." msgstr "" -"可与 Flower 服务器交换的 gRPC 信息的最大长度:默认值对大多数模型都足够了。训练超大模型的用户可能需要增加该值。请注意,Flower " -"服务器需要以相同的值启动(请参阅 `flwr.server.start_server`),否则它将不知道增加的限制并阻止更大的消息。" +"可与 Flower 服务器交换的 gRPC 信息的最大长度:默认值对大多数模型都足够了。训" +"练超大模型的用户可能需要增加该值。请注意,Flower 服务器需要以相同的值启动(请" +"参阅 `flwr.server.start_server`),否则它将不知道增加的限制并阻止更大的消息。" #: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 #: flwr.server.compat.app.start_driver:21 of msgid "" "The PEM-encoded root certificates as a byte string or a path string. If " -"provided, a secure connection using the certificates will be established " -"to an SSL-enabled Flower server." -msgstr "字节字符串或路径字符串形式的 PEM 编码根证书。如果提供,将使用这些证书与启用 SSL 的 Flower 服务器建立安全连接。" +"provided, a secure connection using the certificates will be established to " +"an SSL-enabled Flower server." +msgstr "" +"字节字符串或路径字符串形式的 PEM 编码根证书。如果提供,将使用这些证书与启用 " +"SSL 的 Flower 服务器建立安全连接。" #: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of #, fuzzy msgid "" -"Starts an insecure gRPC connection when True. Enables HTTPS connection " -"when False, using system certificates if `root_certificates` is None." +"Starts an insecure gRPC connection when True. Enables HTTPS connection when " +"False, using system certificates if `root_certificates` is None." msgstr "" "为 True 时启动不安全的 gRPC 连接。False 时启用 HTTPS 连接,如果 " "`root_certificates` 为 None,则使用系统证书。" @@ -8128,28 +8460,30 @@ msgstr "" #: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of msgid "" "Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " -"bidirectional streaming - 'grpc-rere': gRPC, request-response " -"(experimental) - 'rest': HTTP (experimental)" +"bidirectional streaming - 'grpc-rere': gRPC, request-response (experimental) " +"- 'rest': HTTP (experimental)" msgstr "" -"配置传输层:允许的值包括 - 'grpc-bidi': gRPC,双向流 - 'grpc-rere': gRPC,请求-响应(实验性) - " -"'rest': HTTP(实验性)" +"配置传输层:允许的值包括 - 'grpc-bidi': gRPC,双向流 - 'grpc-rere': gRPC,请" +"求-响应(实验性) - 'rest': HTTP(实验性)" #: flwr.client.app.start_client:31 of #, fuzzy msgid "" "The maximum number of times the client will try to connect to the server " -"before giving up in case of a connection error. 
If set to None, there is " -"no limit to the number of tries." -msgstr "客户端在出现连接错误时放弃连接服务器的最大尝试次数。如果设置为 \"无\"" -",则不限制尝试次数。" +"before giving up in case of a connection error. If set to None, there is no " +"limit to the number of tries." +msgstr "" +"客户端在出现连接错误时放弃连接服务器的最大尝试次数。如果设置为 \"无\",则不限" +"制尝试次数。" #: flwr.client.app.start_client:35 of #, fuzzy msgid "" -"The maximum duration before the client stops trying to connect to the " -"server in case of connection error. If set to None, there is no limit to " -"the total time." -msgstr "在出现连接错误时,客户端停止尝试连接服务器之前的最长持续时间。如果设置为 \"无" +"The maximum duration before the client stops trying to connect to the server " +"in case of connection error. If set to None, there is no limit to the total " +"time." +msgstr "" +"在出现连接错误时,客户端停止尝试连接服务器之前的最长持续时间。如果设置为 \"无" "\",则总时间没有限制。" #: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of @@ -8174,14 +8508,13 @@ msgstr "start_numpy_client" #: flwr.client.app.start_numpy_client:5 of #, fuzzy msgid "" -"This function is deprecated since 1.7.0. Use " -":code:`flwr.client.start_client` instead and first convert your " -":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " -":code:`to_client()` method." +"This function is deprecated since 1.7.0. Use :code:`flwr.client." +"start_client` instead and first convert your :code:`NumPyClient` to type :" +"code:`flwr.client.Client` by executing its :code:`to_client()` method." msgstr "" -"自 1.7.0 起该函数已被弃用。请使用 :code:`flwr.client.start_client`," -"并首先通过执行 :code:`to_client()`方法将 :code:`NumPyClient`转换为 " -":code:`flwr.client.Client`。" +"自 1.7.0 起该函数已被弃用。请使用 :code:`flwr.client.start_client`,并首先通" +"过执行 :code:`to_client()`方法将 :code:`NumPyClient`转换为 :code:`flwr." +"client.Client`。" #: flwr.client.app.start_numpy_client:13 of msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." 
@@ -8193,7 +8526,8 @@ msgstr "常见" #: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy -msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +msgid "" +":py:obj:`array_from_numpy `\\ \\(ndarray\\)" msgstr "" ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" @@ -8205,7 +8539,8 @@ msgstr "将参数对象转换为 NumPy ndarrays。" #: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy -msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" +msgid "" +":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" msgstr "" ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" @@ -8220,8 +8555,8 @@ msgid "" ":py:obj:`configure `\\ \\(identifier\\[\\, " "filename\\, host\\]\\)" msgstr "" -":py:obj:`configure `\\ \\(identifier\\[\\, filename\\" -", host\\]\\)" +":py:obj:`configure `\\ \\(identifier\\[\\, " +"filename\\, host\\]\\)" #: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.logger.configure:1 of @@ -8234,8 +8569,8 @@ msgid "" ":py:obj:`event `\\ \\(event\\_type\\[\\, " "event\\_details\\]\\)" msgstr "" -":py:obj:`event `\\ \\(event\\_type\\[\\, event\\_details\\" -"]\\)" +":py:obj:`event `\\ \\(event\\_type\\[\\, " +"event\\_details\\]\\)" #: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.telemetry.event:1 of @@ -8249,8 +8584,8 @@ msgid "" ":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " "\\*\\*kwargs\\)" msgstr "" -":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, \\*\\*" -"kwargs\\)" +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " +"\\*\\*kwargs\\)" #: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 #: of @@ -8259,7 +8594,8 @@ msgstr "以整数严重性 \"级别 \"记录 \"msg % args\"。" #: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy -msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" +msgid "" +":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" msgstr "" ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" @@ -8284,8 +8620,8 @@ msgid "" ":py:obj:`ndarrays_to_parameters `\\ " "\\(ndarrays\\)" msgstr "" -":py:obj:`ndarrays_to_parameters `\\ \\(" -"ndarrays\\)" +":py:obj:`ndarrays_to_parameters `\\ " +"\\(ndarrays\\)" #: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.ndarrays_to_parameters:1 @@ -8301,8 +8637,8 @@ msgid "" ":py:obj:`parameters_to_ndarrays `\\ " "\\(parameters\\)" msgstr "" -":py:obj:`parameters_to_ndarrays `\\ \\(" -"parameters\\)" +":py:obj:`parameters_to_ndarrays `\\ " +"\\(parameters\\)" #: ../../source/ref-api/flwr.common.rst:30::1 #: flwr.common.parameter.parameters_to_ndarrays:1 of @@ -8312,8 +8648,7 @@ msgstr "将参数对象转换为 NumPy ndarrays。" #: ../../source/ref-api/flwr.common.rst:64::1 #, fuzzy msgid "" -":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " -"data\\)" +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, data\\)" msgstr "" ":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, data\\)" @@ -8329,8 +8664,8 @@ msgid "" ":py:obj:`ClientMessage `\\ " "\\(\\[get\\_properties\\_res\\, ...\\]\\)" msgstr "" -":py:obj:`ClientMessage `\\ \\(\\[get\\_properties" -"\\_res\\, ...\\]\\)" +":py:obj:`ClientMessage `\\ " +"\\(\\[get\\_properties\\_res\\, ...\\]\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.ClientMessage:1 of @@ -8353,9 +8688,8 @@ msgid "" ":py:obj:`ConfigsRecord `\\ " "\\(\\[configs\\_dict\\, keep\\_input\\]\\)" msgstr "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +"Flower 1.0: ``start_server(..., config=flwr.server." 
+"ServerConfig(num_rounds=3, round_timeout=600.0), ...)``" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.record.configsrecord.ConfigsRecord:1 of @@ -8387,8 +8721,7 @@ msgstr "客户端向服务器发送 DisconnectRes 信息。" #: ../../source/ref-api/flwr.common.rst:64::1 #, fuzzy msgid "" -":py:obj:`EvaluateIns `\\ \\(parameters\\, " -"config\\)" +":py:obj:`EvaluateIns `\\ \\(parameters\\, config\\)" msgstr "" ":py:obj:`EvaluateIns `\\ \\(parameters\\, config\\)" @@ -8458,7 +8791,8 @@ msgstr "数据类,用于存储所发生错误的相关信息。" #: ../../source/ref-api/flwr.common.rst:64::1 #, fuzzy -msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" +msgid "" +":py:obj:`GetParametersIns `\\ \\(config\\)" msgstr "" ":py:obj:`GetParametersIns `\\ \\(config\\)" @@ -8483,7 +8817,8 @@ msgstr "要求返回参数时的响应。" #: ../../source/ref-api/flwr.common.rst:64::1 #, fuzzy -msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" +msgid "" +":py:obj:`GetPropertiesIns `\\ \\(config\\)" msgstr "" ":py:obj:`GetPropertiesIns `\\ \\(config\\)" @@ -8546,8 +8881,8 @@ msgstr "传统信息类型。" #: ../../source/ref-api/flwr.common.rst:64::1 #, fuzzy msgid "" -":py:obj:`Metadata `\\ \\(run\\_id\\, " -"message\\_id\\, src\\_node\\_id\\, ...\\)" +":py:obj:`Metadata `\\ \\(run\\_id\\, message\\_id\\, " +"src\\_node\\_id\\, ...\\)" msgstr "" ":py:obj:`Metadata `\\ \\(run\\_id\\, message\\_id\\, " "src\\_node\\_id\\, ...\\)" @@ -8564,8 +8899,8 @@ msgid "" ":py:obj:`MetricsRecord `\\ " "\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" msgstr "" -":py:obj:`MetricsRecord `\\ \\(\\[metrics\\_dict\\" -", keep\\_input\\]\\)" +":py:obj:`MetricsRecord `\\ " +"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.record.metricsrecord.MetricsRecord:1 of @@ -8581,11 +8916,11 @@ msgstr ":py:obj:`NDArray `\\" #: ../../source/ref-api/flwr.common.rst:64::1 #, fuzzy msgid "" -"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " -":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, :py:class:" +"`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" msgstr "" -"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " -":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, :py:class:" +"`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" #: ../../source/ref-api/flwr.common.rst:64::1 #, fuzzy @@ -8593,8 +8928,8 @@ msgid "" ":py:obj:`Parameters `\\ \\(tensors\\, " "tensor\\_type\\)" msgstr "" -":py:obj:`Parameters `\\ \\(tensors\\, tensor\\_type\\" -")" +":py:obj:`Parameters `\\ \\(tensors\\, " +"tensor\\_type\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.Parameters:1 of @@ -8607,8 +8942,8 @@ msgid "" ":py:obj:`ParametersRecord `\\ " "\\(\\[array\\_dict\\, keep\\_input\\]\\)" msgstr "" -":py:obj:`ParametersRecord `\\ \\(\\[" -"array\\_dict\\, keep\\_input\\]\\)" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.record.parametersrecord.ParametersRecord:1 of @@ -8632,8 +8967,8 @@ msgid "" ":py:obj:`RecordSet `\\ " "\\(\\[parameters\\_records\\, ...\\]\\)" msgstr "" -":py:obj:`RecordSet `\\ \\(\\[parameters\\_records\\, " -"...\\]\\)" +":py:obj:`RecordSet `\\ " +"\\(\\[parameters\\_records\\, ...\\]\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.record.recordset.RecordSet:1 of @@ -8647,8 +8982,8 @@ msgid "" ":py:obj:`ServerMessage `\\ " "\\(\\[get\\_properties\\_ins\\, ...\\]\\)" msgstr "" 
-":py:obj:`ServerMessage `\\ \\(\\[get\\_properties" -"\\_ins\\, ...\\]\\)" +":py:obj:`ServerMessage `\\ " +"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" #: ../../source/ref-api/flwr.common.rst:64::1 #: flwr.common.typing.ServerMessage:1 of @@ -8680,26 +9015,28 @@ msgstr "数据类,包含数组类或张量类对象的序列化数据以及相 #: flwr.common.record.parametersrecord.Array:6 of #, fuzzy msgid "" -"A string representing the data type of the serialised object (e.g. " -"`np.float32`)" +"A string representing the data type of the serialised object (e.g. `np." +"float32`)" msgstr "表示序列化对象数据类型的字符串(例如 `np.float32`)" #: flwr.common.record.parametersrecord.Array:8 of #, fuzzy msgid "" -"A list representing the shape of the unserialized array-like object. This" -" is used to deserialize the data (depending on the serialization method) " -"or simply as a metadata field." -msgstr "代表未序列化数组对象形状的列表。它可用于反序列化数据(取决于序列化方法),或" +"A list representing the shape of the unserialized array-like object. This is " +"used to deserialize the data (depending on the serialization method) or " +"simply as a metadata field." +msgstr "" +"代表未序列化数组对象形状的列表。它可用于反序列化数据(取决于序列化方法),或" "仅作为元数据字段使用。" #: flwr.common.record.parametersrecord.Array:12 of #, fuzzy msgid "" -"A string indicating the type of serialisation mechanism used to generate " -"the bytes in `data` from an array-like or tensor-like object." -msgstr "表示序列化机制类型的字符串,用于从类似数组或类似张量的对象中生成 `data` " -"中的字节。" +"A string indicating the type of serialisation mechanism used to generate the " +"bytes in `data` from an array-like or tensor-like object." +msgstr "" +"表示序列化机制类型的字符串,用于从类似数组或类似张量的对象中生成 `data` 中的" +"字节。" #: flwr.common.record.parametersrecord.Array:15 of #, fuzzy @@ -8755,16 +9092,14 @@ msgstr ":py:obj:`fit_res `\\" #: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 #, fuzzy msgid "" -":py:obj:`get_parameters_res " -"`\\" +":py:obj:`get_parameters_res `\\" msgstr "" ":py:obj:`get_parameters_res `\\" #: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 #, fuzzy msgid "" -":py:obj:`get_properties_res " -"`\\" +":py:obj:`get_properties_res `\\" msgstr "" ":py:obj:`get_properties_res `\\" @@ -8786,8 +9121,8 @@ msgstr ":py:obj:`OK `\\" #: ../../source/ref-api/flwr.common.Code.rst:26::1 #, fuzzy msgid "" -":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " -"`\\" +":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED `\\" msgstr "" ":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED `\\" @@ -8795,8 +9130,8 @@ msgstr "" #: ../../source/ref-api/flwr.common.Code.rst:26::1 #, fuzzy msgid "" -":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " -"`\\" +":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED `\\" msgstr "" ":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED `\\" @@ -8809,8 +9144,8 @@ msgstr ":py:obj:`FIT_NOT_IMPLEMENTED `\\" #: ../../source/ref-api/flwr.common.Code.rst:26::1 #, fuzzy msgid "" -":py:obj:`EVALUATE_NOT_IMPLEMENTED " -"`\\" +":py:obj:`EVALUATE_NOT_IMPLEMENTED `\\" msgstr "" ":py:obj:`EVALUATE_NOT_IMPLEMENTED `\\" @@ -8823,20 +9158,19 @@ msgstr "配置日志记录" #: flwr.common.record.configsrecord.ConfigsRecord:1 of #, fuzzy msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " -":py:class:`float`, :py:class:`str`, :py:class:`bytes`, :py:class:`bool`, " -":py:class:`~typing.List`\\ [:py:class:`int`], :py:class:`~typing.List`\\ " -"[:py:class:`float`], :py:class:`~typing.List`\\ [:py:class:`str`], " -":py:class:`~typing.List`\\ [:py:class:`bytes`], " -":py:class:`~typing.List`\\ [:py:class:`bool`]]]" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ 
[:py:class:" +"`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, :py:class:`float`, :py:" +"class:`str`, :py:class:`bytes`, :py:class:`bool`, :py:class:`~typing.List`\\ " +"[:py:class:`int`], :py:class:`~typing.List`\\ [:py:class:`float`], :py:class:" +"`~typing.List`\\ [:py:class:`str`], :py:class:`~typing.List`\\ [:py:class:" +"`bytes`], :py:class:`~typing.List`\\ [:py:class:`bool`]]]" msgstr "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ [:py:class:`str`" -", :py:obj:`~typing.Union`\\ [:py:class:`int`, :py:class:`float`, " -":py:class:`str`, :py:class:`bytes`, :py:class:`bool`, :py:class:`~typing." -"List`\\ [:py:class:`int`], :py:class:`~typing.List`\\ [:py:class:`float`], " -":py:class:`~typing.List`\\ [:py:class:`str`], :py:class:`~typing.List`\\ " -"[:py:class:`bytes`], :py:class:`~typing.List`\\ [:py:class:`bool`]]]" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ [:py:class:" +"`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, :py:class:`float`, :py:" +"class:`str`, :py:class:`bytes`, :py:class:`bool`, :py:class:`~typing.List`\\ " +"[:py:class:`int`], :py:class:`~typing.List`\\ [:py:class:`float`], :py:class:" +"`~typing.List`\\ [:py:class:`str`], :py:class:`~typing.List`\\ [:py:class:" +"`bytes`], :py:class:`~typing.List`\\ [:py:class:`bool`]]]" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of #, fuzzy @@ -8891,7 +9225,8 @@ msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" #: flwr.common.record.typeddict.TypedDict.clear:1::1 #: flwr.common.record.typeddict.TypedDict.pop:1 of #, fuzzy -msgid "If key is not found, d is returned if given, otherwise KeyError is raised." +msgid "" +"If key is not found, d is returned if given, otherwise KeyError is raised." msgstr "如果未找到 key,则返回 d(如果给定),否则引发 KeyError。" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of @@ -8900,8 +9235,8 @@ msgid "" ":py:obj:`update `\\ \\(\\[E\\, " "\\]\\*\\*F\\)" msgstr "" -":py:obj:`update `\\ \\(\\[E\\, \\]\\*\\*F\\" -")" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" #: flwr.common.record.typeddict.TypedDict.clear:1::1 #: flwr.common.record.typeddict.TypedDict.update:1 of @@ -8927,12 +9262,11 @@ msgstr "背景" #: flwr.common.context.Context:3 of #, fuzzy msgid "" -"Holds records added by the entity in a given run and that will stay " -"local. This means that the data it holds will never leave the system it's" -" running from. This can be used as an intermediate storage or scratchpad " -"when executing mods. It can also be used as a memory to access at " -"different points during the lifecycle of this entity (e.g. across " -"multiple rounds)" +"Holds records added by the entity in a given run and that will stay local. " +"This means that the data it holds will never leave the system it's running " +"from. This can be used as an intermediate storage or scratchpad when " +"executing mods. It can also be used as a memory to access at different " +"points during the lifecycle of this entity (e.g. 
across multiple rounds)" msgstr "" "保存实体在给定运行中添加的记录,这些记录将保留在本地。这意味着它保存的数据永" "远不会离开运行的系统。在执行模式时,它可用作中间存储或抓取板。它还可以作为存" @@ -9045,81 +9379,79 @@ msgstr ":py:obj:`PING `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 #, fuzzy -msgid ":py:obj:`START_CLIENT_ENTER `\\" +msgid "" +":py:obj:`START_CLIENT_ENTER `\\" msgstr "" ":py:obj:`START_CLIENT_ENTER `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 #, fuzzy -msgid ":py:obj:`START_CLIENT_LEAVE `\\" +msgid "" +":py:obj:`START_CLIENT_LEAVE `\\" msgstr "" ":py:obj:`START_CLIENT_LEAVE `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 #, fuzzy -msgid ":py:obj:`START_SERVER_ENTER `\\" +msgid "" +":py:obj:`START_SERVER_ENTER `\\" msgstr "" ":py:obj:`START_SERVER_ENTER `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 #, fuzzy -msgid ":py:obj:`START_SERVER_LEAVE `\\" +msgid "" +":py:obj:`START_SERVER_LEAVE `\\" msgstr "" ":py:obj:`START_SERVER_LEAVE `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 #, fuzzy msgid "" -":py:obj:`RUN_DRIVER_API_ENTER " -"`\\" +":py:obj:`RUN_DRIVER_API_ENTER `\\" msgstr "" ":py:obj:`RUN_DRIVER_API_ENTER `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 #, fuzzy msgid "" -":py:obj:`RUN_DRIVER_API_LEAVE " -"`\\" +":py:obj:`RUN_DRIVER_API_LEAVE `\\" msgstr "" ":py:obj:`RUN_DRIVER_API_LEAVE `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 #, fuzzy msgid "" -":py:obj:`RUN_FLEET_API_ENTER " -"`\\" +":py:obj:`RUN_FLEET_API_ENTER `\\" msgstr "" ":py:obj:`RUN_FLEET_API_ENTER `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 #, fuzzy msgid "" -":py:obj:`RUN_FLEET_API_LEAVE " -"`\\" +":py:obj:`RUN_FLEET_API_LEAVE `\\" msgstr "" ":py:obj:`RUN_FLEET_API_LEAVE `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 #, fuzzy msgid "" -":py:obj:`RUN_SUPERLINK_ENTER " -"`\\" +":py:obj:`RUN_SUPERLINK_ENTER `\\" msgstr "" ":py:obj:`RUN_SUPERLINK_ENTER `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 #, fuzzy msgid "" -":py:obj:`RUN_SUPERLINK_LEAVE " -"`\\" +":py:obj:`RUN_SUPERLINK_LEAVE `\\" msgstr "" ":py:obj:`RUN_SUPERLINK_LEAVE `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 #, fuzzy msgid "" -":py:obj:`START_SIMULATION_ENTER " -"`\\" +":py:obj:`START_SIMULATION_ENTER `\\" msgstr "" ":py:obj:`START_SIMULATION_ENTER `\\" @@ -9127,8 +9459,8 @@ msgstr "" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 #, fuzzy msgid "" -":py:obj:`START_SIMULATION_LEAVE " -"`\\" +":py:obj:`START_SIMULATION_LEAVE `\\" msgstr "" ":py:obj:`START_SIMULATION_LEAVE `\\" @@ -9141,49 +9473,48 @@ msgstr ":py:obj:`DRIVER_CONNECT `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 #, fuzzy msgid ":py:obj:`DRIVER_DISCONNECT `\\" -msgstr ":py:obj:`DRIVER_DISCONNECT `\\" +msgstr "" +":py:obj:`DRIVER_DISCONNECT `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 #, fuzzy -msgid ":py:obj:`START_DRIVER_ENTER `\\" +msgid "" +":py:obj:`START_DRIVER_ENTER `\\" msgstr "" ":py:obj:`START_DRIVER_ENTER `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 #, fuzzy -msgid ":py:obj:`START_DRIVER_LEAVE `\\" +msgid "" +":py:obj:`START_DRIVER_LEAVE `\\" msgstr "" ":py:obj:`START_DRIVER_LEAVE `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 #, fuzzy msgid "" -":py:obj:`RUN_CLIENT_APP_ENTER " -"`\\" +":py:obj:`RUN_CLIENT_APP_ENTER `\\" msgstr "" ":py:obj:`RUN_CLIENT_APP_ENTER `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 #, fuzzy msgid "" -":py:obj:`RUN_CLIENT_APP_LEAVE " -"`\\" +":py:obj:`RUN_CLIENT_APP_LEAVE `\\" 
msgstr "" ":py:obj:`RUN_CLIENT_APP_LEAVE `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 #, fuzzy msgid "" -":py:obj:`RUN_SERVER_APP_ENTER " -"`\\" +":py:obj:`RUN_SERVER_APP_ENTER `\\" msgstr "" ":py:obj:`RUN_SERVER_APP_ENTER `\\" #: ../../source/ref-api/flwr.common.EventType.rst:42::1 #, fuzzy msgid "" -":py:obj:`RUN_SERVER_APP_LEAVE " -"`\\" +":py:obj:`RUN_SERVER_APP_LEAVE `\\" msgstr "" ":py:obj:`RUN_SERVER_APP_LEAVE `\\" @@ -9291,16 +9622,17 @@ msgstr "数据类型,包括要执行的信息的相关信息。" #: flwr.common.message.Message:5 of #, fuzzy msgid "" -"Holds records either sent by another entity (e.g. sent by the server-side" -" logic to a client, or vice-versa) or that will be sent to it." -msgstr "保存由其他实体发送的记录(如由服务器端逻辑发送到客户端,反之亦然)或将发送到" +"Holds records either sent by another entity (e.g. sent by the server-side " +"logic to a client, or vice-versa) or that will be sent to it." +msgstr "" +"保存由其他实体发送的记录(如由服务器端逻辑发送到客户端,反之亦然)或将发送到" "该实体的记录。" #: flwr.common.message.Message:8 of #, fuzzy msgid "" -"A dataclass that captures information about an error that took place when" -" processing another message." +"A dataclass that captures information about an error that took place when " +"processing another message." msgstr "数据类,用于捕捉处理其他报文时发生的错误信息。" #: ../../source/ref-api/flwr.common.Message.rst:35::1 @@ -9309,8 +9641,8 @@ msgid "" ":py:obj:`create_error_reply `\\ " "\\(error\\, ttl\\)" msgstr "" -":py:obj:`create_error_reply `\\ \\(" -"error\\, ttl\\)" +":py:obj:`create_error_reply `\\ " +"\\(error\\, ttl\\)" #: ../../source/ref-api/flwr.common.Message.rst:35::1 #: flwr.common.message.Message.create_error_reply:1 of @@ -9321,8 +9653,8 @@ msgstr "构建一条回复信息,说明发生了错误。" #: ../../source/ref-api/flwr.common.Message.rst:35::1 #, fuzzy msgid "" -":py:obj:`create_reply `\\ \\(content\\," -" ttl\\)" +":py:obj:`create_reply `\\ \\(content\\, " +"ttl\\)" msgstr "" ":py:obj:`create_reply `\\ \\(content\\, " "ttl\\)" @@ -9397,9 +9729,9 @@ msgstr "该信息的有效时间。" #: flwr.common.message.Message.create_reply:3 of #, fuzzy msgid "" -"The method generates a new `Message` as a reply to this message. It " -"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " -"this message and sets 'reply_to_message' to the ID of this message." +"The method generates a new `Message` as a reply to this message. It inherits " +"'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from this message " +"and sets 'reply_to_message' to the ID of this message." msgstr "" "该方法会生成一条新的 \"信息\",作为对该信息的回复。该方法继承了该消息的 " "\"run_id\"、\"src_node_id\"、\"dst_node_id \"和 \"message_type\",并将 " @@ -9442,13 +9774,15 @@ msgstr "MessageTypeLegacy" #: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 #, fuzzy -msgid ":py:obj:`GET_PARAMETERS `\\" +msgid "" +":py:obj:`GET_PARAMETERS `\\" msgstr "" ":py:obj:`GET_PARAMETERS `\\" #: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 #, fuzzy -msgid ":py:obj:`GET_PROPERTIES `\\" +msgid "" +":py:obj:`GET_PROPERTIES `\\" msgstr "" ":py:obj:`GET_PROPERTIES `\\" @@ -9486,8 +9820,8 @@ msgstr "该信息回复的信息的标识符。" #: flwr.common.message.Metadata:13 of #, fuzzy msgid "" -"An identifier for grouping messages. In some settings, this is used as " -"the FL round." +"An identifier for grouping messages. In some settings, this is used as the " +"FL round." 
msgstr "用于分组报文的标识符。在某些设置中,它被用作 FL 轮。" #: flwr.common.Metadata.dst_node_id:1::1 @@ -9499,11 +9833,12 @@ msgstr "编码接收端要执行的操作的字符串。" #: flwr.common.message.Metadata:21 of #, fuzzy msgid "" -"An identifier that can be used when loading a particular data partition " -"for a ClientApp. Making use of this identifier is more relevant when " -"conducting simulations." -msgstr "为 ClientApp " -"加载特定数据分区时可使用的标识符。在进行模拟时,使用该标识符更有意义。" +"An identifier that can be used when loading a particular data partition for " +"a ClientApp. Making use of this identifier is more relevant when conducting " +"simulations." +msgstr "" +"为 ClientApp 加载特定数据分区时可使用的标识符。在进行模拟时,使用该标识符更有" +"意义。" #: flwr.common.Metadata.dst_node_id:1::1 of #, fuzzy @@ -9570,15 +9905,15 @@ msgstr "MetricsRecord" #: flwr.common.record.metricsrecord.MetricsRecord:1 of #, fuzzy msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " -":py:class:`float`, :py:class:`~typing.List`\\ [:py:class:`int`], " -":py:class:`~typing.List`\\ [:py:class:`float`]]]" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ [:py:class:" +"`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, :py:class:`float`, :py:" +"class:`~typing.List`\\ [:py:class:`int`], :py:class:`~typing.List`\\ [:py:" +"class:`float`]]]" msgstr "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ [:py:class:`str`" -", :py:obj:`~typing.Union`\\ [:py:class:`int`, :py:class:`float`, " -":py:class:`~typing.List`\\ [:py:class:`int`], :py:class:`~typing.List`\\ " -"[:py:class:`float`]]]" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ [:py:class:" +"`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, :py:class:`float`, :py:" +"class:`~typing.List`\\ [:py:class:`int`], :py:class:`~typing.List`\\ [:py:" +"class:`float`]]]" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of #, fuzzy @@ -9616,8 +9951,8 @@ msgid "" ":py:obj:`update `\\ \\(\\[E\\, " "\\]\\*\\*F\\)" msgstr "" -":py:obj:`update `\\ \\(\\[E\\, \\]\\*\\*F\\" -")" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of #, fuzzy @@ -9647,23 +9982,22 @@ msgstr "参数" #: flwr.common.record.parametersrecord.ParametersRecord:1 of #, fuzzy msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ [:py:class:" +"`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" msgstr "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ [:py:class:`str`" -", :py:class:`~flwr.common.record.parametersrecord.Array`]" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ [:py:class:" +"`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" #: flwr.common.record.parametersrecord.ParametersRecord:3 of #, fuzzy msgid "" -"A dataclass storing named Arrays in order. This means that it holds " -"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " -"viewed as an equivalent to PyTorch's state_dict, but holding serialised " -"tensors instead." +"A dataclass storing named Arrays in order. This means that it holds entries " +"as an OrderedDict[str, Array]. ParametersRecord objects can be viewed as an " +"equivalent to PyTorch's state_dict, but holding serialised tensors instead." 
msgstr "" -"按顺序存储命名数组的数据类。这意味着它以 OrderedDict[str, Array] " -"的形式保存条目。ParametersRecord 对象相当于 PyTorch 的 " -"state_dict,但它保存的是序列化的张量。" +"按顺序存储命名数组的数据类。这意味着它以 OrderedDict[str, Array] 的形式保存条" +"目。ParametersRecord 对象相当于 PyTorch 的 state_dict,但它保存的是序列化的张" +"量。" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of #, fuzzy @@ -9672,7 +10006,8 @@ msgstr ":py:obj:`clear `\\ \\(\\)" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of #, fuzzy -msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgid "" +":py:obj:`count_bytes `\\ \\(\\)" msgstr "" ":py:obj:`count_bytes `\\ \\(\\)" @@ -9702,8 +10037,8 @@ msgid "" ":py:obj:`update `\\ \\(\\[E\\, " "\\]\\*\\*F\\)" msgstr "" -":py:obj:`update `\\ \\(\\[E\\, \\]\\*\\*" -"F\\)" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" #: flwr.common.record.typeddict.TypedDict.clear:1::1 of #, fuzzy @@ -9713,11 +10048,12 @@ msgstr ":py:obj:`values `\\ \\(\\)" #: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of #, fuzzy msgid "" -"Note that a small amount of Bytes might also be included in this counting" -" that correspond to metadata of the serialized object (e.g. of NumPy " -"array) needed for deseralization." -msgstr "请注意,该计数中还可能包含少量字节,这些字节与序列化对象(如 NumPy " -"数组)的元数据相对应,需要进行去eralization。" +"Note that a small amount of Bytes might also be included in this counting " +"that correspond to metadata of the serialized object (e.g. of NumPy array) " +"needed for deseralization." +msgstr "" +"请注意,该计数中还可能包含少量字节,这些字节与序列化对象(如 NumPy 数组)的元" +"数据相对应,需要进行去eralization。" #: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 #, fuzzy @@ -9758,7 +10094,8 @@ msgstr "保存 MetricsRecord 实例的字典。" #: flwr.common.RecordSet.configs_records:1::1 of #, fuzzy -msgid ":py:obj:`parameters_records `\\" +msgid "" +":py:obj:`parameters_records `\\" msgstr "" ":py:obj:`parameters_records `\\" @@ -9786,16 +10123,14 @@ msgstr ":py:obj:`fit_ins `\\" #: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 #, fuzzy msgid "" -":py:obj:`get_parameters_ins " -"`\\" +":py:obj:`get_parameters_ins `\\" msgstr "" ":py:obj:`get_parameters_ins `\\" #: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 #, fuzzy msgid "" -":py:obj:`get_properties_ins " -"`\\" +":py:obj:`get_properties_ins `\\" msgstr "" ":py:obj:`get_properties_ins `\\" @@ -9841,14 +10176,15 @@ msgstr "登录" #: logging.Logger.log:3 of msgid "" -"To pass exception information, use the keyword argument exc_info with a " -"true value, e.g." +"To pass exception information, use the keyword argument exc_info with a true " +"value, e.g." msgstr "要传递异常信息,请使用带 true 值的关键字参数 exc_info,例如。" #: logging.Logger.log:6 of #, python-format msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" -msgstr "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +msgstr "" +"logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" #: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 #, fuzzy @@ -9964,9 +10300,8 @@ msgid "" ":py:obj:`Driver `\\ " "\\(\\[driver\\_service\\_address\\, ...\\]\\)" msgstr "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +"Flower 1.0: ``start_server(..., config=flwr.server." 
+"ServerConfig(num_rounds=3, round_timeout=600.0), ...)``" #: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.driver.driver.Driver:1 of @@ -9991,8 +10326,8 @@ msgid "" ":py:obj:`LegacyContext `\\ \\(state\\[\\, " "config\\, strategy\\, ...\\]\\)" msgstr "" -":py:obj:`LegacyContext `\\ \\(state\\[\\, config\\" -", strategy\\, ...\\]\\)" +":py:obj:`LegacyContext `\\ \\(state\\[\\, " +"config\\, strategy\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.compat.legacy_context.LegacyContext:1 of @@ -10025,12 +10360,11 @@ msgstr "Flower 服务器。" #: ../../source/ref-api/flwr.server.rst:41::1 #, fuzzy msgid "" -":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," -" round\\_timeout\\]\\)" +":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\, " +"round\\_timeout\\]\\)" msgstr "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +"Flower 1.0: ``start_server(..., config=flwr.server." +"ServerConfig(num_rounds=3, round_timeout=600.0), ...)``" #: ../../source/ref-api/flwr.server.rst:41::1 #: flwr.server.server_config.ServerConfig:1 of @@ -10040,7 +10374,8 @@ msgstr "Flower 服务器。" #: ../../source/ref-api/flwr.server.rst:41::1 #, fuzzy -msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" +msgid "" +":py:obj:`SimpleClientManager `\\ \\(\\)" msgstr "" ":py:obj:`SimpleClientManager `\\ \\(\\)" @@ -10091,7 +10426,8 @@ msgstr "返回所有可用客户。" #: flwr.server.client_manager.ClientManager.all:1::1 of #, fuzzy -msgid ":py:obj:`num_available `\\ \\(\\)" +msgid "" +":py:obj:`num_available `\\ \\(\\)" msgstr "" ":py:obj:`num_available `\\ \\(\\)" @@ -10119,8 +10455,8 @@ msgstr "注册 Flower ClientProxy 实例。" #: flwr.server.client_manager.ClientManager.all:1::1 of #, fuzzy msgid "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +":py:obj:`sample `\\ \\(num\\_clients\\[\\, " +"min\\_num\\_clients\\, criterion\\]\\)" msgstr "" ":py:obj:`sample `\\ \\(num\\_clients\\[\\, " "min\\_num\\_clients\\, criterion\\]\\)" @@ -10135,7 +10471,8 @@ msgstr "取样若干 Flower ClientProxy 实例。" #: flwr.server.client_manager.ClientManager.all:1::1 of #, fuzzy -msgid ":py:obj:`unregister `\\ \\(client\\)" +msgid "" +":py:obj:`unregister `\\ \\(client\\)" msgstr "" ":py:obj:`unregister `\\ \\(client\\)" @@ -10153,8 +10490,8 @@ msgid "" ":py:obj:`wait_for `\\ " "\\(num\\_clients\\, timeout\\)" msgstr "" -":py:obj:`wait_for `\\ \\(num\\_clients\\" -", timeout\\)" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\, timeout\\)" #: flwr.server.client_manager.ClientManager.all:1::1 #: flwr.server.client_manager.ClientManager.wait_for:1 @@ -10175,9 +10512,9 @@ msgstr "**num_available** -- 当前可用客户端的数量。" #, fuzzy msgid "" "**success** -- Indicating if registration was successful. False if " -"ClientProxy is already registered or can not be registered for any " -"reason." -msgstr "**success** -- 表示注册是否成功。如果 ClientProxy 已注册或因故无法注册,则为 " +"ClientProxy is already registered or can not be registered for any reason." +msgstr "" +"**success** -- 表示注册是否成功。如果 ClientProxy 已注册或因故无法注册,则为 " "False。" #: flwr.server.client_manager.ClientManager.unregister:3 @@ -10200,20 +10537,22 @@ msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" #: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order: * CA certificate. * " -"server certificate. 
* server private key." +"Tuple containing root certificate, server certificate, and private key to " +"start a secure SSL-enabled server. The tuple is expected to have three bytes " +"elements in the following order: * CA certificate. * server " +"certificate. * server private key." msgstr "" -"包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素: * CA 证书,* 服务器证书, * " -"服务器私钥。" +"包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以" +"下顺序包含三个字节元素: * CA 证书,* 服务器证书, * 服务器私钥。" #: flwr.server.app.start_server:28 flwr.server.driver.driver.Driver:6 of msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order:" -msgstr "包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素:" +"Tuple containing root certificate, server certificate, and private key to " +"start a secure SSL-enabled server. The tuple is expected to have three bytes " +"elements in the following order:" +msgstr "" +"包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以" +"下顺序包含三个字节元素:" #: flwr.server.app.start_server:32 flwr.server.driver.driver.Driver:10 of msgid "CA certificate." @@ -10241,8 +10580,8 @@ msgstr "如果已连接,请断开与超级链接的连接。" #: flwr.server.driver.driver.Driver.close:1::1 of #, fuzzy msgid "" -":py:obj:`create_message `\\ " -"\\(content\\, message\\_type\\, ...\\)" +":py:obj:`create_message `\\ \\(content\\, " +"message\\_type\\, ...\\)" msgstr "" ":py:obj:`create_message `\\ \\(content\\, " "message\\_type\\, ...\\)" @@ -10270,8 +10609,8 @@ msgid "" ":py:obj:`pull_messages `\\ " "\\(message\\_ids\\)" msgstr "" -":py:obj:`pull_messages `\\ \\(message\\_ids" -"\\)" +":py:obj:`pull_messages `\\ " +"\\(message\\_ids\\)" #: flwr.server.driver.driver.Driver.close:1::1 #: flwr.server.driver.driver.Driver.pull_messages:1 of @@ -10282,8 +10621,7 @@ msgstr "根据信息 ID 提取信息。" #: flwr.server.driver.driver.Driver.close:1::1 of #, fuzzy msgid "" -":py:obj:`push_messages `\\ " -"\\(messages\\)" +":py:obj:`push_messages `\\ \\(messages\\)" msgstr "" ":py:obj:`push_messages `\\ \\(messages\\)" @@ -10299,9 +10637,8 @@ msgid "" ":py:obj:`send_and_receive `\\ " "\\(messages\\, \\*\\[\\, timeout\\]\\)" msgstr "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +"Flower 1.0: ``start_server(..., config=flwr.server." +"ServerConfig(num_rounds=3, round_timeout=600.0), ...)``" #: flwr.server.driver.driver.Driver.close:1::1 #: flwr.server.driver.driver.Driver.send_and_receive:1 of @@ -10312,23 +10649,24 @@ msgstr "向指定的节点 ID 推送信息并提取回复信息。" #: flwr.server.driver.driver.Driver.create_message:3 of #, fuzzy msgid "" -"This method constructs a new `Message` with given content and metadata. " -"The `run_id` and `src_node_id` will be set automatically." -msgstr "本方法使用给定的内容和元数据构建新的 `Message` 。run_id \"和 \"src_node_id " +"This method constructs a new `Message` with given content and metadata. The " +"`run_id` and `src_node_id` will be set automatically." +msgstr "" +"本方法使用给定的内容和元数据构建新的 `Message` 。run_id \"和 \"src_node_id " "\"将自动设置。" #: flwr.server.driver.driver.Driver.create_message:6 of #, fuzzy msgid "" -"The content for the new message. This holds records that are to be sent " -"to the destination node." +"The content for the new message. This holds records that are to be sent to " +"the destination node." 
msgstr "新信息的内容。其中包含要发送到目的节点的记录。" #: flwr.server.driver.driver.Driver.create_message:9 of #, fuzzy msgid "" -"The type of the message, defining the action to be executed on the " -"receiving end." +"The type of the message, defining the action to be executed on the receiving " +"end." msgstr "信息类型,定义接收端要执行的操作。" #: flwr.server.driver.driver.Driver.create_message:12 of @@ -10339,17 +10677,18 @@ msgstr "信息发送目的地节点的 ID。" #: flwr.server.driver.driver.Driver.create_message:14 of #, fuzzy msgid "" -"The ID of the group to which this message is associated. In some " -"settings, this is used as the FL round." +"The ID of the group to which this message is associated. In some settings, " +"this is used as the FL round." msgstr "与该信息相关联的组的 ID。在某些设置中,它被用作 FL 轮。" #: flwr.server.driver.driver.Driver.create_message:17 of #, fuzzy msgid "" -"Time-to-live for the round trip of this message, i.e., the time from " -"sending this message to receiving a reply. It specifies the duration for " -"which the message and its potential reply are considered valid." -msgstr "此报文往返的有效时间,即从发送此报文到收到回复的时间。它规定了信息及其潜在回" +"Time-to-live for the round trip of this message, i.e., the time from sending " +"this message to receiving a reply. It specifies the duration for which the " +"message and its potential reply are considered valid." +msgstr "" +"此报文往返的有效时间,即从发送此报文到收到回复的时间。它规定了信息及其潜在回" "复被视为有效的持续时间。" #: flwr.server.driver.driver.Driver.create_message:22 of @@ -10362,13 +10701,14 @@ msgstr "**message** -- 具有指定内容和元数据的新 \"信息 \"实例。 #: flwr.server.driver.driver.Driver.pull_messages:3 of #, fuzzy msgid "" -"This method is used to collect messages from the SuperLink that " -"correspond to a set of given message IDs." +"This method is used to collect messages from the SuperLink that correspond " +"to a set of given message IDs." msgstr "该方法用于从超级链接中收集与一组给定消息 ID 相对应的消息。" #: flwr.server.driver.driver.Driver.pull_messages:6 of #, fuzzy -msgid "An iterable of message IDs for which reply messages are to be retrieved." +msgid "" +"An iterable of message IDs for which reply messages are to be retrieved." msgstr "要检索回复信息的信息 ID 的可迭代项。" #: flwr.server.driver.driver.Driver.pull_messages:9 of @@ -10379,9 +10719,10 @@ msgstr "**messages** -- 收到的信息迭代。" #: flwr.server.driver.driver.Driver.push_messages:3 of #, fuzzy msgid "" -"This method takes an iterable of messages and sends each message to the " -"node specified in `dst_node_id`." -msgstr "该方法接收一个可迭代的消息,并将每条消息发送到 `dst_node_id` 中指定的节点。" +"This method takes an iterable of messages and sends each message to the node " +"specified in `dst_node_id`." +msgstr "" +"该方法接收一个可迭代的消息,并将每条消息发送到 `dst_node_id` 中指定的节点。" #: flwr.server.driver.driver.Driver.push_messages:6 #: flwr.server.driver.driver.Driver.send_and_receive:7 of @@ -10392,31 +10733,34 @@ msgstr "要发送的信息迭代。" #: flwr.server.driver.driver.Driver.push_messages:9 of #, fuzzy msgid "" -"**message_ids** -- An iterable of IDs for the messages that were sent, " -"which can be used to pull replies." +"**message_ids** -- An iterable of IDs for the messages that were sent, which " +"can be used to pull replies." msgstr "**message_ids** -- 已发送信息的可迭代 ID,可用于提取回复信息。" #: flwr.server.driver.driver.Driver.send_and_receive:3 of #, fuzzy msgid "" -"This method sends a list of messages to their destination node IDs and " -"then waits for the replies. It continues to pull replies until either all" -" replies are received or the specified timeout duration is exceeded." 
-msgstr "该方法会向目标节点 ID 发送信息列表,然后等待回复。它会继续提取回复,直到收到" +"This method sends a list of messages to their destination node IDs and then " +"waits for the replies. It continues to pull replies until either all replies " +"are received or the specified timeout duration is exceeded." +msgstr "" +"该方法会向目标节点 ID 发送信息列表,然后等待回复。它会继续提取回复,直到收到" "所有回复或超过指定的超时时间。" #: flwr.server.driver.driver.Driver.send_and_receive:9 of #, fuzzy msgid "" "The timeout duration in seconds. If specified, the method will wait for " -"replies for this duration. If `None`, there is no time limit and the " -"method will wait until replies for all messages are received." -msgstr "超时时间(秒)。如果指定,该方法将在此期限内等待回复。如果指定为 \"无\"" -",则没有时间限制,该方法将等待直到收到所有信息的回复。" +"replies for this duration. If `None`, there is no time limit and the method " +"will wait until replies for all messages are received." +msgstr "" +"超时时间(秒)。如果指定,该方法将在此期限内等待回复。如果指定为 \"无\",则没" +"有时间限制,该方法将等待直到收到所有信息的回复。" #: flwr.server.driver.driver.Driver.send_and_receive:14 of #, fuzzy -msgid "**replies** -- An iterable of reply messages received from the SuperLink." +msgid "" +"**replies** -- An iterable of reply messages received from the SuperLink." msgstr "**replies** -- 从超级链接收到的回复信息的迭代。" #: flwr.server.driver.driver.Driver.send_and_receive:18 @@ -10430,14 +10774,14 @@ msgstr "无" #: flwr.server.driver.driver.Driver.send_and_receive:19 of #, fuzzy msgid "" -"This method uses `push_messages` to send the messages and `pull_messages`" -" to collect the replies. If `timeout` is set, the method may not return " -"replies for all sent messages. A message remains valid until its TTL, " -"which is not affected by `timeout`." +"This method uses `push_messages` to send the messages and `pull_messages` to " +"collect the replies. If `timeout` is set, the method may not return replies " +"for all sent messages. A message remains valid until its TTL, which is not " +"affected by `timeout`." 
msgstr "" -"该方法使用 `push_messages` 发送信息,并使用 `pull_messages` 收集回复。" -"如果设置了 `timeout`,该方法可能不会返回所有已发送消息的回复。消息在其 TTL " -"之前一直有效,不受 `timeout` 影响。" +"该方法使用 `push_messages` 发送信息,并使用 `pull_messages` 收集回复。如果设" +"置了 `timeout`,该方法可能不会返回所有已发送消息的回复。消息在其 TTL 之前一直" +"有效,不受 `timeout` 影响。" #: ../../source/ref-api/flwr.server.History.rst:2 #, fuzzy @@ -10447,9 +10791,8 @@ msgstr "历史" #: flwr.server.history.History.add_loss_centralized:1::1 of #, fuzzy msgid "" -":py:obj:`add_loss_centralized " -"`\\ \\(server\\_round\\, " -"loss\\)" +":py:obj:`add_loss_centralized `\\ " +"\\(server\\_round\\, loss\\)" msgstr "" ":py:obj:`add_loss_centralized `\\ " "\\(server\\_round\\, loss\\)" @@ -10463,9 +10806,8 @@ msgstr "集中评估" #: flwr.server.history.History.add_loss_centralized:1::1 of #, fuzzy msgid "" -":py:obj:`add_loss_distributed " -"`\\ \\(server\\_round\\, " -"loss\\)" +":py:obj:`add_loss_distributed `\\ " +"\\(server\\_round\\, loss\\)" msgstr "" ":py:obj:`add_loss_distributed `\\ " "\\(server\\_round\\, loss\\)" @@ -10479,9 +10821,8 @@ msgstr "增加一个损失条目(来自分布式评估)。" #: flwr.server.history.History.add_loss_centralized:1::1 of #, fuzzy msgid "" -":py:obj:`add_metrics_centralized " -"`\\ \\(server\\_round\\, " -"metrics\\)" +":py:obj:`add_metrics_centralized `\\ \\(server\\_round\\, metrics\\)" msgstr "" ":py:obj:`add_metrics_centralized `\\ \\(server\\_round\\, metrics\\)" @@ -10495,9 +10836,8 @@ msgstr "集中评估" #: flwr.server.history.History.add_loss_centralized:1::1 of #, fuzzy msgid "" -":py:obj:`add_metrics_distributed " -"`\\ \\(server\\_round\\, " -"metrics\\)" +":py:obj:`add_metrics_distributed `\\ \\(server\\_round\\, metrics\\)" msgstr "" ":py:obj:`add_metrics_distributed `\\ \\(server\\_round\\, metrics\\)" @@ -10511,9 +10851,8 @@ msgstr "定制的集中/分布式评估" #: flwr.server.history.History.add_loss_centralized:1::1 of #, fuzzy msgid "" -":py:obj:`add_metrics_distributed_fit " -"`\\ \\(server\\_round\\," -" ...\\)" +":py:obj:`add_metrics_distributed_fit `\\ \\(server\\_round\\, ...\\)" msgstr "" ":py:obj:`add_metrics_distributed_fit `\\ \\(server\\_round\\, ...\\)" @@ -10573,11 +10912,11 @@ msgstr "返回客户端(本身)。" #: flwr.server.server.Server.client_manager:1::1 of #, fuzzy msgid "" -":py:obj:`disconnect_all_clients " -"`\\ \\(timeout\\)" +":py:obj:`disconnect_all_clients `\\ \\(timeout\\)" msgstr "" -":py:obj:`disconnect_all_clients `" -"\\ \\(timeout\\)" +":py:obj:`disconnect_all_clients `\\ \\(timeout\\)" #: flwr.server.server.Server.client_manager:1::1 #: flwr.server.server.Server.disconnect_all_clients:1 of @@ -10591,8 +10930,8 @@ msgid "" ":py:obj:`evaluate_round `\\ " "\\(server\\_round\\, timeout\\)" msgstr "" -":py:obj:`evaluate_round `\\ \\(" -"server\\_round\\, timeout\\)" +":py:obj:`evaluate_round `\\ " +"\\(server\\_round\\, timeout\\)" #: flwr.server.server.Server.client_manager:1::1 #: flwr.server.server.Server.evaluate_round:1 of @@ -10614,8 +10953,8 @@ msgstr "联邦平均动量策略。" #: flwr.server.server.Server.client_manager:1::1 of #, fuzzy msgid "" -":py:obj:`fit_round `\\ \\(server\\_round\\," -" timeout\\)" +":py:obj:`fit_round `\\ \\(server\\_round\\, " +"timeout\\)" msgstr "" ":py:obj:`fit_round `\\ \\(server\\_round\\, " "timeout\\)" @@ -10632,8 +10971,8 @@ msgid "" ":py:obj:`set_max_workers `\\ " "\\(max\\_workers\\)" msgstr "" -":py:obj:`set_max_workers `\\ \\(" -"max\\_workers\\)" +":py:obj:`set_max_workers `\\ " +"\\(max\\_workers\\)" #: flwr.server.server.Server.client_manager:1::1 #: flwr.server.server.Server.set_max_workers:1 of @@ -10643,7 +10982,8 @@ msgstr "设置 ThreadPoolExecutor 使用的最大工作器数。" #: 
flwr.server.server.Server.client_manager:1::1 of #, fuzzy -msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" +msgid "" +":py:obj:`set_strategy `\\ \\(strategy\\)" msgstr "" ":py:obj:`set_strategy `\\ \\(strategy\\)" @@ -10687,8 +11027,8 @@ msgstr "服务器" #: flwr.server.server_config.ServerConfig:3 of #, fuzzy msgid "" -"All attributes have default values which allows users to configure just " -"the ones they care about." +"All attributes have default values which allows users to configure just the " +"ones they care about." msgstr "所有属性都有默认值,用户只需配置自己关心的属性即可。" #: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 @@ -10719,17 +11059,16 @@ msgstr ":py:obj:`all `\\ \\(\\)" #: flwr.server.client_manager.SimpleClientManager.all:1::1 of #, fuzzy msgid "" -":py:obj:`num_available `\\" -" \\(\\)" +":py:obj:`num_available `\\ " +"\\(\\)" msgstr "" -":py:obj:`num_available `\\ \\(" -"\\)" +":py:obj:`num_available `\\ " +"\\(\\)" #: flwr.server.client_manager.SimpleClientManager.all:1::1 of #, fuzzy msgid "" -":py:obj:`register `\\ " -"\\(client\\)" +":py:obj:`register `\\ \\(client\\)" msgstr "" ":py:obj:`register `\\ \\(client\\)" @@ -10739,8 +11078,8 @@ msgid "" ":py:obj:`sample `\\ " "\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -":py:obj:`sample `\\ \\(num\\_clients" -"\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" #: flwr.server.client_manager.SimpleClientManager.all:1::1 of #, fuzzy @@ -10748,8 +11087,8 @@ msgid "" ":py:obj:`unregister `\\ " "\\(client\\)" msgstr "" -":py:obj:`unregister `\\ \\(" -"client\\)" +":py:obj:`unregister `\\ " +"\\(client\\)" #: flwr.server.client_manager.SimpleClientManager.all:1::1 of #, fuzzy @@ -10757,14 +11096,14 @@ msgid "" ":py:obj:`wait_for `\\ " "\\(num\\_clients\\[\\, timeout\\]\\)" msgstr "" -":py:obj:`wait_for `\\ \\(" -"num\\_clients\\[\\, timeout\\]\\)" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\[\\, timeout\\]\\)" #: flwr.server.client_manager.SimpleClientManager.wait_for:3 of #, fuzzy msgid "" -"Blocks until the requested number of clients is available or until a " -"timeout is reached. Current timeout default: 1 day." +"Blocks until the requested number of clients is available or until a timeout " +"is reached. Current timeout default: 1 day." msgstr "阻塞,直到请求的客户端数量可用或达到超时为止。当前超时默认值:1 天。" #: flwr.server.client_manager.SimpleClientManager.wait_for:6 of @@ -10817,35 +11156,39 @@ msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" #: flwr.server.compat.app.start_driver:6 of #, fuzzy msgid "" -"A server implementation, either `flwr.server.Server` or a subclass " -"thereof. If no instance is provided, then `start_driver` will create one." -msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" +"A server implementation, either `flwr.server.Server` or a subclass thereof. " +"If no instance is provided, then `start_driver` will create one." +msgstr "" +"服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例," +"`start_server` 将创建一个。" #: flwr.server.app.start_server:9 flwr.server.compat.app.start_driver:10 #: flwr.simulation.app.start_simulation:28 of msgid "" "Currently supported values are `num_rounds` (int, default: 1) and " "`round_timeout` in seconds (float, default: None)." 
-msgstr "目前支持的值有:`num_rounds`(int,默认值:1)和以秒为单位的`round_timeout`(float,默认值:无)。" +msgstr "" +"目前支持的值有:`num_rounds`(int,默认值:1)和以秒为单位的`round_timeout`" +"(float,默认值:无)。" #: flwr.server.app.start_server:12 flwr.server.compat.app.start_driver:13 of msgid "" -"An implementation of the abstract base class " -"`flwr.server.strategy.Strategy`. If no strategy is provided, then " -"`start_server` will use `flwr.server.strategy.FedAvg`." +"An implementation of the abstract base class `flwr.server.strategy." +"Strategy`. If no strategy is provided, then `start_server` will use `flwr." +"server.strategy.FedAvg`." msgstr "" -"抽象基类 `flwr.server.strategy.Strategy` 的实现。如果没有提供策略,`start_server` 将使用 " -"`flwr.server.strategy.FedAvg`。" +"抽象基类 `flwr.server.strategy.Strategy` 的实现。如果没有提供策略," +"`start_server` 将使用 `flwr.server.strategy.FedAvg`。" #: flwr.server.compat.app.start_driver:17 of #, fuzzy msgid "" "An implementation of the class `flwr.server.ClientManager`. If no " -"implementation is provided, then `start_driver` will use " -"`flwr.server.SimpleClientManager`." +"implementation is provided, then `start_driver` will use `flwr.server." +"SimpleClientManager`." msgstr "" -"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_server` 将使用 " -"`flwr.server.client_manager.SimpleClientManager`。" +"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现," +"`start_server` 将使用 `flwr.server.client_manager.SimpleClientManager`。" #: flwr.server.compat.app.start_driver:25 of #, fuzzy @@ -10877,30 +11220,34 @@ msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" #: flwr.server.app.start_server:5 of msgid "" -"A server implementation, either `flwr.server.Server` or a subclass " -"thereof. If no instance is provided, then `start_server` will create one." -msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" +"A server implementation, either `flwr.server.Server` or a subclass thereof. " +"If no instance is provided, then `start_server` will create one." +msgstr "" +"服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例," +"`start_server` 将创建一个。" #: flwr.server.app.start_server:16 of msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_server` will use " -"`flwr.server.client_manager.SimpleClientManager`." +"An implementation of the abstract base class `flwr.server.ClientManager`. If " +"no implementation is provided, then `start_server` will use `flwr.server." +"client_manager.SimpleClientManager`." msgstr "" -"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_server` 将使用 " -"`flwr.server.client_manager.SimpleClientManager`。" +"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现," +"`start_server` 将使用 `flwr.server.client_manager.SimpleClientManager`。" #: flwr.server.app.start_server:21 of msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" clients. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower clients need to be started with the same value (see " -"`flwr.client.start_client`), otherwise clients will not know about the " -"increased limit and block larger messages." +"The maximum length of gRPC messages that can be exchanged with the Flower " +"clients. The default should be sufficient for most models. Users who train " +"very large models might need to increase this value. Note that the Flower " +"clients need to be started with the same value (see `flwr.client." 
+"start_client`), otherwise clients will not know about the increased limit " +"and block larger messages." msgstr "" -"可与 Flower 客户端交换的 gRPC 消息的最大长度:默认值对大多数模型都足够了。训练超大模型的用户可能需要增加该值。请注意,Flower " -"客户端需要以相同的值启动(请参阅 `flwr.client.start_client`),否则客户端将不知道已增加的限制并阻止更大的消息。" +"可与 Flower 客户端交换的 gRPC 消息的最大长度:默认值对大多数模型都足够了。训" +"练超大模型的用户可能需要增加该值。请注意,Flower 客户端需要以相同的值启动(请" +"参阅 `flwr.client.start_client`),否则客户端将不知道已增加的限制并阻止更大的" +"消息。" #: flwr.server.app.start_server:42 of msgid "Starting an insecure server:" @@ -10918,8 +11265,8 @@ msgstr "Krum 策略。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`Bulyan `\\ \\(\\*\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" +":py:obj:`Bulyan `\\ \\(\\*\\, fraction\\_fit\\, " +"fraction\\_evaluate\\, ...\\)" msgstr "" ":py:obj:`Bulyan `\\ \\(\\*\\, fraction\\_fit\\, " "fraction\\_evaluate\\, ...\\)" @@ -10935,8 +11282,8 @@ msgid "" ":py:obj:`DPFedAvgAdaptive `\\ " "\\(strategy\\, num\\_sampled\\_clients\\)" msgstr "" -":py:obj:`DPFedAvgAdaptive `\\ \\(" -"strategy\\, num\\_sampled\\_clients\\)" +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of @@ -10949,8 +11296,8 @@ msgid "" ":py:obj:`DPFedAvgFixed `\\ " "\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" msgstr "" -":py:obj:`DPFedAvgFixed `\\ \\(strategy\\" -", num\\_sampled\\_clients\\, ...\\)" +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of @@ -10960,9 +11307,8 @@ msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " -"`\\ " -"\\(...\\)" +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping `\\ \\(...\\)" msgstr "" ":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping `\\ \\(...\\)" @@ -10977,9 +11323,8 @@ msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " -"`\\ " -"\\(...\\)" +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping `\\ \\(...\\)" msgstr "" ":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping `\\ \\(...\\)" @@ -10994,9 +11339,8 @@ msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`DifferentialPrivacyClientSideFixedClipping " -"`\\ " -"\\(...\\)" +":py:obj:`DifferentialPrivacyClientSideFixedClipping `\\ \\(...\\)" msgstr "" ":py:obj:`DifferentialPrivacyClientSideFixedClipping `\\ \\(...\\)" @@ -11011,9 +11355,8 @@ msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`DifferentialPrivacyServerSideFixedClipping " -"`\\ " -"\\(...\\)" +":py:obj:`DifferentialPrivacyServerSideFixedClipping `\\ \\(...\\)" msgstr "" ":py:obj:`DifferentialPrivacyServerSideFixedClipping `\\ \\(...\\)" @@ -11059,8 +11402,8 @@ msgid "" ":py:obj:`FedAvg `\\ \\(\\*\\[\\, " "fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -":py:obj:`FedAvg `\\ \\(\\*\\[\\, fraction\\_fit" -"\\, fraction\\_evaluate\\, ...\\]\\)" +":py:obj:`FedAvg `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedavg.FedAvg:1 @@ -11071,8 +11414,8 @@ 
msgstr "联邦平均策略。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`FedAvgAndroid `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +":py:obj:`FedAvgAndroid `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" ":py:obj:`FedAvgAndroid `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" @@ -11112,8 +11455,8 @@ msgid "" ":py:obj:`FedOpt `\\ \\(\\*\\[\\, " "fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -":py:obj:`FedOpt `\\ \\(\\*\\[\\, fraction\\_fit" -"\\, fraction\\_evaluate\\, ...\\]\\)" +":py:obj:`FedOpt `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedopt.FedOpt:1 of @@ -11138,8 +11481,8 @@ msgstr "联邦优化策略。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`FedTrimmedAvg `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +":py:obj:`FedTrimmedAvg `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" ":py:obj:`FedTrimmedAvg `\\ \\(\\*\\[\\, " "fraction\\_fit\\, ...\\]\\)" @@ -11155,8 +11498,8 @@ msgid "" ":py:obj:`FedXgbBagging `\\ " "\\(\\[evaluate\\_function\\]\\)" msgstr "" -":py:obj:`FedXgbBagging `\\ \\(\\[" -"evaluate\\_function\\]\\)" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of @@ -11170,8 +11513,8 @@ msgid "" ":py:obj:`FedXgbCyclic `\\ " "\\(\\*\\*kwargs\\)" msgstr "" -":py:obj:`FedXgbCyclic `\\ \\(\\*\\*" -"kwargs\\)" +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of @@ -11185,8 +11528,8 @@ msgid "" ":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " "\\*\\*kwargs\\)" msgstr "" -":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, \\*" -"\\*kwargs\\)" +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of @@ -11210,12 +11553,11 @@ msgstr "FedYogi [Reddi 等人,2020] 策略。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`FaultTolerantFedAvg " -"`\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`FaultTolerantFedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -":py:obj:`FaultTolerantFedAvg `\\ \\" -"(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +":py:obj:`FaultTolerantFedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of @@ -11225,8 +11567,8 @@ msgstr "可配置的容错 FedAvg 策略实施。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`Krum `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +":py:obj:`Krum `\\ \\(\\*\\[\\, fraction\\_fit\\, " +"fraction\\_evaluate\\, ...\\]\\)" msgstr "" ":py:obj:`Krum `\\ \\(\\*\\[\\, fraction\\_fit\\, " "fraction\\_evaluate\\, ...\\]\\)" @@ -11240,8 +11582,8 @@ msgstr "FedYogi [Reddi 等人,2020] 策略。" #: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " -"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, q\\_param\\, " +"qffl\\_learning\\_rate\\, ...\\]\\)" msgstr "" ":py:obj:`QFedAvg `\\ \\(\\*\\[\\, q\\_param\\, " "qffl\\_learning\\_rate\\, ...\\]\\)" @@ -11414,8 +11756,8 @@ msgstr "初始全局模型参数。" #: 
flwr.server.strategy.bulyan.Bulyan:27 of msgid "" -"Byzantine resilient aggregation rule that is used as the first step of " -"the Bulyan (e.g., Krum)" +"Byzantine resilient aggregation rule that is used as the first step of the " +"Bulyan (e.g., Krum)" msgstr "Byzantine弹性聚合规则,用作 Bulyan 的第一步(如 Krum)" #: flwr.server.strategy.bulyan.Bulyan:29 of @@ -11425,12 +11767,11 @@ msgstr "第一聚类规则的参数" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" msgstr "" -":py:obj:`aggregate_evaluate `" -"\\ \\(server\\_round\\, results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 @@ -11449,8 +11790,8 @@ msgid "" ":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`aggregate_fit `\\ \\(" -"server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of @@ -11460,12 +11801,11 @@ msgstr "使用 Bulyan 技术汇总拟合结果。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`configure_evaluate `" -"\\ \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 @@ -11496,8 +11836,8 @@ msgid "" ":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`configure_fit `\\ \\(" -"server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 @@ -11531,8 +11871,8 @@ msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`evaluate `\\ \\(server\\_round" -"\\, parameters\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 @@ -11550,9 +11890,8 @@ msgstr "使用评估函数评估模型参数。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" @@ -11572,9 +11911,8 @@ msgstr "初始化全局模型参数。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" msgstr "" 
":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" @@ -11594,11 +11932,11 @@ msgstr "使用部分可用客户进行评估。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -":py:obj:`num_fit_clients `\\ \\(" -"num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 @@ -11631,9 +11969,8 @@ msgstr "该类已被弃用,将在以后的版本中删除。" #: of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" msgstr "" ":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" @@ -11656,12 +11993,11 @@ msgstr "使用给定的策略汇总评估损失。" #: of #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit `\\ \\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`aggregate_fit `" -"\\ \\(server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit `\\ \\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 @@ -11673,9 +12009,8 @@ msgstr "汇总 DPFedAvgFixed 中的训练结果并更新片段标准。" #: of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" @@ -11689,12 +12024,11 @@ msgstr "使用指定策略配置下一轮评估。" #: of #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit `\\ \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`configure_fit `" -"\\ \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit `\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of @@ -11703,8 +12037,8 @@ msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`evaluate `\\ \\(" -"server\\_round\\, parameters\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 @@ -11716,16 +12050,16 @@ msgstr "" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of -msgid "Evaluate model parameters using an evaluation function from the strategy." +msgid "" +"Evaluate model parameters using an evaluation function from the strategy." 
msgstr "使用策略中的评估函数评估模型参数。" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" @@ -11766,13 +12100,14 @@ msgstr "客户端管理器,用于管理当前连接的所有客户端。" msgid "" "**evaluate_configuration** -- A list of tuples. Each tuple in the list " "identifies a `ClientProxy` and the `EvaluateIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated evaluation." +"`ClientProxy`. If a particular `ClientProxy` is not included in this list, " +"it means that this `ClientProxy` will not participate in the next round of " +"federated evaluation." msgstr "" -"**evaluate_configuration** -- " -"一个元组列表。列表中的每个元组都标识了一个`ClientProxy`和该特定`ClientProxy`的`EvaluateIns`。如果某个特定的" -" `ClientProxy` 未包含在此列表中,则表示该 `ClientProxy` 将不参与下一轮联合评估。" +"**evaluate_configuration** -- 一个元组列表。列表中的每个元组都标识了一个" +"`ClientProxy`和该特定`ClientProxy`的`EvaluateIns`。如果某个特定的 " +"`ClientProxy` 未包含在此列表中,则表示该 `ClientProxy` 将不参与下一轮联合评" +"估。" #: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 msgid "DPFedAvgFixed" @@ -11793,9 +12128,8 @@ msgstr "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" #: of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" msgstr "" ":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" @@ -11804,8 +12138,7 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\ " +":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" ":py:obj:`aggregate_fit `\\ " @@ -11820,9 +12153,8 @@ msgstr "使用非加权汇总法汇总训练结果。" #: of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" @@ -11831,8 +12163,7 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\ " +":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_fit `\\ " @@ -11841,8 +12172,7 @@ msgstr "" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of msgid "" -"Configure the next round of training incorporating Differential Privacy " -"(DP)." +"Configure the next round of training incorporating Differential Privacy (DP)." 
msgstr "配置包含差分隐私 (DP) 的下一轮训练。" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 @@ -11852,37 +12182,36 @@ msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`evaluate `\\ \\(" -"server\\_round\\, parameters\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of msgid "" -"Configuration of the next training round includes information related to " -"DP, such as clip norm and noise stddev." +"Configuration of the next training round includes information related to DP, " +"such as clip norm and noise stddev." msgstr "下一轮训练的配置包括与 DP 相关的信息,如片段规范和噪声 stddev。" #: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 #: flwr.server.strategy.strategy.Strategy.configure_fit:10 of msgid "" -"**fit_configuration** -- A list of tuples. Each tuple in the list " -"identifies a `ClientProxy` and the `FitIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated learning." +"**fit_configuration** -- A list of tuples. Each tuple in the list identifies " +"a `ClientProxy` and the `FitIns` for this particular `ClientProxy`. If a " +"particular `ClientProxy` is not included in this list, it means that this " +"`ClientProxy` will not participate in the next round of federated learning." msgstr "" -"**fit_configuration** -- " -"一个元组列表。列表中的每个元组都标识了一个`ClientProxy`和该特定`ClientProxy`的`FitIns'。如果某个特定的`ClientProxy`不在此列表中,则表示该`ClientProxy`将不参加下一轮联合学习。" +"**fit_configuration** -- 一个元组列表。列表中的每个元组都标识了一个" +"`ClientProxy`和该特定`ClientProxy`的`FitIns'。如果某个特定的`ClientProxy`不在" +"此列表中,则表示该`ClientProxy`将不参加下一轮联合学习。" #: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 #, fuzzy @@ -11901,13 +12230,12 @@ msgstr "在客户端使用 \"adaptiveclipping_mod \"修改器。" msgid "" "In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " "performs clipping on the server-side, " -"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " -"happen on the client-side, usually by using the built-in " -"`adaptiveclipping_mod`." +"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to happen " +"on the client-side, usually by using the built-in `adaptiveclipping_mod`." msgstr "" -"与在服务器端执行剪切的 `DifferentialPrivacyServerSideAdaptiveClipping` " -"相比,`DifferentialPrivacyClientSideAdaptiveClipping` 希望在客户端进行剪切," -"通常使用内置的 `adaptiveclipping_mod`。" +"与在服务器端执行剪切的 `DifferentialPrivacyServerSideAdaptiveClipping` 相比," +"`DifferentialPrivacyClientSideAdaptiveClipping` 希望在客户端进行剪切,通常使" +"用内置的 `adaptiveclipping_mod`。" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 @@ -11947,7 +12275,8 @@ msgstr "剪切规范的初始值。默认为 0.1。安德鲁等人建议设置 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 #: of #, fuzzy -msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." 
+msgid "" +"The desired quantile of updates which should be clipped. Defaults to 0.5." msgstr "需要剪切的更新量化值。默认为 0.5。" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 @@ -11955,17 +12284,18 @@ msgstr "需要剪切的更新量化值。默认为 0.5。" #: of #, fuzzy msgid "" -"The learning rate for the clipping norm adaptation. Defaults to 0.2. " -"Andrew et al. recommends to set to 0.2." +"The learning rate for the clipping norm adaptation. Defaults to 0.2. Andrew " +"et al. recommends to set to 0.2." msgstr "剪切规范适应的学习率。默认为 0.2。安德鲁等人建议设置为 0.2。" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 #: of #, fuzzy msgid "" -"The stddev of the noise added to the count of updates currently below the" -" estimate. Andrew et al. recommends to set to `expected_num_records/20`" -msgstr "添加到当前低于估计值的更新计数中的噪声的 stddev。安德鲁等人建议设置为 " +"The stddev of the noise added to the count of updates currently below the " +"estimate. Andrew et al. recommends to set to `expected_num_records/20`" +msgstr "" +"添加到当前低于估计值的更新计数中的噪声的 stddev。安德鲁等人建议设置为 " "\"expected_num_records/20" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 @@ -11981,9 +12311,10 @@ msgstr "server.strategy" #: of #, fuzzy msgid "" -"Wrap the strategy with the " -"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" -msgstr "用 \"DifferentialPrivacyClientSideAdaptiveClipping \"包装器对策略进行包装:" +"Wrap the strategy with the `DifferentialPrivacyClientSideAdaptiveClipping` " +"wrapper:" +msgstr "" +"用 \"DifferentialPrivacyClientSideAdaptiveClipping \"包装器对策略进行包装:" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 #: of @@ -11995,25 +12326,25 @@ msgstr "在客户端,将 \"adaptiveclipping_mod \"添加到客户端模块中 #: of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" ":py:obj:`aggregate_evaluate `\\ \\(" -"server\\_round\\, results\\, ...\\)" +"DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate>`\\ " +"\\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" ":py:obj:`aggregate_fit `\\ \\(" -"server\\_round\\, results\\, failures\\)" +"DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit>`\\ " +"\\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 @@ -12028,49 +12359,49 @@ msgstr "汇总 DPFedAvgFixed 中的训练结果并更新片段标准。" #: of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_evaluate `\\ \\(" -"server\\_round\\, parameters\\, ...\\)" +"DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate>`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\" -" 
\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_fit `\\ \\(" -"server\\_round\\, parameters\\, ...\\)" +"DifferentialPrivacyClientSideAdaptiveClipping.configure_fit>`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" ":py:obj:`evaluate `\\ \\(server\\_round" -"\\, parameters\\)" +"DifferentialPrivacyClientSideAdaptiveClipping.evaluate>`\\ " +"\\(server\\_round\\, parameters\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ " +"\\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(" -"client\\_manager\\)" +"DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters>`\\ " +"\\(client\\_manager\\)" #: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 #, fuzzy @@ -12089,21 +12420,22 @@ msgstr "在客户端使用 `fixedclipping_mod` 修改器。" msgid "" "In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " "performs clipping on the server-side, " -"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " -"on the client-side, usually by using the built-in `fixedclipping_mod`." +"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen on " +"the client-side, usually by using the built-in `fixedclipping_mod`." msgstr "" -"与在服务器端执行剪切的 \"DifferentialPrivacyServerSideFixedClipping \"相比," -"\"DifferentialPrivacyClientSideFixedClipping \"希望在客户端进行剪切," -"通常是使用内置的 \"fixedclipping_mod\"。" +"与在服务器端执行剪切的 \"DifferentialPrivacyServerSideFixedClipping \"相" +"比,\"DifferentialPrivacyClientSideFixedClipping \"希望在客户端进行剪切,通常" +"是使用内置的 \"fixedclipping_mod\"。" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 #: of #, fuzzy msgid "" -"The noise multiplier for the Gaussian mechanism for model updates. A " -"value of 1.0 or higher is recommended for strong privacy." -msgstr "模型更新高斯机制的噪声乘数。建议使用 1.0 或更高的值,以获得较强的隐私性。" +"The noise multiplier for the Gaussian mechanism for model updates. A value " +"of 1.0 or higher is recommended for strong privacy." 
+msgstr "" +"模型更新高斯机制的噪声乘数。建议使用 1.0 或更高的值,以获得较强的隐私性。" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 @@ -12130,25 +12462,25 @@ msgstr "在客户端,将 \"fixedclipping_mod \"添加到客户端模块中:" #: of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" ":py:obj:`aggregate_evaluate `\\ \\(" -"server\\_round\\, results\\, ...\\)" +"DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate>`\\ " +"\\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" ":py:obj:`aggregate_fit `\\ \\(" -"server\\_round\\, results\\, failures\\)" +"DifferentialPrivacyClientSideFixedClipping.aggregate_fit>`\\ " +"\\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 @@ -12161,33 +12493,33 @@ msgstr "然后将汇总结果序列化:" #: of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_evaluate `\\ \\(" -"server\\_round\\, parameters\\, ...\\)" +"DifferentialPrivacyClientSideFixedClipping.configure_evaluate>`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_fit `\\ \\(" -"server\\_round\\, parameters\\, ...\\)" +"DifferentialPrivacyClientSideFixedClipping.configure_fit>`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +":py:obj:`evaluate `\\ \\(server\\_round\\, " +"parameters\\)" msgstr "" ":py:obj:`evaluate `\\ \\(server\\_round\\, " @@ -12197,13 +12529,13 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ " +"\\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(" -"client\\_manager\\)" +"DifferentialPrivacyClientSideFixedClipping.initialize_parameters>`\\ " +"\\(client\\_manager\\)" #: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 #, fuzzy @@ -12214,10 +12546,10 @@ msgstr "DifferentialPrivacyServerSideAdaptiveClipping" #: of #, fuzzy msgid "" -"The standard deviation of the noise added to the count of updates below " -"the estimate. Andrew et al. recommends to set to " -"`expected_num_records/20`" -msgstr "添加到低于估计值的更新计数中的噪声标准偏差。安德鲁等人建议设置为 " +"The standard deviation of the noise added to the count of updates below the " +"estimate. Andrew et al. 
recommends to set to `expected_num_records/20`" +msgstr "" +"添加到低于估计值的更新计数中的噪声标准偏差。安德鲁等人建议设置为 " "\"expected_num_records/20" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 @@ -12232,73 +12564,73 @@ msgstr "用 DifferentialPrivacyServerSideAdaptiveClipping 封装器封装策略" #: of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" ":py:obj:`aggregate_evaluate `\\ \\(" -"server\\_round\\, results\\, ...\\)" +"DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate>`\\ " +"\\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" ":py:obj:`aggregate_fit `\\ \\(" -"server\\_round\\, results\\, failures\\)" +"DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit>`\\ " +"\\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_evaluate `\\ \\(" -"server\\_round\\, parameters\\, ...\\)" +"DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate>`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_fit `\\ \\(" -"server\\_round\\, parameters\\, ...\\)" +"DifferentialPrivacyServerSideAdaptiveClipping.configure_fit>`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" ":py:obj:`evaluate `\\ \\(server\\_round" -"\\, parameters\\)" +"DifferentialPrivacyServerSideAdaptiveClipping.evaluate>`\\ " +"\\(server\\_round\\, parameters\\)" #: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ " +"\\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(" -"client\\_manager\\)" +"DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters>`\\ " +"\\(client\\_manager\\)" #: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 #, fuzzy @@ -12309,33 +12641,32 @@ msgstr "差分隐私" #: of #, fuzzy msgid "" -"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " -"wrapper" +"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping wrapper" msgstr "用 DifferentialPrivacyServerSideFixedClipping 封装器封装策略" #: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" ":py:obj:`aggregate_evaluate `\\ \\(" -"server\\_round\\, results\\, ...\\)" +"DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate>`\\ " +"\\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" ":py:obj:`aggregate_fit `\\ \\(" -"server\\_round\\, results\\, failures\\)" +"DifferentialPrivacyServerSideFixedClipping.aggregate_fit>`\\ " +"\\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 @@ -12348,33 +12679,33 @@ msgstr "计算更新、剪辑并将其传递给聚合。" #: of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_evaluate `\\ \\(" -"server\\_round\\, parameters\\, ...\\)" +"DifferentialPrivacyServerSideFixedClipping.configure_evaluate>`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_fit `\\ \\(" -"server\\_round\\, parameters\\, ...\\)" +"DifferentialPrivacyServerSideFixedClipping.configure_fit>`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +":py:obj:`evaluate `\\ \\(server\\_round\\, " +"parameters\\)" msgstr "" ":py:obj:`evaluate `\\ \\(server\\_round\\, " @@ -12384,13 +12715,13 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ " +"\\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(" -"client\\_manager\\)" +"DifferentialPrivacyServerSideFixedClipping.initialize_parameters>`\\ " +"\\(client\\_manager\\)" #: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 #: of @@ -12407,9 +12738,8 @@ msgstr "server.strategy.FaultTolerantFedAvg" #: of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" msgstr "" ":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" @@ -12418,9 +12748,8 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit `\\ \\(server\\_round\\, results\\, failures\\)" msgstr "" ":py:obj:`aggregate_fit `\\ \\(server\\_round\\, results\\, failures\\)" @@ 
-12445,9 +12774,8 @@ msgstr "使用加权平均法汇总拟合结果。" #: of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" @@ -12456,9 +12784,8 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit `\\ \\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_fit `\\ \\(server\\_round\\, parameters\\, ...\\)" @@ -12470,16 +12797,15 @@ msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`evaluate `\\ \\(" -"server\\_round\\, parameters\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" #: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" @@ -12488,9 +12814,8 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" msgstr "" ":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" @@ -12499,9 +12824,8 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" msgstr "" ":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" @@ -12558,9 +12882,8 @@ msgstr "控制算法的适应度。默认为 1e-9。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" msgstr "" ":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" @@ -12568,18 +12891,17 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_fit `\\" -" \\(server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`aggregate_fit `\\ \\(" -"server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" @@ -12587,11 +12909,11 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`configure_fit `\\" -" \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`configure_fit `\\ \\(" -"server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy @@ -12599,15 +12921,14 @@ msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`evaluate `\\ \\(" -"server\\_round\\, parameters\\)" 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" @@ -12615,9 +12936,8 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" msgstr "" ":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" @@ -12625,12 +12945,11 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" msgstr "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 #, fuzzy @@ -12650,9 +12969,8 @@ msgstr "第二动量参数。默认为 0.99。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" msgstr "" ":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" @@ -12663,15 +12981,14 @@ msgid "" ":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`aggregate_fit `\\ \\(" -"server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" @@ -12682,8 +12999,8 @@ msgid "" ":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`configure_fit `\\ \\(" -"server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy @@ -12691,15 +13008,14 @@ msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`evaluate `\\ \\(" -"server\\_round\\, parameters\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" @@ -12707,9 +13023,8 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" msgstr "" ":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" @@ -12717,12 +13032,11 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " +":py:obj:`num_fit_clients `\\ " 
"\\(num\\_available\\_clients\\)" msgstr "" -":py:obj:`num_fit_clients `\\ \\" -"(num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 #, fuzzy @@ -12738,8 +13052,8 @@ msgstr "实施基于 https://arxiv.org/abs/1602.05629" #: of msgid "" "Fraction of clients used during training. In case `min_fit_clients` is " -"larger than `fraction_fit * available_clients`, `min_fit_clients` will " -"still be sampled. Defaults to 1.0." +"larger than `fraction_fit * available_clients`, `min_fit_clients` will still " +"be sampled. Defaults to 1.0." msgstr "" "训练过程中使用的客户端比例。如果 `min_fit_clients` 大于 `fraction_fit * " "available_clients`,则仍会对 `min_fit_clients` 进行采样。默认为 1.0。" @@ -12747,13 +13061,13 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 #: of msgid "" -"Fraction of clients used during validation. In case " -"`min_evaluate_clients` is larger than `fraction_evaluate * " -"available_clients`, `min_evaluate_clients` will still be sampled. " -"Defaults to 1.0." +"Fraction of clients used during validation. In case `min_evaluate_clients` " +"is larger than `fraction_evaluate * available_clients`, " +"`min_evaluate_clients` will still be sampled. Defaults to 1.0." msgstr "" -"验证过程中使用的客户端的比例。如果 `min_evaluate_clients` 大于 `fraction_evaluate * " -"available_clients`,则仍会对 `min_evaluate_clients` 进行采样。默认为 1.0。" +"验证过程中使用的客户端的比例。如果 `min_evaluate_clients` 大于 " +"`fraction_evaluate * available_clients`,则仍会对 `min_evaluate_clients` 进行" +"采样。默认为 1.0。" #: flwr.server.strategy.fedavg.FedAvg:33 of #, fuzzy @@ -12763,12 +13077,11 @@ msgstr "启用(真)或禁用(假)模型更新的就地聚合。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" msgstr "" -":py:obj:`aggregate_evaluate `" -"\\ \\(server\\_round\\, results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy @@ -12776,18 +13089,17 @@ msgid "" ":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`aggregate_fit `\\ \\(" -"server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`configure_evaluate `" -"\\ \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy @@ -12795,8 +13107,8 @@ msgid "" ":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`configure_fit `\\ \\(" -"server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy @@ -12804,15 +13116,14 @@ msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`evaluate `\\ \\(server\\_round" -"\\, parameters\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" #: 
flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" @@ -12820,9 +13131,8 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" msgstr "" ":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" @@ -12830,11 +13140,11 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -":py:obj:`num_fit_clients `\\ \\(" -"num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 #, fuzzy @@ -12845,9 +13155,8 @@ msgstr "DPFedAvgAdaptive" #: of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" msgstr "" ":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" @@ -12856,8 +13165,7 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\ " +":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" ":py:obj:`aggregate_fit `\\ " @@ -12867,8 +13175,8 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`bytes_to_ndarray " -"`\\ \\(tensor\\)" +":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" msgstr "" ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" @@ -12883,9 +13191,8 @@ msgstr "从字节反序列化 NumPy ndarray。" #: of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" @@ -12894,8 +13201,7 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\ " +":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_fit `\\ " @@ -12908,16 +13214,15 @@ msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`evaluate `\\ \\(" -"server\\_round\\, parameters\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" #: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" @@ -12926,8 +13231,8 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`ndarray_to_bytes " -"`\\ \\(ndarray\\)" +":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" msgstr "" ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" @@ -12942,9 +13247,8 @@ msgstr "将 NumPy ndarray 序列化为字节。" #: of #, fuzzy msgid "" -":py:obj:`ndarrays_to_parameters " -"`\\ " -"\\(ndarrays\\)" +":py:obj:`ndarrays_to_parameters `\\ \\(ndarrays\\)" msgstr "" ":py:obj:`ndarrays_to_parameters `\\ \\(ndarrays\\)" @@ -12953,9 +13257,8 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_evaluation_clients `\\ 
\\(num\\_available\\_clients\\)" msgstr "" ":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" @@ -12964,9 +13267,8 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" msgstr "" ":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" @@ -12975,9 +13277,8 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`parameters_to_ndarrays " -"`\\ " -"\\(parameters\\)" +":py:obj:`parameters_to_ndarrays `\\ \\(parameters\\)" msgstr "" ":py:obj:`parameters_to_ndarrays `\\ \\(parameters\\)" @@ -13001,8 +13302,7 @@ msgstr "实施基于 https://arxiv.org/pdf/1909.06335.pdf" #: flwr.server.strategy.fedavgm.FedAvgM:25 of msgid "" -"Server-side learning rate used in server-side optimization. Defaults to " -"1.0." +"Server-side learning rate used in server-side optimization. Defaults to 1.0." msgstr "服务器端优化中使用的服务器端学习率。默认为 1.0。" #: flwr.server.strategy.fedavgm.FedAvgM:28 of @@ -13012,9 +13312,8 @@ msgstr "用于 FedAvgM 的服务器端动量因子。默认为 0.0。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" msgstr "" ":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" @@ -13025,15 +13324,14 @@ msgid "" ":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`aggregate_fit `\\ \\(" -"server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" @@ -13044,8 +13342,8 @@ msgid "" ":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`configure_fit `\\ \\(" -"server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy @@ -13053,15 +13351,14 @@ msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`evaluate `\\ \\(" -"server\\_round\\, parameters\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" @@ -13069,9 +13366,8 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" msgstr "" ":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" @@ -13079,12 +13375,11 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " +":py:obj:`num_fit_clients `\\ " "\\(num\\_available\\_clients\\)" msgstr "" -":py:obj:`num_fit_clients `\\ \\" -"(num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\ " 
+"\\(num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 #, fuzzy @@ -13094,9 +13389,8 @@ msgstr "联邦医保" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" msgstr "" ":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" @@ -13107,8 +13401,8 @@ msgid "" ":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`aggregate_fit `\\ \\(" -"server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of @@ -13118,9 +13412,8 @@ msgstr "使用中位数汇总拟合结果。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" @@ -13131,8 +13424,8 @@ msgid "" ":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`configure_fit `\\ \\(" -"server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy @@ -13140,15 +13433,14 @@ msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`evaluate `\\ \\(" -"server\\_round\\, parameters\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" @@ -13156,9 +13448,8 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" msgstr "" ":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" @@ -13166,8 +13457,7 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " +":py:obj:`num_fit_clients `\\ " "\\(num\\_available\\_clients\\)" msgstr "" ":py:obj:`num_fit_clients `\\ " @@ -13189,12 +13479,11 @@ msgstr "第二动量参数。默认为 0.0。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" msgstr "" -":py:obj:`aggregate_evaluate `" -"\\ \\(server\\_round\\, results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy @@ -13202,18 +13491,17 @@ msgid "" ":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`aggregate_fit `\\ \\(" -"server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" #: 
flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`configure_evaluate `" -"\\ \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy @@ -13221,8 +13509,8 @@ msgid "" ":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`configure_fit `\\ \\(" -"server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy @@ -13230,15 +13518,14 @@ msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`evaluate `\\ \\(server\\_round" -"\\, parameters\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" @@ -13246,9 +13533,8 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" msgstr "" ":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" @@ -13256,11 +13542,11 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -":py:obj:`num_fit_clients `\\ \\(" -"num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 #, fuzzy @@ -13273,10 +13559,12 @@ msgstr "实施基于 https://arxiv.org/abs/1812.06127" #: flwr.server.strategy.fedprox.FedProx:5 of msgid "" -"The strategy in itself will not be different than FedAvg, the client " -"needs to be adjusted. A proximal term needs to be added to the loss " -"function during the training:" -msgstr "策略本身与 FedAvg 并无不同,客户端需要进行调整。在训练过程中,需要在损失函数中添加一个近端项:" +"The strategy in itself will not be different than FedAvg, the client needs " +"to be adjusted. A proximal term needs to be added to the loss function " +"during the training:" +msgstr "" +"策略本身与 FedAvg 并无不同,客户端需要进行调整。在训练过程中,需要在损失函数" +"中添加一个近端项:" #: flwr.server.strategy.fedprox.FedProx:9 of msgid "" @@ -13310,18 +13598,17 @@ msgstr "其中,\"global_params \"是训练前的参数副本。" msgid "" "The weight of the proximal term used in the optimization. 0.0 makes this " "strategy equivalent to FedAvg, and the higher the coefficient, the more " -"regularization will be used (that is, the client parameters will need to " -"be closer to the server parameters during training)." +"regularization will be used (that is, the client parameters will need to be " +"closer to the server parameters during training)." 
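
The FedProx entries above state that the server-side strategy behaves like FedAvg and that the client has to add a proximal term, weighted by the coefficient described in the docstring, to its training loss. As a rough sketch of that client-side adjustment (assuming a PyTorch model and hypothetical ``proximal_mu``, ``loader``, ``optimizer``, and ``criterion`` objects, none of which come from this patch)::

    def train_with_proximal_term(model, loader, optimizer, criterion, proximal_mu):
        # Freeze a copy of the parameters received from the server
        # (the "global_params" referred to in the docstring above).
        global_params = [p.detach().clone() for p in model.parameters()]
        model.train()
        for features, labels in loader:
            optimizer.zero_grad()
            loss = criterion(model(features), labels)
            # Proximal term: (mu / 2) * || w - w_global ||^2
            proximal = sum(
                (local - frozen).pow(2).sum()
                for local, frozen in zip(model.parameters(), global_params)
            )
            (loss + (proximal_mu / 2.0) * proximal).backward()
            optimizer.step()

Setting ``proximal_mu`` to 0.0 recovers plain FedAvg behaviour, matching the description above.
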
msgstr "" -"优化中使用的近端项权重。0.0 使该策略等同于 " -"FedAvg,系数越大,使用的正则化就越多(也就是说,在训练过程中,客户端参数需要更接近服务器参数)。" +"优化中使用的近端项权重。0.0 使该策略等同于 FedAvg,系数越大,使用的正则化就越" +"多(也就是说,在训练过程中,客户端参数需要更接近服务器参数)。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" msgstr "" ":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" @@ -13332,15 +13619,14 @@ msgid "" ":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`aggregate_fit `\\ \\(" -"server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" @@ -13351,8 +13637,8 @@ msgid "" ":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`configure_fit `\\ \\(" -"server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy @@ -13360,15 +13646,14 @@ msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`evaluate `\\ \\(" -"server\\_round\\, parameters\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" @@ -13376,9 +13661,8 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" msgstr "" ":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" @@ -13386,12 +13670,11 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " +":py:obj:`num_fit_clients `\\ " "\\(num\\_available\\_clients\\)" msgstr "" -":py:obj:`num_fit_clients `\\ \\" -"(num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" #: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of msgid "Sends the proximal factor mu to the clients" @@ -13414,9 +13697,8 @@ msgstr "截取分布两个尾部的分数。默认为 0.2。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" msgstr "" ":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" @@ -13424,8 +13706,7 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\ " +":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" ":py:obj:`aggregate_fit `\\ " @@ -13439,9 +13720,8 @@ msgstr "使用修剪平均值汇总拟合结果。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, 
fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" @@ -13449,8 +13729,7 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\ " +":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_fit `\\ " @@ -13462,15 +13741,14 @@ msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`evaluate `\\ \\(" -"server\\_round\\, parameters\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" @@ -13478,9 +13756,8 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" msgstr "" ":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" @@ -13488,9 +13765,8 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" msgstr "" ":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" @@ -13504,9 +13780,8 @@ msgstr "FedXgbBagging" #: of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" msgstr "" ":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" @@ -13524,8 +13799,7 @@ msgstr "采用加权平均法计算评估损失总额。" #: of #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\ " +":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" ":py:obj:`aggregate_fit `\\ " @@ -13543,9 +13817,8 @@ msgstr "使用 Bulyan 技术汇总拟合结果。" #: of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" @@ -13554,8 +13827,7 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\ " +":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_fit `\\ " @@ -13568,16 +13840,15 @@ msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`evaluate `\\ \\(" -"server\\_round\\, parameters\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" #: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" @@ -13586,9 +13857,8 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" msgstr "" 
":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" @@ -13597,9 +13867,8 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" msgstr "" ":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" @@ -13613,9 +13882,8 @@ msgstr "FedXgbCyclic" #: of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" msgstr "" ":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" @@ -13624,9 +13892,8 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\," -" results\\, failures\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" ":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" @@ -13635,9 +13902,8 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" @@ -13646,9 +13912,8 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" @@ -13660,16 +13925,15 @@ msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`evaluate `\\ \\(" -"server\\_round\\, parameters\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" #: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" @@ -13678,9 +13942,8 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" msgstr "" ":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" @@ -13689,12 +13952,11 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" msgstr "" -":py:obj:`num_fit_clients `" -"\\ \\(num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 #, fuzzy @@ -13705,8 +13967,7 @@ msgstr "DP-FedAvg" #, fuzzy msgid "" "This strategy is deprecated, but a copy of it is available in Flower " -"Baselines: " -"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." +"Baselines: https://github.com/adap/flower/tree/main/baselines/hfedxgboost." 
msgstr "" "该策略已被弃用,但在 Flower Baselines: https://github.com/adap/flower/tree/" "main/baselines/hfedxgboost 中有其副本。" @@ -13714,9 +13975,8 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" msgstr "" ":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" @@ -13724,19 +13984,17 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\, " -"results\\, failures\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`aggregate_fit `\\ \\" -"(server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" @@ -13744,12 +14002,11 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`configure_fit `\\ \\" -"(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy @@ -13757,15 +14014,14 @@ msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`evaluate `\\ \\(" -"server\\_round\\, parameters\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" @@ -13773,9 +14029,8 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" msgstr "" ":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" @@ -13783,12 +14038,11 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" msgstr "" -":py:obj:`num_fit_clients `" -"\\ \\(num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 #, fuzzy @@ -13813,9 +14067,8 @@ msgstr "控制算法的适应度。默认为 1e-9。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" msgstr "" ":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" @@ -13826,15 +14079,14 @@ msgid "" ":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, 
results\\, failures\\)" msgstr "" -":py:obj:`aggregate_fit `\\ \\(" -"server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" @@ -13845,8 +14097,8 @@ msgid "" ":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`configure_fit `\\ \\(" -"server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy @@ -13854,15 +14106,14 @@ msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`evaluate `\\ \\(" -"server\\_round\\, parameters\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" @@ -13870,9 +14121,8 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" msgstr "" ":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" @@ -13880,12 +14130,11 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " +":py:obj:`num_fit_clients `\\ " "\\(num\\_available\\_clients\\)" msgstr "" -":py:obj:`num_fit_clients `\\ \\" -"(num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 #, fuzzy @@ -13899,19 +14148,20 @@ msgstr "实施基于 https://arxiv.org/abs/2304.07537。" #: flwr.server.strategy.krum.Krum:17 of msgid "" -"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" -" that case classical Krum is applied." -msgstr "求平均值前保留的客户端数量(MultiKrum)。默认值为 0,在这种情况下会应用经典 Krum。" +"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in " +"that case classical Krum is applied." 
+msgstr "" +"求平均值前保留的客户端数量(MultiKrum)。默认值为 0,在这种情况下会应用经典 " +"Krum。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" msgstr "" -":py:obj:`aggregate_evaluate `\\" -" \\(server\\_round\\, results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy @@ -13919,8 +14169,8 @@ msgid "" ":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`aggregate_fit `\\ \\(" -"server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 #: flwr.server.strategy.krum.Krum.aggregate_fit:1 of @@ -13930,12 +14180,11 @@ msgstr "使用 Krum 汇总拟合结果。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`configure_evaluate `\\" -" \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy @@ -13943,8 +14192,8 @@ msgid "" ":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`configure_fit `\\ \\(" -"server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy @@ -13952,15 +14201,14 @@ msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`evaluate `\\ \\(server\\_round\\" -", parameters\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" @@ -13968,9 +14216,8 @@ msgstr "" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" msgstr "" ":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" @@ -13981,8 +14228,8 @@ msgid "" ":py:obj:`num_fit_clients `\\ " "\\(num\\_available\\_clients\\)" msgstr "" -":py:obj:`num_fit_clients `\\ \\(" -"num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 #, fuzzy @@ -13992,9 +14239,8 @@ msgstr "DP-FedAvg" #: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" msgstr "" ":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" @@ -14005,15 +14251,14 @@ msgid "" ":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`aggregate_fit `\\ \\(" -"server\\_round\\, results\\, 
failures\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" @@ -14024,8 +14269,8 @@ msgid "" ":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`configure_fit `\\ \\(" -"server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of #, fuzzy @@ -14033,15 +14278,14 @@ msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`evaluate `\\ \\(" -"server\\_round\\, parameters\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" #: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" @@ -14049,9 +14293,8 @@ msgstr "" #: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" msgstr "" ":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" @@ -14059,12 +14302,11 @@ msgstr "" #: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " +":py:obj:`num_fit_clients `\\ " "\\(num\\_available\\_clients\\)" msgstr "" -":py:obj:`num_fit_clients `\\ \\" -"(num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" #: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 #, fuzzy @@ -14075,9 +14317,8 @@ msgstr "Krum 策略。" #: of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" msgstr "" ":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" @@ -14095,8 +14336,8 @@ msgid "" ":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`aggregate_fit `\\ \\(" -"server\\_round\\, results\\, failures\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 #: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of @@ -14107,9 +14348,8 @@ msgstr "汇总训练结果。" #: of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" @@ -14121,8 +14361,8 @@ msgid "" ":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`configure_fit `\\ \\(" -"server\\_round\\, parameters\\, ...\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 #: of @@ -14131,8 +14371,8 @@ msgid "" ":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" 
-":py:obj:`evaluate `\\ \\(" -"server\\_round\\, parameters\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 #: flwr.server.strategy.strategy.Strategy.evaluate:1 of @@ -14143,9 +14383,8 @@ msgstr "评估当前的模型参数。" #: of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" @@ -14157,19 +14396,22 @@ msgstr "初始化(全局)模型参数。" #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " -"one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." +"Successful updates from the previously selected and configured clients. Each " +"pair of `(ClientProxy, FitRes` constitutes a successful update from one of " +"the previously selected clients. Not that not all previously selected " +"clients are necessarily included in this list: a client might drop out and " +"not submit a result. For each client that did not submit an update, there " +"should be an `Exception` in `failures`." msgstr "" -"从先前选定和配置的客户端进行的成功更新。每一对`(ClientProxy, " -"FitRes)`都是来自先前选定客户端的一次成功更新。但并非所有先前选定的客户机都一定包含在此列表中:客户机可能会退出,不提交结果。对于每个没有提交更新的客户端,`failures`中都应该有一个`Exception`。" +"从先前选定和配置的客户端进行的成功更新。每一对`(ClientProxy, FitRes)`都是来自" +"先前选定客户端的一次成功更新。但并非所有先前选定的客户机都一定包含在此列表" +"中:客户机可能会退出,不提交结果。对于每个没有提交更新的客户端,`failures`中" +"都应该有一个`Exception`。" #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 #: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of -msgid "Exceptions that occurred while the server was waiting for client updates." +msgid "" +"Exceptions that occurred while the server was waiting for client updates." msgstr "服务器等待客户端更新时发生的异常。" #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of @@ -14180,29 +14422,31 @@ msgstr "**aggregation_result** -- 汇总的评估结果。聚合通常使用某 #: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" -" one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." +"Successful updates from the previously selected and configured clients. Each " +"pair of `(ClientProxy, FitRes)` constitutes a successful update from one of " +"the previously selected clients. Not that not all previously selected " +"clients are necessarily included in this list: a client might drop out and " +"not submit a result. For each client that did not submit an update, there " +"should be an `Exception` in `failures`." 
msgstr "" -"来自先前选定和配置的客户端的成功更新。每一对`(ClientProxy, " -"FitRes)`都构成先前选定的客户端之一的一次成功更新。但并非所有先前选定的客户机都一定包含在此列表中:客户机可能会退出,不提交结果。对于每个没有提交更新的客户端,\"失败" -" \"中都应该有一个 \"异常\"。" +"来自先前选定和配置的客户端的成功更新。每一对`(ClientProxy, FitRes)`都构成先前" +"选定的客户端之一的一次成功更新。但并非所有先前选定的客户机都一定包含在此列表" +"中:客户机可能会退出,不提交结果。对于每个没有提交更新的客户端,\"失败 \"中都" +"应该有一个 \"异常\"。" #: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of msgid "" "**parameters** -- If parameters are returned, then the server will treat " -"these as the new global model parameters (i.e., it will replace the " -"previous parameters with the ones returned from this method). If `None` " -"is returned (e.g., because there were only failures and no viable " -"results) then the server will no update the previous model parameters, " -"the updates received in this round are discarded, and the global model " -"parameters remain the same." +"these as the new global model parameters (i.e., it will replace the previous " +"parameters with the ones returned from this method). If `None` is returned " +"(e.g., because there were only failures and no viable results) then the " +"server will no update the previous model parameters, the updates received in " +"this round are discarded, and the global model parameters remain the same." msgstr "" -"**parameters** -- 如果返回参数,那么服务器将把这些参数作为新的全局模型参数(即用本方法返回的参数替换之前的参数)。如果返回 " -"\"无\"(例如,因为只有失败而没有可行的结果),那么服务器将不再更新之前的模型参数,本轮收到的更新将被丢弃,全局模型参数保持不变。" +"**parameters** -- 如果返回参数,那么服务器将把这些参数作为新的全局模型参数" +"(即用本方法返回的参数替换之前的参数)。如果返回 \"无\"(例如,因为只有失败而" +"没有可行的结果),那么服务器将不再更新之前的模型参数,本轮收到的更新将被丢" +"弃,全局模型参数保持不变。" #: flwr.server.strategy.strategy.Strategy.evaluate:3 of msgid "" @@ -14212,16 +14456,18 @@ msgstr "该函数可用于对模型参数进行集中(即服务器端)评估 #: flwr.server.strategy.strategy.Strategy.evaluate:11 of msgid "" -"**evaluation_result** -- The evaluation result, usually a Tuple " -"containing loss and a dictionary containing task-specific metrics (e.g., " -"accuracy)." -msgstr "**evaluation_result** -- 评估结果,通常是一个元组,包含损失值和一个字典,字典中包含特定任务的指标(如准确率)。" +"**evaluation_result** -- The evaluation result, usually a Tuple containing " +"loss and a dictionary containing task-specific metrics (e.g., accuracy)." +msgstr "" +"**evaluation_result** -- 评估结果,通常是一个元组,包含损失值和一个字典,字典" +"中包含特定任务的指标(如准确率)。" #: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of msgid "" "**parameters** -- If parameters are returned, then the server will treat " "these as the initial global model parameters." 
-msgstr "**parameters** -- 如果返回参数,服务器将把这些参数视为初始全局模型参数。" +msgstr "" +"**parameters** -- 如果返回参数,服务器将把这些参数视为初始全局模型参数。" #: ../../source/ref-api/flwr.server.workflow.rst:2 #, fuzzy @@ -14234,8 +14480,8 @@ msgid "" ":py:obj:`DefaultWorkflow `\\ " "\\(\\[fit\\_workflow\\, ...\\]\\)" msgstr "" -":py:obj:`DefaultWorkflow `\\ \\(\\[" -"fit\\_workflow\\, ...\\]\\)" +":py:obj:`DefaultWorkflow `\\ " +"\\(\\[fit\\_workflow\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.workflow.rst:24::1 #: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of @@ -14249,8 +14495,8 @@ msgid "" ":py:obj:`SecAggPlusWorkflow `\\ " "\\(num\\_shares\\, ...\\[\\, ...\\]\\)" msgstr "" -":py:obj:`SecAggPlusWorkflow `\\ \\(" -"num\\_shares\\, ...\\[\\, ...\\]\\)" +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" #: ../../source/ref-api/flwr.server.workflow.rst:24::1 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 @@ -14265,8 +14511,8 @@ msgid "" ":py:obj:`SecAggWorkflow `\\ " "\\(reconstruction\\_threshold\\, \\*\\)" msgstr "" -":py:obj:`SecAggWorkflow `\\ \\(" -"reconstruction\\_threshold\\, \\*\\)" +":py:obj:`SecAggWorkflow `\\ " +"\\(reconstruction\\_threshold\\, \\*\\)" #: ../../source/ref-api/flwr.server.workflow.rst:24::1 #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of @@ -14288,25 +14534,25 @@ msgstr "工作流程" #: of #, fuzzy msgid "" -"The SecAgg+ protocol ensures the secure summation of integer vectors " -"owned by multiple parties, without accessing any individual integer " -"vector. This workflow allows the server to compute the weighted average " -"of model parameters across all clients, ensuring individual contributions" -" remain private. This is achieved by clients sending both, a weighting " -"factor and a weighted version of the locally updated parameters, both of " -"which are masked for privacy. Specifically, each client uploads \"[w, w *" -" params]\" with masks, where weighting factor 'w' is the number of " -"examples ('num_examples') and 'params' represents the model parameters " -"('parameters') from the client's `FitRes`. The server then aggregates " -"these contributions to compute the weighted average of model parameters." +"The SecAgg+ protocol ensures the secure summation of integer vectors owned " +"by multiple parties, without accessing any individual integer vector. This " +"workflow allows the server to compute the weighted average of model " +"parameters across all clients, ensuring individual contributions remain " +"private. This is achieved by clients sending both, a weighting factor and a " +"weighted version of the locally updated parameters, both of which are masked " +"for privacy. Specifically, each client uploads \"[w, w * params]\" with " +"masks, where weighting factor 'w' is the number of examples ('num_examples') " +"and 'params' represents the model parameters ('parameters') from the " +"client's `FitRes`. The server then aggregates these contributions to compute " +"the weighted average of model parameters." 
msgstr "" "SecAgg+ 协议可确保对多方拥有的整数向量进行安全求和,而不会访问任何单个整数向" "量。该工作流程允许服务器计算所有客户端模型参数的加权平均值,确保个人贡献保持" "私密。这可以通过客户端同时发送加权因子和本地更新参数的加权版本来实现,为了保" -"护隐私,两者都会被屏蔽。具体来说,每个客户端都会上传带掩码的\"[w, w * params]" -"\",其中加权因子 \"w \"是示例数(\"num_examples\"),\"params \"代表客户端 " -"\"FitRes \"中的模型参数(\"parameters\"" -")。然后,服务器会汇总这些贡献,计算模型参数的加权平均值。" +"护隐私,两者都会被屏蔽。具体来说,每个客户端都会上传带掩码的\"[w, w * " +"params]\",其中加权因子 \"w \"是示例数(\"num_examples\"),\"params \"代表客" +"户端 \"FitRes \"中的模型参数(\"parameters\")。然后,服务器会汇总这些贡献," +"计算模型参数的加权平均值。" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 #: of @@ -14316,8 +14562,8 @@ msgid "" "configuration to clients and collect their public keys. - 'share keys': " "Broadcast public keys among clients and collect encrypted secret" msgstr "" -"协议包括四个主要阶段: - 设置\": 向客户端发送 SecAgg+ 配置并收集其公钥。- " -"共享密钥\": 在客户端之间广播公钥,并收集加密密钥。" +"协议包括四个主要阶段: - 设置\": 向客户端发送 SecAgg+ 配置并收集其公钥。- 共" +"享密钥\": 在客户端之间广播公钥,并收集加密密钥。" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 @@ -14349,21 +14595,22 @@ msgstr "解密\": 收集密钥共享,解密并汇总模型参数。" #: of #, fuzzy msgid "" -"Only the aggregated model parameters are exposed and passed to " -"`Strategy.aggregate_fit`, ensuring individual data privacy." -msgstr "只有聚合模型参数才会公开并传递给 `Strategy." -"aggregate_fit`,从而确保个人数据隐私。" +"Only the aggregated model parameters are exposed and passed to `Strategy." +"aggregate_fit`, ensuring individual data privacy." +msgstr "" +"只有聚合模型参数才会公开并传递给 `Strategy.aggregate_fit`,从而确保个人数据隐" +"私。" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:25 #: of #, fuzzy msgid "" -"The number of shares into which each client's private key is split under " -"the SecAgg+ protocol. If specified as a float, it represents the " -"proportion of all selected clients, and the number of shares will be set " -"dynamically in the run time. A private key can be reconstructed from " -"these shares, allowing for the secure aggregation of model updates. Each " -"client sends one share to each of its neighbors while retaining one." +"The number of shares into which each client's private key is split under the " +"SecAgg+ protocol. If specified as a float, it represents the proportion of " +"all selected clients, and the number of shares will be set dynamically in " +"the run time. A private key can be reconstructed from these shares, allowing " +"for the secure aggregation of model updates. Each client sends one share to " +"each of its neighbors while retaining one." msgstr "" "在 SecAgg+ 协议下,每个客户的私钥被分成的份数。如果指定为浮点数,则代表所有选" "定客户的比例,份额数将在运行时动态设置。私钥可以从这些份额中重建,从而实现模" @@ -14374,11 +14621,11 @@ msgstr "" #: of #, fuzzy msgid "" -"The minimum number of shares required to reconstruct a client's private " -"key, or, if specified as a float, it represents the proportion of the " -"total number of shares needed for reconstruction. This threshold ensures " -"privacy by allowing for the recovery of contributions from dropped " -"clients during aggregation, without compromising individual client data." +"The minimum number of shares required to reconstruct a client's private key, " +"or, if specified as a float, it represents the proportion of the total " +"number of shares needed for reconstruction. This threshold ensures privacy " +"by allowing for the recovery of contributions from dropped clients during " +"aggregation, without compromising individual client data." 
msgstr "" "重建客户私钥所需的最小份数,如果指定为浮动,则表示重建所需的份数占总份数的比" "例。这个阈值允许在聚合过程中恢复掉线客户的贡献,从而确保隐私,而不会泄露单个" @@ -14389,11 +14636,12 @@ msgstr "" #: of #, fuzzy msgid "" -"The maximum value of the weight that can be assigned to any single " -"client's update during the weighted average calculation on the server " -"side, e.g., in the FedAvg algorithm." -msgstr "在服务器端进行加权平均计算(如 FedAvg " -"算法)时,可分配给任何单个客户端更新的权重的最大值。" +"The maximum value of the weight that can be assigned to any single client's " +"update during the weighted average calculation on the server side, e.g., in " +"the FedAvg algorithm." +msgstr "" +"在服务器端进行加权平均计算(如 FedAvg 算法)时,可分配给任何单个客户端更新的" +"权重的最大值。" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:35 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:42 @@ -14401,9 +14649,10 @@ msgstr "在服务器端进行加权平均计算(如 FedAvg " #, fuzzy msgid "" "The range within which model parameters are clipped before quantization. " -"This parameter ensures each model parameter is bounded within " -"[-clipping_range, clipping_range], facilitating quantization." -msgstr "量化前模型参数的裁剪范围。该参数可确保每个模型参数都在 [-clipping_range, " +"This parameter ensures each model parameter is bounded within [-" +"clipping_range, clipping_range], facilitating quantization." +msgstr "" +"量化前模型参数的裁剪范围。该参数可确保每个模型参数都在 [-clipping_range, " "clipping_range] 范围内,便于量化。" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:39 @@ -14415,8 +14664,9 @@ msgid "" "quantized, mapping each parameter to an integer in [0, " "quantization_range-1]. This facilitates cryptographic operations on the " "model updates." -msgstr "浮点模型参数量化范围的大小,将每个参数映射为 [0, quantization_range-1] " -"中的整数。这有助于对模型更新进行加密操作。" +msgstr "" +"浮点模型参数量化范围的大小,将每个参数映射为 [0, quantization_range-1] 中的整" +"数。这有助于对模型更新进行加密操作。" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:43 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:50 @@ -14424,21 +14674,21 @@ msgstr "浮点模型参数量化范围的大小,将每个参数映射为 [0, q #, fuzzy msgid "" "The range of values from which random mask entries are uniformly sampled " -"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " -"Please use 2**n values for `modulus_range` to prevent overflow issues." +"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. Please " +"use 2**n values for `modulus_range` to prevent overflow issues." msgstr "" "对随机掩码条目进行均匀采样的数值范围([0, modulus_range-1])。modulus_range " -"\"必须小于 4294967296。为防止出现溢出问题,请为 `modulus_range` 使用 2**n " -"的值。" +"\"必须小于 4294967296。为防止出现溢出问题,请为 `modulus_range` 使用 2**n 的" +"值。" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:47 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:54 #: of #, fuzzy msgid "" -"The timeout duration in seconds. If specified, the workflow will wait for" -" replies for this duration each time. If `None`, there is no time limit " -"and the workflow will wait until replies for all messages are received." +"The timeout duration in seconds. If specified, the workflow will wait for " +"replies for this duration each time. If `None`, there is no time limit and " +"the workflow will wait until replies for all messages are received." 
msgstr "" "超时时间(秒)。如果指定,工作流将在每次等待回复的时间内等待回复。如果指定为 " "\"无\",则没有时间限制,工作流程将一直等待到收到所有信息的回复。" @@ -14448,8 +14698,8 @@ msgstr "" #, fuzzy msgid "" "Generally, higher `num_shares` means more robust to dropouts while " -"increasing the computational costs; higher `reconstruction_threshold` " -"means better privacy guarantees but less tolerance to dropouts." +"increasing the computational costs; higher `reconstruction_threshold` means " +"better privacy guarantees but less tolerance to dropouts." msgstr "" "一般来说,\"份额数 \"越高,意味着对丢弃的鲁棒性越强,同时计算成本也会增加;" "\"重构阈值 \"越高,意味着隐私保证越好,但对丢弃的容忍度越低。" @@ -14458,7 +14708,8 @@ msgstr "" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:64 #: of #, fuzzy -msgid "Too large `max_weight` may compromise the precision of the quantization." +msgid "" +"Too large `max_weight` may compromise the precision of the quantization." msgstr "过大的 `max_weight` 可能会影响量化的精度。" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 @@ -14473,9 +14724,9 @@ msgstr "modulus_range \"必须为 2**n,且大于 \"quantization_range\"。" #, fuzzy msgid "" "When `num_shares` is a float, it is interpreted as the proportion of all " -"selected clients, and hence the number of shares will be determined in " -"the runtime. This allows for dynamic adjustment based on the total number" -" of participating clients." +"selected clients, and hence the number of shares will be determined in the " +"runtime. This allows for dynamic adjustment based on the total number of " +"participating clients." msgstr "" "当 `num_shares` 为浮点数时,它被解释为所有选定客户端的比例,因此份额数将在运" "行时确定。这样就可以根据参与客户端的总数进行动态调整。" @@ -14484,10 +14735,10 @@ msgstr "" #: of #, fuzzy msgid "" -"Similarly, when `reconstruction_threshold` is a float, it is interpreted " -"as the proportion of the number of shares needed for the reconstruction " -"of a private key. This feature enables flexibility in setting the " -"security threshold relative to the number of distributed shares." +"Similarly, when `reconstruction_threshold` is a float, it is interpreted as " +"the proportion of the number of shares needed for the reconstruction of a " +"private key. This feature enables flexibility in setting the security " +"threshold relative to the number of distributed shares." msgstr "" "同样,当 `reconstruction_threshold` 为浮点数时,它被解释为重建私钥所需的份额" "数比例。这一功能使我们可以根据分发的份额数灵活设置安全阈值。" @@ -14496,10 +14747,10 @@ msgstr "" #: of #, fuzzy msgid "" -"`num_shares`, `reconstruction_threshold`, and the quantization parameters" -" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg+" -" protocol." +"`num_shares`, `reconstruction_threshold`, and the quantization parameters " +"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg+ " +"protocol." 
msgstr "" "份额数\"、\"重建阈值 \"和量化参数(\"裁剪范围\"、\"量化范围\"、\"模数范围\")" "在平衡 SecAgg+ 协议的隐私性、稳健性和效率方面发挥着关键作用。" @@ -14508,9 +14759,8 @@ msgstr "" #: of #, fuzzy msgid "" -":py:obj:`collect_masked_vectors_stage " -"`\\" -" \\(driver\\, ...\\)" +":py:obj:`collect_masked_vectors_stage `\\ \\(driver\\, ...\\)" msgstr "" ":py:obj:`collect_masked_vectors_stage `\\ \\(driver\\, ...\\)" @@ -14526,12 +14776,11 @@ msgstr "执行 \"收集屏蔽向量 \"阶段。" #: of #, fuzzy msgid "" -":py:obj:`setup_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`setup_stage `\\ \\(driver\\, context\\, state\\)" msgstr "" -":py:obj:`setup_stage `\\" -" \\(driver\\, context\\, state\\)" +":py:obj:`setup_stage `\\ \\(driver\\, context\\, state\\)" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 @@ -14544,9 +14793,8 @@ msgstr "执行 \"设置 \"阶段。" #: of #, fuzzy msgid "" -":py:obj:`share_keys_stage " -"`\\ " -"\\(driver\\, context\\, state\\)" +":py:obj:`share_keys_stage `\\ \\(driver\\, context\\, state\\)" msgstr "" ":py:obj:`share_keys_stage `\\ \\(driver\\, context\\, state\\)" @@ -14562,12 +14810,11 @@ msgstr "执行 \"共享密钥 \"阶段。" #: of #, fuzzy msgid "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`unmask_stage `\\ \\(driver\\, context\\, state\\)" msgstr "" -":py:obj:`unmask_stage `" -"\\ \\(driver\\, context\\, state\\)" +":py:obj:`unmask_stage `\\ \\(driver\\, context\\, state\\)" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 @@ -14582,51 +14829,50 @@ msgstr "工作流程" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of msgid "" -"Bases: " -":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" +"Bases: :py:class:`~flwr.server.workflow.secure_aggregation." +"secaggplus_workflow.SecAggPlusWorkflow`" msgstr "" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of msgid "" -"The SecAgg protocol ensures the secure summation of integer vectors owned" -" by multiple parties, without accessing any individual integer vector. " -"This workflow allows the server to compute the weighted average of model " +"The SecAgg protocol ensures the secure summation of integer vectors owned by " +"multiple parties, without accessing any individual integer vector. This " +"workflow allows the server to compute the weighted average of model " "parameters across all clients, ensuring individual contributions remain " -"private. This is achieved by clients sending both, a weighting factor and" -" a weighted version of the locally updated parameters, both of which are " -"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" -" with masks, where weighting factor 'w' is the number of examples " -"('num_examples') and 'params' represents the model parameters " -"('parameters') from the client's `FitRes`. The server then aggregates " -"these contributions to compute the weighted average of model parameters." +"private. This is achieved by clients sending both, a weighting factor and a " +"weighted version of the locally updated parameters, both of which are masked " +"for privacy. 
Specifically, each client uploads \"[w, w * params]\" with " +"masks, where weighting factor 'w' is the number of examples ('num_examples') " +"and 'params' represents the model parameters ('parameters') from the " +"client's `FitRes`. The server then aggregates these contributions to compute " +"the weighted average of model parameters." msgstr "" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 of msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg " -"configuration to clients and collect their public keys. - 'share keys': " -"Broadcast public keys among clients and collect encrypted secret" +"The protocol involves four main stages: - 'setup': Send SecAgg configuration " +"to clients and collect their public keys. - 'share keys': Broadcast public " +"keys among clients and collect encrypted secret" msgstr "" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 of msgid "" -"Each client's private key is split into N shares under the SecAgg " -"protocol, where N is the number of selected clients." +"Each client's private key is split into N shares under the SecAgg protocol, " +"where N is the number of selected clients." msgstr "" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:56 of msgid "" -"Generally, higher `reconstruction_threshold` means better privacy " -"guarantees but less tolerance to dropouts." +"Generally, higher `reconstruction_threshold` means better privacy guarantees " +"but less tolerance to dropouts." msgstr "" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 of msgid "" "When `reconstruction_threshold` is a float, it is interpreted as the " "proportion of the number of all selected clients needed for the " -"reconstruction of a private key. This feature enables flexibility in " -"setting the security threshold relative to the number of selected " -"clients." +"reconstruction of a private key. This feature enables flexibility in setting " +"the security threshold relative to the number of selected clients." 
msgstr "" #: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:64 of @@ -14640,32 +14886,29 @@ msgstr "" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 #: of msgid "" -":py:obj:`collect_masked_vectors_stage " -"`\\ " -"\\(driver\\, ...\\)" +":py:obj:`collect_masked_vectors_stage `\\ \\(driver\\, ...\\)" msgstr "" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 #: of msgid "" -":py:obj:`setup_stage `\\" -" \\(driver\\, context\\, state\\)" +":py:obj:`setup_stage `\\ " +"\\(driver\\, context\\, state\\)" msgstr "" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 #: of msgid "" -":py:obj:`share_keys_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`share_keys_stage `\\ \\(driver\\, context\\, state\\)" msgstr "" #: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 #: of msgid "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`unmask_stage `\\ " +"\\(driver\\, context\\, state\\)" msgstr "" #: ../../source/ref-api/flwr.simulation.rst:2 @@ -14675,8 +14918,8 @@ msgstr "运行模拟" #: ../../source/ref-api/flwr.simulation.rst:19::1 msgid "" -":py:obj:`start_simulation `\\ \\(\\*\\," -" client\\_fn\\[\\, ...\\]\\)" +":py:obj:`start_simulation `\\ \\(\\*\\, " +"client\\_fn\\[\\, ...\\]\\)" msgstr "" #: ../../source/ref-api/flwr.simulation.rst:19::1 @@ -14686,8 +14929,8 @@ msgstr "启动基于 Ray 的Flower模拟服务器。" #: ../../source/ref-api/flwr.simulation.rst:19::1 msgid "" -":py:obj:`run_simulation_from_cli " -"`\\ \\(\\)" +":py:obj:`run_simulation_from_cli `\\ \\(\\)" msgstr "" #: ../../source/ref-api/flwr.simulation.rst:19::1 @@ -14719,15 +14962,14 @@ msgstr "" #: flwr.simulation.run_simulation.run_simulation:6 of msgid "" -"The `ClientApp` to be executed by each of the SuperNodes. It will receive" -" messages sent by the `ServerApp`." +"The `ClientApp` to be executed by each of the SuperNodes. It will receive " +"messages sent by the `ServerApp`." msgstr "" #: flwr.simulation.run_simulation.run_simulation:9 of msgid "" -"Number of nodes that run a ClientApp. They can be sampled by a Driver in " -"the ServerApp and receive a Message describing what the ClientApp should " -"perform." +"Number of nodes that run a ClientApp. They can be sampled by a Driver in the " +"ServerApp and receive a Message describing what the ClientApp should perform." msgstr "" #: flwr.simulation.run_simulation.run_simulation:13 of @@ -14736,26 +14978,26 @@ msgstr "" #: flwr.simulation.run_simulation.run_simulation:15 of msgid "" -"'A dictionary, e.g {\"\": , \"\": } to " -"configure a backend. Values supported in are those included by " -"`flwr.common.typing.ConfigsRecordValues`." +"'A dictionary, e.g {\"\": , \"\": } to configure a " +"backend. Values supported in are those included by `flwr.common." +"typing.ConfigsRecordValues`." msgstr "" #: flwr.simulation.run_simulation.run_simulation:19 of msgid "" -"A boolean to indicate whether to enable GPU growth on the main thread. " -"This is desirable if you make use of a TensorFlow model on your " -"`ServerApp` while having your `ClientApp` running on the same GPU. " -"Without enabling this, you might encounter an out-of-memory error because" -" TensorFlow, by default, allocates all GPU memory. 
Read more about how " -"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " -"documentation: https://www.tensorflow.org/api/stable." +"A boolean to indicate whether to enable GPU growth on the main thread. This " +"is desirable if you make use of a TensorFlow model on your `ServerApp` while " +"having your `ClientApp` running on the same GPU. Without enabling this, you " +"might encounter an out-of-memory error because TensorFlow, by default, " +"allocates all GPU memory. Read more about how `tf.config.experimental." +"set_memory_growth()` works in the TensorFlow documentation: https://www." +"tensorflow.org/api/stable." msgstr "" #: flwr.simulation.run_simulation.run_simulation:26 of msgid "" -"When diabled, only INFO, WARNING and ERROR log messages will be shown. If" -" enabled, DEBUG-level logs will be displayed." +"When diabled, only INFO, WARNING and ERROR log messages will be shown. If " +"enabled, DEBUG-level logs will be displayed." msgstr "" #: ../../source/ref-api/flwr.simulation.run_simulation_from_cli.rst:2 @@ -14770,68 +15012,76 @@ msgstr "start_simulation" #: flwr.simulation.app.start_simulation:3 of msgid "" -"A function creating client instances. The function must take a single " -"`str` argument called `cid`. It should return a single client instance of" -" type Client. Note that the created client instances are ephemeral and " -"will often be destroyed after a single method invocation. Since client " -"instances are not long-lived, they should not attempt to carry state over" -" method invocations. Any state required by the instance (model, dataset, " +"A function creating client instances. The function must take a single `str` " +"argument called `cid`. It should return a single client instance of type " +"Client. Note that the created client instances are ephemeral and will often " +"be destroyed after a single method invocation. Since client instances are " +"not long-lived, they should not attempt to carry state over method " +"invocations. Any state required by the instance (model, dataset, " "hyperparameters, ...) should be (re-)created in either the call to " -"`client_fn` or the call to any of the client methods (e.g., load " -"evaluation data in the `evaluate` method itself)." +"`client_fn` or the call to any of the client methods (e.g., load evaluation " +"data in the `evaluate` method itself)." msgstr "" -"创建客户端实例的函数。该函数必须接受一个名为 `cid` 的 `str` 参数。它应返回一个 Client " -"类型的客户端实例。请注意,创建的客户端实例是短暂的,通常在调用一个方法后就会被销毁。由于客户机实例不是长期存在的,它们不应试图在方法调用时携带状态数据。实例所需的任何状态数据(模型、数据集、超参数......)都应在调用" -" `client_fn` 或任何客户端方法(例如,在 `evaluate` 方法中加载评估数据)时(重新)创建。" +"创建客户端实例的函数。该函数必须接受一个名为 `cid` 的 `str` 参数。它应返回一" +"个 Client 类型的客户端实例。请注意,创建的客户端实例是短暂的,通常在调用一个" +"方法后就会被销毁。由于客户机实例不是长期存在的,它们不应试图在方法调用时携带" +"状态数据。实例所需的任何状态数据(模型、数据集、超参数......)都应在调用 " +"`client_fn` 或任何客户端方法(例如,在 `evaluate` 方法中加载评估数据)时(重" +"新)创建。" #: flwr.simulation.app.start_simulation:13 of msgid "" "The total number of clients in this simulation. This must be set if " "`clients_ids` is not set and vice-versa." -msgstr "本次模拟的客户总数。如果未设置 `clients_ids`,则必须设置该参数,反之亦然。" +msgstr "" +"本次模拟的客户总数。如果未设置 `clients_ids`,则必须设置该参数,反之亦然。" #: flwr.simulation.app.start_simulation:16 of msgid "" -"List `client_id`s for each client. This is only required if `num_clients`" -" is not set. Setting both `num_clients` and `clients_ids` with " +"List `client_id`s for each client. This is only required if `num_clients` is " +"not set. 
Setting both `num_clients` and `clients_ids` with " "`len(clients_ids)` not equal to `num_clients` generates an error." msgstr "" -"列出每个客户的 `client_id`。只有在未设置 `num_clients` " -"时才需要这样做。同时设置`num_clients`和`clients_ids`,且`len(clients_ids)`不等于`num_clients`,会产生错误。" +"列出每个客户的 `client_id`。只有在未设置 `num_clients` 时才需要这样做。同时设" +"置`num_clients`和`clients_ids`,且`len(clients_ids)`不等于`num_clients`,会产" +"生错误。" #: flwr.simulation.app.start_simulation:20 of #, fuzzy msgid "" -"CPU and GPU resources for a single client. Supported keys are `num_cpus` " -"and `num_gpus`. To understand the GPU utilization caused by `num_gpus`, " -"as well as using custom resources, please consult the Ray documentation." +"CPU and GPU resources for a single client. Supported keys are `num_cpus` and " +"`num_gpus`. To understand the GPU utilization caused by `num_gpus`, as well " +"as using custom resources, please consult the Ray documentation." msgstr "" -"\"num_gpus\": 0.0` 单个客户端的 CPU 和 GPU 资源。支持的键值为 `num_cpus` 和 `num_gpus`。要了解" -" `num_gpus` 所导致的 GPU 利用率,以及使用自定义资源的情况,请查阅 Ray 文档。" +"\"num_gpus\": 0.0` 单个客户端的 CPU 和 GPU 资源。支持的键值为 `num_cpus` 和 " +"`num_gpus`。要了解 `num_gpus` 所导致的 GPU 利用率,以及使用自定义资源的情况," +"请查阅 Ray 文档。" #: flwr.simulation.app.start_simulation:25 of msgid "" "An implementation of the abstract base class `flwr.server.Server`. If no " "instance is provided, then `start_server` will create one." -msgstr "抽象基类 `flwr.server.Server`的实现。如果没有提供实例,`start_server` 将创建一个。" +msgstr "" +"抽象基类 `flwr.server.Server`的实现。如果没有提供实例,`start_server` 将创建" +"一个。" #: flwr.simulation.app.start_simulation:31 of msgid "" -"An implementation of the abstract base class `flwr.server.Strategy`. If " -"no strategy is provided, then `start_server` will use " -"`flwr.server.strategy.FedAvg`." +"An implementation of the abstract base class `flwr.server.Strategy`. If no " +"strategy is provided, then `start_server` will use `flwr.server.strategy." +"FedAvg`." msgstr "" -"抽象基类 `flwr.server.strategy` 的实现。如果没有提供策略,`start_server` 将使用 " -"`flwr.server.strategy.FedAvg`。" +"抽象基类 `flwr.server.strategy` 的实现。如果没有提供策略,`start_server` 将使" +"用 `flwr.server.strategy.FedAvg`。" #: flwr.simulation.app.start_simulation:35 of msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_simulation` will use " -"`flwr.server.client_manager.SimpleClientManager`." +"An implementation of the abstract base class `flwr.server.ClientManager`. If " +"no implementation is provided, then `start_simulation` will use `flwr.server." +"client_manager.SimpleClientManager`." msgstr "" -"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_simulation` 将使用 " -"`flwr.server.client_manager.SimpleClientManager`。" +"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现," +"`start_simulation` 将使用 `flwr.server.client_manager.SimpleClientManager`。" #: flwr.simulation.app.start_simulation:39 of msgid "" @@ -14839,19 +15089,21 @@ msgid "" "ray_init_args is None (the default), Ray will be initialized with the " "following default args: { \"ignore_reinit_error\": True, " "\"include_dashboard\": False } An empty dictionary can be used " -"(ray_init_args={}) to prevent any arguments from being passed to " -"ray.init." +"(ray_init_args={}) to prevent any arguments from being passed to ray.init." 
msgstr "" -"可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为 None(默认值),则将使用以下默认参数初始化 Ray:" -" { \"ignore_reinit_error\": True, \"include_dashboard\": False } " -"可以使用空字典(ray_init_args={})来防止向 ray.init 传递任何参数。" +"可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为 None(默认" +"值),则将使用以下默认参数初始化 Ray: { \"ignore_reinit_error\": True, " +"\"include_dashboard\": False } 可以使用空字典(ray_init_args={})来防止向 " +"ray.init 传递任何参数。" #: flwr.simulation.app.start_simulation:39 of msgid "" "Optional dictionary containing arguments for the call to `ray.init`. If " "ray_init_args is None (the default), Ray will be initialized with the " "following default args:" -msgstr "可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为 None(默认值),则将使用以下默认参数初始化 Ray:" +msgstr "" +"可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为 None(默认" +"值),则将使用以下默认参数初始化 Ray:" #: flwr.simulation.app.start_simulation:43 of msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" @@ -14859,15 +15111,15 @@ msgstr "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" #: flwr.simulation.app.start_simulation:45 of msgid "" -"An empty dictionary can be used (ray_init_args={}) to prevent any " -"arguments from being passed to ray.init." +"An empty dictionary can be used (ray_init_args={}) to prevent any arguments " +"from being passed to ray.init." msgstr "可以使用空字典 (ray_init_args={}) 来防止向 ray.init 传递任何参数。" #: flwr.simulation.app.start_simulation:48 of msgid "" -"Set to True to prevent `ray.shutdown()` in case " -"`ray.is_initialized()=True`." -msgstr "设为 True 可在 `ray.is_initialized()=True` 情况下阻止 `ray.shutdown()` 。" +"Set to True to prevent `ray.shutdown()` in case `ray.is_initialized()=True`." +msgstr "" +"设为 True 可在 `ray.is_initialized()=True` 情况下阻止 `ray.shutdown()` 。" #: flwr.simulation.app.start_simulation:50 of #, fuzzy @@ -14875,28 +15127,33 @@ msgid "" "Optionally specify the type of actor to use. The actor object, which " "persists throughout the simulation, will be the process in charge of " "executing a ClientApp wrapping input argument `client_fn`." -msgstr "可选择指定要使用的actor类型。actor对象将在整个模拟过程中持续存在,它将是负责运行客户端作业(即其 `fit()`方法)的进程。" +msgstr "" +"可选择指定要使用的actor类型。actor对象将在整个模拟过程中持续存在,它将是负责" +"运行客户端作业(即其 `fit()`方法)的进程。" #: flwr.simulation.app.start_simulation:54 of msgid "" -"If you want to create your own Actor classes, you might need to pass some" -" input argument. You can use this dictionary for such purpose." -msgstr "如果您想创建自己的 Actor 类,可能需要传递一些输入参数。为此,您可以使用本字典。" +"If you want to create your own Actor classes, you might need to pass some " +"input argument. You can use this dictionary for such purpose." +msgstr "" +"如果您想创建自己的 Actor 类,可能需要传递一些输入参数。为此,您可以使用本字" +"典。" #: flwr.simulation.app.start_simulation:57 of msgid "" -"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " -"the VCE to choose in which node the actor is placed. If you are an " -"advanced user needed more control you can use lower-level scheduling " -"strategies to pin actors to specific compute nodes (e.g. via " -"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." -" For all details, please refer to the Ray documentation: " -"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" +"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for the " +"VCE to choose in which node the actor is placed. If you are an advanced user " +"needed more control you can use lower-level scheduling strategies to pin " +"actors to specific compute nodes (e.g. via NodeAffinitySchedulingStrategy). 
" +"Please note this is an advanced feature. For all details, please refer to " +"the Ray documentation: https://docs.ray.io/en/latest/ray-core/scheduling/" +"index.html" msgstr "" -"(默认:\"DEFAULT\")可选字符串(\"DEFAULT \"或 \"SPREAD\"),供 VCE " -"选择将行为体放置在哪个节点上。如果你是需要更多控制权的高级用户,可以使用低级调度策略将actor固定到特定计算节点(例如,通过 " -"NodeAffinitySchedulingStrategy)。请注意,这是一项高级功能。有关详细信息,请参阅 Ray " -"文档:https://docs.ray.io/en/latest/ray-core/scheduling/index.html" +"(默认:\"DEFAULT\")可选字符串(\"DEFAULT \"或 \"SPREAD\"),供 VCE 选择将行" +"为体放置在哪个节点上。如果你是需要更多控制权的高级用户,可以使用低级调度策略" +"将actor固定到特定计算节点(例如,通过 NodeAffinitySchedulingStrategy)。请注" +"意,这是一项高级功能。有关详细信息,请参阅 Ray 文档:https://docs.ray.io/en/" +"latest/ray-core/scheduling/index.html" #: flwr.simulation.app.start_simulation:66 of msgid "**hist** -- Object containing metrics from training." @@ -14947,19 +15204,21 @@ msgstr "感谢我们的贡献者" #: ../../source/ref-changelog.md:206 ../../source/ref-changelog.md:290 #: ../../source/ref-changelog.md:354 ../../source/ref-changelog.md:412 msgid "" -"We would like to give our special thanks to all the contributors who made" -" the new version of Flower possible (in `git shortlog` order):" -msgstr "在此,我们要特别感谢所有为 Flower 的新版本做出贡献的人员(按 `git shortlog` 顺序排列):" +"We would like to give our special thanks to all the contributors who made " +"the new version of Flower possible (in `git shortlog` order):" +msgstr "" +"在此,我们要特别感谢所有为 Flower 的新版本做出贡献的人员(按 `git shortlog` " +"顺序排列):" #: ../../source/ref-changelog.md:15 #, fuzzy msgid "" -"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " -"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " -"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " -"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " -"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " -"Shaaban`, `Yan Gao`, `Yasar Abbas` " +"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles Beauville`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S Chaitanya Kumar`, `Mohammad " +"Naseri`, `Nikos Vlachakis`, `Pritam Neog`, `Robert Kuska`, `Robert Steiner`, " +"`Taner Topal`, `Yahia Salaheldin Shaaban`, `Yan Gao`, `Yasar Abbas` " msgstr "" "`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " "`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " @@ -14970,104 +15229,98 @@ msgstr "" #: ../../source/ref-changelog.md:19 #, fuzzy msgid "" -"**Introduce stateful clients (experimental)** " -"([#2770](https://github.com/adap/flower/pull/2770), " -"[#2686](https://github.com/adap/flower/pull/2686), " -"[#2696](https://github.com/adap/flower/pull/2696), " -"[#2643](https://github.com/adap/flower/pull/2643), " -"[#2769](https://github.com/adap/flower/pull/2769))" +"**Introduce stateful clients (experimental)** ([#2770](https://github.com/" +"adap/flower/pull/2770), [#2686](https://github.com/adap/flower/pull/2686), " +"[#2696](https://github.com/adap/flower/pull/2696), [#2643](https://github." +"com/adap/flower/pull/2643), [#2769](https://github.com/adap/flower/" +"pull/2769))" msgstr "" "** baselines的普通更新** ([#2301](https://github.com/adap/flower/pull/2301), " -"[#2305](https://github.com/adap/flower/pull/2305), " -"[#2307](https://github.com/adap/flower/pull/2307), " -"[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435))" +"[#2305](https://github.com/adap/flower/pull/2305), [#2307](https://github." 
+"com/adap/flower/pull/2307), [#2327](https://github.com/adap/flower/" +"pull/2327), [#2435](https://github.com/adap/flower/pull/2435))" #: ../../source/ref-changelog.md:21 msgid "" "Subclasses of `Client` and `NumPyClient` can now store local state that " "remains on the client. Let's start with the highlight first: this new " -"feature is compatible with both simulated clients (via " -"`start_simulation`) and networked clients (via `start_client`). It's also" -" the first preview of new abstractions like `Context` and `RecordSet`. " -"Clients can access state of type `RecordSet` via `state: RecordSet = " -"self.context.state`. Changes to this `RecordSet` are preserved across " -"different rounds of execution to enable stateful computations in a " -"unified way across simulation and deployment." +"feature is compatible with both simulated clients (via `start_simulation`) " +"and networked clients (via `start_client`). It's also the first preview of " +"new abstractions like `Context` and `RecordSet`. Clients can access state of " +"type `RecordSet` via `state: RecordSet = self.context.state`. Changes to " +"this `RecordSet` are preserved across different rounds of execution to " +"enable stateful computations in a unified way across simulation and " +"deployment." msgstr "" #: ../../source/ref-changelog.md:23 #, fuzzy msgid "" -"**Improve performance** " -"([#2293](https://github.com/adap/flower/pull/2293))" +"**Improve performance** ([#2293](https://github.com/adap/flower/pull/2293))" msgstr "**改进示例笔记** ([#2005](https://github.com/adap/flower/pull/2005))" #: ../../source/ref-changelog.md:25 msgid "" -"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" -"place aggregation to reduce memory consumption. The Flower client " -"serialization/deserialization has been rewritten from the ground up, " -"which results in significant speedups, especially when the client-side " -"training time is short." +"Flower is faster than ever. All `FedAvg`-derived strategies now use in-place " +"aggregation to reduce memory consumption. The Flower client serialization/" +"deserialization has been rewritten from the ground up, which results in " +"significant speedups, especially when the client-side training time is short." msgstr "" #: ../../source/ref-changelog.md:27 #, fuzzy msgid "" -"**Support Federated Learning with Apple MLX and Flower** " -"([#2693](https://github.com/adap/flower/pull/2693))" +"**Support Federated Learning with Apple MLX and Flower** ([#2693](https://" +"github.com/adap/flower/pull/2693))" msgstr "" -"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " -"([#1598](https://github.com/adap/flower/pull/1598))" +"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** ([#1598](https://github." +"com/adap/flower/pull/1598))" #: ../../source/ref-changelog.md:29 msgid "" -"Flower has official support for federated learning using [Apple " -"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " -"example." +"Flower has official support for federated learning using [Apple MLX](https://" +"ml-explore.github.io/mlx) via the new `quickstart-mlx` code example." 
msgstr "" #: ../../source/ref-changelog.md:31 #, fuzzy msgid "" -"**Introduce new XGBoost cyclic strategy** " -"([#2666](https://github.com/adap/flower/pull/2666), " -"[#2668](https://github.com/adap/flower/pull/2668))" +"**Introduce new XGBoost cyclic strategy** ([#2666](https://github.com/adap/" +"flower/pull/2666), [#2668](https://github.com/adap/flower/pull/2668))" msgstr "" -"**介绍 iOS SDK(预览版)** ([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +"**介绍 iOS SDK(预览版)** ([#1621](https://github.com/adap/flower/" +"pull/1621), [#1764](https://github.com/adap/flower/pull/1764))" #: ../../source/ref-changelog.md:33 msgid "" -"A new strategy called `FedXgbCyclic` supports a client-by-client style of" -" training (often called cyclic). The `xgboost-comprehensive` code example" -" shows how to use it in a full project. In addition to that, `xgboost-" -"comprehensive` now also supports simulation mode. With this, Flower " -"offers best-in-class XGBoost support." +"A new strategy called `FedXgbCyclic` supports a client-by-client style of " +"training (often called cyclic). The `xgboost-comprehensive` code example " +"shows how to use it in a full project. In addition to that, `xgboost-" +"comprehensive` now also supports simulation mode. With this, Flower offers " +"best-in-class XGBoost support." msgstr "" #: ../../source/ref-changelog.md:35 #, fuzzy msgid "" -"**Support Python 3.11** " -"([#2394](https://github.com/adap/flower/pull/2394))" -msgstr "** 支持 Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" +"**Support Python 3.11** ([#2394](https://github.com/adap/flower/pull/2394))" +msgstr "" +"** 支持 Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" #: ../../source/ref-changelog.md:37 msgid "" -"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " -"ensure better support for users using more recent Python versions." +"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will ensure " +"better support for users using more recent Python versions." 
msgstr "" #: ../../source/ref-changelog.md:39 #, fuzzy msgid "" -"**Update gRPC and ProtoBuf dependencies** " -"([#2814](https://github.com/adap/flower/pull/2814))" +"**Update gRPC and ProtoBuf dependencies** ([#2814](https://github.com/adap/" +"flower/pull/2814))" msgstr "" -"**更新 REST API 以支持创建和删除节点** " -"([#2283](https://github.com/adap/flower/pull/2283))" +"**更新 REST API 以支持创建和删除节点** ([#2283](https://github.com/adap/" +"flower/pull/2283))" #: ../../source/ref-changelog.md:41 msgid "" @@ -15078,99 +15331,90 @@ msgstr "" #: ../../source/ref-changelog.md:43 #, fuzzy msgid "" -"**Introduce Docker image for Flower server** " -"([#2700](https://github.com/adap/flower/pull/2700), " -"[#2688](https://github.com/adap/flower/pull/2688), " -"[#2705](https://github.com/adap/flower/pull/2705), " -"[#2695](https://github.com/adap/flower/pull/2695), " -"[#2747](https://github.com/adap/flower/pull/2747), " -"[#2746](https://github.com/adap/flower/pull/2746), " -"[#2680](https://github.com/adap/flower/pull/2680), " -"[#2682](https://github.com/adap/flower/pull/2682), " -"[#2701](https://github.com/adap/flower/pull/2701))" -msgstr "" -"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +"**Introduce Docker image for Flower server** ([#2700](https://github.com/" +"adap/flower/pull/2700), [#2688](https://github.com/adap/flower/pull/2688), " +"[#2705](https://github.com/adap/flower/pull/2705), [#2695](https://github." +"com/adap/flower/pull/2695), [#2747](https://github.com/adap/flower/" +"pull/2747), [#2746](https://github.com/adap/flower/pull/2746), [#2680]" +"(https://github.com/adap/flower/pull/2680), [#2682](https://github.com/adap/" +"flower/pull/2682), [#2701](https://github.com/adap/flower/pull/2701))" +msgstr "" +"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/" +"pull/842), [#844](https://github.com/adap/flower/pull/844), [#845](https://" +"github.com/adap/flower/pull/845), [#847](https://github.com/adap/flower/" +"pull/847), [#993](https://github.com/adap/flower/pull/993), [#994](https://" +"github.com/adap/flower/pull/994))" #: ../../source/ref-changelog.md:45 msgid "" -"The Flower server can now be run using an official Docker image. A new " -"how-to guide explains [how to run Flower using " -"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html). An official Flower client Docker image will follow." +"The Flower server can now be run using an official Docker image. A new how-" +"to guide explains [how to run Flower using Docker](https://flower.ai/docs/" +"framework/how-to-run-flower-using-docker.html). An official Flower client " +"Docker image will follow." 
msgstr "" #: ../../source/ref-changelog.md:47 #, fuzzy msgid "" -"**Introduce** `flower-via-docker-compose` **example** " -"([#2626](https://github.com/adap/flower/pull/2626))" +"**Introduce** `flower-via-docker-compose` **example** ([#2626](https://" +"github.com/adap/flower/pull/2626))" msgstr "" -"**介绍Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +"**介绍Flower Android SDK** ([#2131](https://github.com/adap/flower/" +"pull/2131))" #: ../../source/ref-changelog.md:49 #, fuzzy msgid "" -"**Introduce** `quickstart-sklearn-tabular` **example** " -"([#2719](https://github.com/adap/flower/pull/2719))" -msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" +"**Introduce** `quickstart-sklearn-tabular` **example** ([#2719](https://" +"github.com/adap/flower/pull/2719))" +msgstr "" +"**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" #: ../../source/ref-changelog.md:51 #, fuzzy msgid "" -"**Introduce** `custom-metrics` **example** " -"([#1958](https://github.com/adap/flower/pull/1958))" -msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" +"**Introduce** `custom-metrics` **example** ([#1958](https://github.com/adap/" +"flower/pull/1958))" +msgstr "" +"**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" #: ../../source/ref-changelog.md:53 #, fuzzy msgid "" -"**Update code examples to use Flower Datasets** " -"([#2450](https://github.com/adap/flower/pull/2450), " -"[#2456](https://github.com/adap/flower/pull/2456), " -"[#2318](https://github.com/adap/flower/pull/2318), " -"[#2712](https://github.com/adap/flower/pull/2712))" +"**Update code examples to use Flower Datasets** ([#2450](https://github.com/" +"adap/flower/pull/2450), [#2456](https://github.com/adap/flower/pull/2456), " +"[#2318](https://github.com/adap/flower/pull/2318), [#2712](https://github." +"com/adap/flower/pull/2712))" msgstr "" -"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310)" +"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), [#1276]" +"(https://github.com/adap/flower/pull/1276), [#1301](https://github.com/adap/" +"flower/pull/1301), [#1310](https://github.com/adap/flower/pull/1310)" #: ../../source/ref-changelog.md:55 msgid "" -"Several code examples were updated to use [Flower " -"Datasets](https://flower.ai/docs/datasets/)." +"Several code examples were updated to use [Flower Datasets](https://flower." +"ai/docs/datasets/)." 
msgstr "" #: ../../source/ref-changelog.md:57 #, fuzzy msgid "" -"**General updates to Flower Examples** " -"([#2381](https://github.com/adap/flower/pull/2381), " -"[#2805](https://github.com/adap/flower/pull/2805), " -"[#2782](https://github.com/adap/flower/pull/2782), " -"[#2806](https://github.com/adap/flower/pull/2806), " -"[#2829](https://github.com/adap/flower/pull/2829), " -"[#2825](https://github.com/adap/flower/pull/2825), " -"[#2816](https://github.com/adap/flower/pull/2816), " -"[#2726](https://github.com/adap/flower/pull/2726), " -"[#2659](https://github.com/adap/flower/pull/2659), " -"[#2655](https://github.com/adap/flower/pull/2655))" -msgstr "" -"**改进(试验性)驱动程序应用程序接口** ([#1663](https://github.com/adap/flower/pull/1663)," -" [#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +"**General updates to Flower Examples** ([#2381](https://github.com/adap/" +"flower/pull/2381), [#2805](https://github.com/adap/flower/pull/2805), [#2782]" +"(https://github.com/adap/flower/pull/2782), [#2806](https://github.com/adap/" +"flower/pull/2806), [#2829](https://github.com/adap/flower/pull/2829), [#2825]" +"(https://github.com/adap/flower/pull/2825), [#2816](https://github.com/adap/" +"flower/pull/2816), [#2726](https://github.com/adap/flower/pull/2726), [#2659]" +"(https://github.com/adap/flower/pull/2659), [#2655](https://github.com/adap/" +"flower/pull/2655))" +msgstr "" +"**改进(试验性)驱动程序应用程序接口** ([#1663](https://github.com/adap/" +"flower/pull/1663), [#1666](https://github.com/adap/flower/pull/1666), [#1667]" +"(https://github.com/adap/flower/pull/1667), [#1664](https://github.com/adap/" +"flower/pull/1664), [#1675](https://github.com/adap/flower/pull/1675), [#1676]" +"(https://github.com/adap/flower/pull/1676), [#1693](https://github.com/adap/" +"flower/pull/1693), [#1662](https://github.com/adap/flower/pull/1662), [#1794]" +"(https://github.com/adap/flower/pull/1794))" #: ../../source/ref-changelog.md:59 msgid "Many Flower code examples received substantial updates." 
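As a rough illustration of the `flwr.simulation.start_simulation` arguments whose descriptions are rewrapped in the hunks above (`client_fn`, `num_clients`, `client_resources`, `config`, `strategy`, `ray_init_args`), here is a minimal sketch. The `FlowerClient` class, its dummy return values, and the concrete numbers are illustrative assumptions, not taken from the patch.

```python
import flwr as fl


class FlowerClient(fl.client.NumPyClient):
    """Toy NumPyClient with no real model; stands in for a user-defined client."""

    def get_parameters(self, config):
        return []  # no parameters in this sketch

    def fit(self, parameters, config):
        return parameters, 1, {}  # (updated parameters, num_examples, metrics)

    def evaluate(self, parameters, config):
        return 0.0, 1, {}  # (loss, num_examples, metrics)


def client_fn(cid: str) -> fl.client.Client:
    # Called for each sampled client id; instances are ephemeral, so any
    # required state (model, data) should be (re-)created here.
    return FlowerClient().to_client()


history = fl.simulation.start_simulation(
    client_fn=client_fn,
    num_clients=10,  # alternatively, pass `clients_ids` instead
    client_resources={"num_cpus": 1, "num_gpus": 0.0},
    config=fl.server.ServerConfig(num_rounds=3),
    strategy=fl.server.strategy.FedAvg(),  # default if omitted
    ray_init_args={"ignore_reinit_error": True, "include_dashboard": False},
)
```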
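The `run_simulation` entry point documented in the first hunks above takes a `ServerApp`/`ClientApp` pair instead of a bare `client_fn`. A sketch under the same toy-client assumptions; note that the exact `client_fn` signature and the `ServerApp(config=..., strategy=...)` constructor shown here depend on the Flower version, and `backend_config` is omitted.

```python
from flwr.client import ClientApp, NumPyClient
from flwr.server import ServerApp, ServerConfig
from flwr.server.strategy import FedAvg
from flwr.simulation import run_simulation


class TinyClient(NumPyClient):
    """Toy client used only to make the sketch self-contained."""

    def get_parameters(self, config):
        return []

    def fit(self, parameters, config):
        return parameters, 1, {}

    def evaluate(self, parameters, config):
        return 0.0, 1, {}


client_app = ClientApp(client_fn=lambda cid: TinyClient().to_client())
server_app = ServerApp(config=ServerConfig(num_rounds=3), strategy=FedAvg())

run_simulation(
    server_app=server_app,
    client_app=client_app,
    num_supernodes=10,           # nodes that run a ClientApp
    enable_tf_gpu_growth=False,  # only relevant when TensorFlow shares a GPU with the ServerApp
    verbose_logging=False,       # True also shows DEBUG-level logs
)
```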
@@ -15183,11 +15427,11 @@ msgstr "**更新 Flower Baselines**" #: ../../source/ref-changelog.md:63 #, fuzzy msgid "" -"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " -"[#2771](https://github.com/adap/flower/pull/2771))" +"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), [#2771]" +"(https://github.com/adap/flower/pull/2771))" msgstr "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), [#2615](https://" +"github.com/adap/flower/pull/2615))" #: ../../source/ref-changelog.md:64 #, fuzzy @@ -15217,172 +15461,138 @@ msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" #: ../../source/ref-changelog.md:70 #, fuzzy msgid "" -"**Improve documentation** " -"([#2674](https://github.com/adap/flower/pull/2674), " -"[#2480](https://github.com/adap/flower/pull/2480), " -"[#2826](https://github.com/adap/flower/pull/2826), " -"[#2727](https://github.com/adap/flower/pull/2727), " -"[#2761](https://github.com/adap/flower/pull/2761), " -"[#2900](https://github.com/adap/flower/pull/2900))" +"**Improve documentation** ([#2674](https://github.com/adap/flower/" +"pull/2674), [#2480](https://github.com/adap/flower/pull/2480), [#2826]" +"(https://github.com/adap/flower/pull/2826), [#2727](https://github.com/adap/" +"flower/pull/2727), [#2761](https://github.com/adap/flower/pull/2761), [#2900]" +"(https://github.com/adap/flower/pull/2900))" msgstr "" -"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614)))" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), [#1628]" +"(https://github.com/adap/flower/pull/1628), [#1620](https://github.com/adap/" +"flower/pull/1620), [#1618](https://github.com/adap/flower/pull/1618), [#1617]" +"(https://github.com/adap/flower/pull/1617), [#1613](https://github.com/adap/" +"flower/pull/1613), [#1614](https://github.com/adap/flower/pull/1614)))" #: ../../source/ref-changelog.md:72 msgid "" -"**Improved testing and development infrastructure** " -"([#2797](https://github.com/adap/flower/pull/2797), " -"[#2676](https://github.com/adap/flower/pull/2676), " -"[#2644](https://github.com/adap/flower/pull/2644), " -"[#2656](https://github.com/adap/flower/pull/2656), " -"[#2848](https://github.com/adap/flower/pull/2848), " -"[#2675](https://github.com/adap/flower/pull/2675), " -"[#2735](https://github.com/adap/flower/pull/2735), " -"[#2767](https://github.com/adap/flower/pull/2767), " -"[#2732](https://github.com/adap/flower/pull/2732), " -"[#2744](https://github.com/adap/flower/pull/2744), " -"[#2681](https://github.com/adap/flower/pull/2681), " -"[#2699](https://github.com/adap/flower/pull/2699), " -"[#2745](https://github.com/adap/flower/pull/2745), " -"[#2734](https://github.com/adap/flower/pull/2734), " -"[#2731](https://github.com/adap/flower/pull/2731), " -"[#2652](https://github.com/adap/flower/pull/2652), " -"[#2720](https://github.com/adap/flower/pull/2720), " -"[#2721](https://github.com/adap/flower/pull/2721), " -"[#2717](https://github.com/adap/flower/pull/2717), " -"[#2864](https://github.com/adap/flower/pull/2864), " -"[#2694](https://github.com/adap/flower/pull/2694), " 
-"[#2709](https://github.com/adap/flower/pull/2709), " -"[#2658](https://github.com/adap/flower/pull/2658), " -"[#2796](https://github.com/adap/flower/pull/2796), " -"[#2692](https://github.com/adap/flower/pull/2692), " -"[#2657](https://github.com/adap/flower/pull/2657), " -"[#2813](https://github.com/adap/flower/pull/2813), " -"[#2661](https://github.com/adap/flower/pull/2661), " -"[#2398](https://github.com/adap/flower/pull/2398))" +"**Improved testing and development infrastructure** ([#2797](https://github." +"com/adap/flower/pull/2797), [#2676](https://github.com/adap/flower/" +"pull/2676), [#2644](https://github.com/adap/flower/pull/2644), [#2656]" +"(https://github.com/adap/flower/pull/2656), [#2848](https://github.com/adap/" +"flower/pull/2848), [#2675](https://github.com/adap/flower/pull/2675), [#2735]" +"(https://github.com/adap/flower/pull/2735), [#2767](https://github.com/adap/" +"flower/pull/2767), [#2732](https://github.com/adap/flower/pull/2732), [#2744]" +"(https://github.com/adap/flower/pull/2744), [#2681](https://github.com/adap/" +"flower/pull/2681), [#2699](https://github.com/adap/flower/pull/2699), [#2745]" +"(https://github.com/adap/flower/pull/2745), [#2734](https://github.com/adap/" +"flower/pull/2734), [#2731](https://github.com/adap/flower/pull/2731), [#2652]" +"(https://github.com/adap/flower/pull/2652), [#2720](https://github.com/adap/" +"flower/pull/2720), [#2721](https://github.com/adap/flower/pull/2721), [#2717]" +"(https://github.com/adap/flower/pull/2717), [#2864](https://github.com/adap/" +"flower/pull/2864), [#2694](https://github.com/adap/flower/pull/2694), [#2709]" +"(https://github.com/adap/flower/pull/2709), [#2658](https://github.com/adap/" +"flower/pull/2658), [#2796](https://github.com/adap/flower/pull/2796), [#2692]" +"(https://github.com/adap/flower/pull/2692), [#2657](https://github.com/adap/" +"flower/pull/2657), [#2813](https://github.com/adap/flower/pull/2813), [#2661]" +"(https://github.com/adap/flower/pull/2661), [#2398](https://github.com/adap/" +"flower/pull/2398))" msgstr "" #: ../../source/ref-changelog.md:74 msgid "" -"The Flower testing and development infrastructure has received " -"substantial updates. This makes Flower 1.7 the most tested release ever." +"The Flower testing and development infrastructure has received substantial " +"updates. This makes Flower 1.7 the most tested release ever." 
msgstr "" #: ../../source/ref-changelog.md:76 #, fuzzy msgid "" -"**Update dependencies** " -"([#2753](https://github.com/adap/flower/pull/2753), " -"[#2651](https://github.com/adap/flower/pull/2651), " -"[#2739](https://github.com/adap/flower/pull/2739), " -"[#2837](https://github.com/adap/flower/pull/2837), " -"[#2788](https://github.com/adap/flower/pull/2788), " -"[#2811](https://github.com/adap/flower/pull/2811), " -"[#2774](https://github.com/adap/flower/pull/2774), " -"[#2790](https://github.com/adap/flower/pull/2790), " -"[#2751](https://github.com/adap/flower/pull/2751), " -"[#2850](https://github.com/adap/flower/pull/2850), " -"[#2812](https://github.com/adap/flower/pull/2812), " -"[#2872](https://github.com/adap/flower/pull/2872), " -"[#2736](https://github.com/adap/flower/pull/2736), " -"[#2756](https://github.com/adap/flower/pull/2756), " -"[#2857](https://github.com/adap/flower/pull/2857), " -"[#2757](https://github.com/adap/flower/pull/2757), " -"[#2810](https://github.com/adap/flower/pull/2810), " -"[#2740](https://github.com/adap/flower/pull/2740), " -"[#2789](https://github.com/adap/flower/pull/2789))" -msgstr "" -"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +"**Update dependencies** ([#2753](https://github.com/adap/flower/pull/2753), " +"[#2651](https://github.com/adap/flower/pull/2651), [#2739](https://github." 
+"com/adap/flower/pull/2739), [#2837](https://github.com/adap/flower/" +"pull/2837), [#2788](https://github.com/adap/flower/pull/2788), [#2811]" +"(https://github.com/adap/flower/pull/2811), [#2774](https://github.com/adap/" +"flower/pull/2774), [#2790](https://github.com/adap/flower/pull/2790), [#2751]" +"(https://github.com/adap/flower/pull/2751), [#2850](https://github.com/adap/" +"flower/pull/2850), [#2812](https://github.com/adap/flower/pull/2812), [#2872]" +"(https://github.com/adap/flower/pull/2872), [#2736](https://github.com/adap/" +"flower/pull/2736), [#2756](https://github.com/adap/flower/pull/2756), [#2857]" +"(https://github.com/adap/flower/pull/2857), [#2757](https://github.com/adap/" +"flower/pull/2757), [#2810](https://github.com/adap/flower/pull/2810), [#2740]" +"(https://github.com/adap/flower/pull/2740), [#2789](https://github.com/adap/" +"flower/pull/2789))" +msgstr "" +"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), [#1873]" +"(https://github.com/adap/flower/pull/1873), [#1981](https://github.com/adap/" +"flower/pull/1981), [#1988](https://github.com/adap/flower/pull/1988), [#1984]" +"(https://github.com/adap/flower/pull/1984), [#1982](https://github.com/adap/" +"flower/pull/1982), [#2112](https://github.com/adap/flower/pull/2112), [#2144]" +"(https://github.com/adap/flower/pull/2144), [#2174](https://github.com/adap/" +"flower/pull/2174), [#2225](https://github.com/adap/flower/pull/2225), [#2183]" +"(https://github.com/adap/flower/pull/2183))" #: ../../source/ref-changelog.md:78 msgid "" -"**General improvements** " -"([#2803](https://github.com/adap/flower/pull/2803), " -"[#2847](https://github.com/adap/flower/pull/2847), " -"[#2877](https://github.com/adap/flower/pull/2877), " -"[#2690](https://github.com/adap/flower/pull/2690), " -"[#2889](https://github.com/adap/flower/pull/2889), " -"[#2874](https://github.com/adap/flower/pull/2874), " -"[#2819](https://github.com/adap/flower/pull/2819), " -"[#2689](https://github.com/adap/flower/pull/2689), " -"[#2457](https://github.com/adap/flower/pull/2457), " -"[#2870](https://github.com/adap/flower/pull/2870), " -"[#2669](https://github.com/adap/flower/pull/2669), " -"[#2876](https://github.com/adap/flower/pull/2876), " -"[#2885](https://github.com/adap/flower/pull/2885), " -"[#2858](https://github.com/adap/flower/pull/2858), " -"[#2867](https://github.com/adap/flower/pull/2867), " -"[#2351](https://github.com/adap/flower/pull/2351), " -"[#2886](https://github.com/adap/flower/pull/2886), " -"[#2860](https://github.com/adap/flower/pull/2860), " -"[#2828](https://github.com/adap/flower/pull/2828), " -"[#2869](https://github.com/adap/flower/pull/2869), " -"[#2875](https://github.com/adap/flower/pull/2875), " -"[#2733](https://github.com/adap/flower/pull/2733), " -"[#2488](https://github.com/adap/flower/pull/2488), " -"[#2646](https://github.com/adap/flower/pull/2646), " -"[#2879](https://github.com/adap/flower/pull/2879), " -"[#2821](https://github.com/adap/flower/pull/2821), " -"[#2855](https://github.com/adap/flower/pull/2855), " -"[#2800](https://github.com/adap/flower/pull/2800), " -"[#2807](https://github.com/adap/flower/pull/2807), " -"[#2801](https://github.com/adap/flower/pull/2801), " -"[#2804](https://github.com/adap/flower/pull/2804), " -"[#2851](https://github.com/adap/flower/pull/2851), " -"[#2787](https://github.com/adap/flower/pull/2787), " -"[#2852](https://github.com/adap/flower/pull/2852), " -"[#2672](https://github.com/adap/flower/pull/2672), " 
-"[#2759](https://github.com/adap/flower/pull/2759))" +"**General improvements** ([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), [#2877](https://github." +"com/adap/flower/pull/2877), [#2690](https://github.com/adap/flower/" +"pull/2690), [#2889](https://github.com/adap/flower/pull/2889), [#2874]" +"(https://github.com/adap/flower/pull/2874), [#2819](https://github.com/adap/" +"flower/pull/2819), [#2689](https://github.com/adap/flower/pull/2689), [#2457]" +"(https://github.com/adap/flower/pull/2457), [#2870](https://github.com/adap/" +"flower/pull/2870), [#2669](https://github.com/adap/flower/pull/2669), [#2876]" +"(https://github.com/adap/flower/pull/2876), [#2885](https://github.com/adap/" +"flower/pull/2885), [#2858](https://github.com/adap/flower/pull/2858), [#2867]" +"(https://github.com/adap/flower/pull/2867), [#2351](https://github.com/adap/" +"flower/pull/2351), [#2886](https://github.com/adap/flower/pull/2886), [#2860]" +"(https://github.com/adap/flower/pull/2860), [#2828](https://github.com/adap/" +"flower/pull/2828), [#2869](https://github.com/adap/flower/pull/2869), [#2875]" +"(https://github.com/adap/flower/pull/2875), [#2733](https://github.com/adap/" +"flower/pull/2733), [#2488](https://github.com/adap/flower/pull/2488), [#2646]" +"(https://github.com/adap/flower/pull/2646), [#2879](https://github.com/adap/" +"flower/pull/2879), [#2821](https://github.com/adap/flower/pull/2821), [#2855]" +"(https://github.com/adap/flower/pull/2855), [#2800](https://github.com/adap/" +"flower/pull/2800), [#2807](https://github.com/adap/flower/pull/2807), [#2801]" +"(https://github.com/adap/flower/pull/2801), [#2804](https://github.com/adap/" +"flower/pull/2804), [#2851](https://github.com/adap/flower/pull/2851), [#2787]" +"(https://github.com/adap/flower/pull/2787), [#2852](https://github.com/adap/" +"flower/pull/2852), [#2672](https://github.com/adap/flower/pull/2672), [#2759]" +"(https://github.com/adap/flower/pull/2759))" msgstr "" #: ../../source/ref-changelog.md:82 #, fuzzy msgid "" -"**Deprecate** `start_numpy_client` " -"([#2563](https://github.com/adap/flower/pull/2563), " -"[#2718](https://github.com/adap/flower/pull/2718))" +"**Deprecate** `start_numpy_client` ([#2563](https://github.com/adap/flower/" +"pull/2563), [#2718](https://github.com/adap/flower/pull/2718))" msgstr "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), [#2508](https://" +"github.com/adap/flower/pull/2508))" #: ../../source/ref-changelog.md:84 msgid "" "Until now, clients of type `NumPyClient` needed to be started via " -"`start_numpy_client`. In our efforts to consolidate framework APIs, we " -"have introduced changes, and now all client types should start via " -"`start_client`. To continue using `NumPyClient` clients, you simply need " -"to first call the `.to_client()` method and then pass returned `Client` " -"object to `start_client`. The examples and the documentation have been " -"updated accordingly." +"`start_numpy_client`. In our efforts to consolidate framework APIs, we have " +"introduced changes, and now all client types should start via " +"`start_client`. To continue using `NumPyClient` clients, you simply need to " +"first call the `.to_client()` method and then pass returned `Client` object " +"to `start_client`. The examples and the documentation have been updated " +"accordingly." 
msgstr "" #: ../../source/ref-changelog.md:86 #, fuzzy msgid "" -"**Deprecate legacy DP wrappers** " -"([#2749](https://github.com/adap/flower/pull/2749))" -msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" +"**Deprecate legacy DP wrappers** ([#2749](https://github.com/adap/flower/" +"pull/2749))" +msgstr "" +"**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" #: ../../source/ref-changelog.md:88 msgid "" -"Legacy DP wrapper classes are deprecated, but still functional. This is " -"in preparation for an all-new pluggable version of differential privacy " -"support in Flower." +"Legacy DP wrapper classes are deprecated, but still functional. This is in " +"preparation for an all-new pluggable version of differential privacy support " +"in Flower." msgstr "" #: ../../source/ref-changelog.md:90 @@ -15391,41 +15601,41 @@ msgid "" "**Make optional arg** `--callable` **in** `flower-client` **a required " "positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" msgstr "" -"**从** `start_client` 中移除** `rest` **实验参数 " -"([#2324](https://github.com/adap/flower/pull/2324))" +"**从** `start_client` 中移除** `rest` **实验参数 ([#2324](https://github.com/" +"adap/flower/pull/2324))" #: ../../source/ref-changelog.md:92 #, fuzzy msgid "" -"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " -"([#2890](https://github.com/adap/flower/pull/2890))" +"**Rename** `certificates` **to** `root_certificates` **in** `Driver` ([#2890]" +"(https://github.com/adap/flower/pull/2890))" msgstr "" -"**重新命名** `rnd` ** to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"**重新命名** `rnd` ** to** `server_round` ([#1321](https://github.com/adap/" +"flower/pull/1321))" #: ../../source/ref-changelog.md:94 #, fuzzy msgid "" -"**Drop experimental** `Task` **fields** " -"([#2866](https://github.com/adap/flower/pull/2866), " -"[#2865](https://github.com/adap/flower/pull/2865))" +"**Drop experimental** `Task` **fields** ([#2866](https://github.com/adap/" +"flower/pull/2866), [#2865](https://github.com/adap/flower/pull/2865))" msgstr "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), [#2615](https://" +"github.com/adap/flower/pull/2615))" #: ../../source/ref-changelog.md:96 msgid "" "Experimental fields `sa`, `legacy_server_message` and " -"`legacy_client_message` were removed from `Task` message. The removed " -"fields are superseded by the new `RecordSet` abstraction." +"`legacy_client_message` were removed from `Task` message. The removed fields " +"are superseded by the new `RecordSet` abstraction." msgstr "" #: ../../source/ref-changelog.md:98 #, fuzzy msgid "" -"**Retire MXNet examples** " -"([#2724](https://github.com/adap/flower/pull/2724))" -msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" +"**Retire MXNet examples** ([#2724](https://github.com/adap/flower/pull/2724))" +msgstr "" +"**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/" +"pull/748))" #: ../../source/ref-changelog.md:100 msgid "" @@ -15443,82 +15653,82 @@ msgstr "v1.4.0 (2023-04-21)" msgid "" "`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " "`Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Gabriel " -"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," -" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " -"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " -"`cnxdeveloper`, `k3nfalt` " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`, " +"`Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, `Steve " +"Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, `cnxdeveloper`, " +"`k3nfalt` " msgstr "" #: ../../source/ref-changelog.md:112 msgid "" -"**Add experimental support for Python 3.12** " -"([#2565](https://github.com/adap/flower/pull/2565))" +"**Add experimental support for Python 3.12** ([#2565](https://github.com/" +"adap/flower/pull/2565))" msgstr "" -"** 增加对 Python 3.12 的实验支持** " -"([#2565](https://github.com/adap/flower/pull/2565))" +"** 增加对 Python 3.12 的实验支持** ([#2565](https://github.com/adap/flower/" +"pull/2565))" #: ../../source/ref-changelog.md:114 #, fuzzy msgid "" -"**Add new XGBoost examples** " -"([#2612](https://github.com/adap/flower/pull/2612), " -"[#2554](https://github.com/adap/flower/pull/2554), " -"[#2617](https://github.com/adap/flower/pull/2617), " -"[#2618](https://github.com/adap/flower/pull/2618), " -"[#2619](https://github.com/adap/flower/pull/2619), " -"[#2567](https://github.com/adap/flower/pull/2567))" +"**Add new XGBoost examples** ([#2612](https://github.com/adap/flower/" +"pull/2612), [#2554](https://github.com/adap/flower/pull/2554), [#2617]" +"(https://github.com/adap/flower/pull/2617), [#2618](https://github.com/adap/" +"flower/pull/2618), [#2619](https://github.com/adap/flower/pull/2619), [#2567]" +"(https://github.com/adap/flower/pull/2567))" msgstr "" -"**引入(试验性)Driver API** ([#1520](https://github.com/adap/flower/pull/1520)," -" [#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +"**引入(试验性)Driver API** ([#1520](https://github.com/adap/flower/" +"pull/1520), [#1525](https://github.com/adap/flower/pull/1525), [#1545]" +"(https://github.com/adap/flower/pull/1545), [#1546](https://github.com/adap/" +"flower/pull/1546), [#1550](https://github.com/adap/flower/pull/1550), [#1551]" +"(https://github.com/adap/flower/pull/1551), [#1567](https://github.com/adap/" +"flower/pull/1567))" #: ../../source/ref-changelog.md:116 msgid "" -"We have added a new `xgboost-quickstart` example alongside a new " -"`xgboost-comprehensive` example that goes more in-depth." +"We have added a new `xgboost-quickstart` example alongside a new `xgboost-" +"comprehensive` example that goes more in-depth." 
msgstr "" #: ../../source/ref-changelog.md:118 #, fuzzy msgid "" -"**Add Vertical FL example** " -"([#2598](https://github.com/adap/flower/pull/2598))" -msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" +"**Add Vertical FL example** ([#2598](https://github.com/adap/flower/" +"pull/2598))" +msgstr "" +"**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/" +"pull/1289))" #: ../../source/ref-changelog.md:120 msgid "" -"We had many questions about Vertical Federated Learning using Flower, so " -"we decided to add an simple example for it on the [Titanic " -"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " -"tutorial (in the README)." +"We had many questions about Vertical Federated Learning using Flower, so we " +"decided to add an simple example for it on the [Titanic dataset](https://www." +"kaggle.com/competitions/titanic/data) alongside a tutorial (in the README)." msgstr "" #: ../../source/ref-changelog.md:122 msgid "" -"**Support custom** `ClientManager` **in** `start_driver()` " -"([#2292](https://github.com/adap/flower/pull/2292))" -msgstr "**在***`start_driver()`中支持自定义***`ClientManager([#2292](https://github.com/adap/flower/pull/2292))" +"**Support custom** `ClientManager` **in** `start_driver()` ([#2292](https://" +"github.com/adap/flower/pull/2292))" +msgstr "" +"**在***`start_driver()`中支持自定义***`ClientManager([#2292](https://github." +"com/adap/flower/pull/2292))" #: ../../source/ref-changelog.md:124 msgid "" -"**Update REST API to support create and delete nodes** " -"([#2283](https://github.com/adap/flower/pull/2283))" +"**Update REST API to support create and delete nodes** ([#2283](https://" +"github.com/adap/flower/pull/2283))" msgstr "" -"**更新 REST API 以支持创建和删除节点** " -"([#2283](https://github.com/adap/flower/pull/2283))" +"**更新 REST API 以支持创建和删除节点** ([#2283](https://github.com/adap/" +"flower/pull/2283))" #: ../../source/ref-changelog.md:126 #, fuzzy msgid "" -"**Update the Android SDK** " -"([#2187](https://github.com/adap/flower/pull/2187))" +"**Update the Android SDK** ([#2187](https://github.com/adap/flower/" +"pull/2187))" msgstr "" -"**介绍Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +"**介绍Flower Android SDK** ([#2131](https://github.com/adap/flower/" +"pull/2131))" #: ../../source/ref-changelog.md:128 #, fuzzy @@ -15528,16 +15738,14 @@ msgstr "为 C++ SDK 添加 gRPC 请求-响应功能。" #: ../../source/ref-changelog.md:130 #, fuzzy msgid "" -"**Update the C++ SDK** " -"([#2537](https://github.com/adap/flower/pull/2537), " -"[#2528](https://github.com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" +"**Update the C++ SDK** ([#2537](https://github.com/adap/flower/pull/2537), " +"[#2528](https://github.com/adap/flower/pull/2528), [#2523](https://github." +"com/adap/flower/pull/2523), [#2522](https://github.com/adap/flower/" +"pull/2522))" msgstr "" -"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " -"[#2528](https://github/com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" +"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), [#2528]" +"(https://github/com/adap/flower/pull/2528), [#2523](https://github.com/adap/" +"flower/pull/2523), [#2522](https://github.com/adap/flower/pull/2522))" #: ../../source/ref-changelog.md:132 msgid "Add gRPC request-response capability to the C++ SDK." 
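The `start_numpy_client` deprecation entry above spells out the migration: call `.to_client()` on the `NumPyClient` and pass the result to `start_client`. A minimal before/after sketch, assuming a user-defined `FlowerClient(NumPyClient)` subclass and a server reachable at `127.0.0.1:8080` (both are assumptions for illustration).

```python
import flwr as fl

# Before (deprecated):
# fl.client.start_numpy_client(server_address="127.0.0.1:8080", client=FlowerClient())

# After: convert the NumPyClient and use the unified entry point.
# FlowerClient is any user-defined NumPyClient subclass.
fl.client.start_client(
    server_address="127.0.0.1:8080",
    client=FlowerClient().to_client(),
)
```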
@@ -15546,118 +15754,119 @@ msgstr "为 C++ SDK 添加 gRPC 请求-响应功能。" #: ../../source/ref-changelog.md:134 #, fuzzy msgid "" -"**Make HTTPS the new default** " -"([#2591](https://github.com/adap/flower/pull/2591), " -"[#2636](https://github.com/adap/flower/pull/2636))" +"**Make HTTPS the new default** ([#2591](https://github.com/adap/flower/" +"pull/2591), [#2636](https://github.com/adap/flower/pull/2636))" msgstr "" -"Baselines文档([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400)" +"Baselines文档([#2290](https://github.com/adap/flower/pull/2290), [#2400]" +"(https://github.com/adap/flower/pull/2400)" #: ../../source/ref-changelog.md:136 msgid "" "Flower is moving to HTTPS by default. The new `flower-server` requires " -"passing `--certificates`, but users can enable `--insecure` to use HTTP " -"for prototyping. The same applies to `flower-client`, which can either " -"use user-provided credentials or gRPC-bundled certificates to connect to " -"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " -"enable insecure HTTP connections." +"passing `--certificates`, but users can enable `--insecure` to use HTTP for " +"prototyping. The same applies to `flower-client`, which can either use user-" +"provided credentials or gRPC-bundled certificates to connect to an HTTPS-" +"enabled server or requires opt-out via passing `--insecure` to enable " +"insecure HTTP connections." msgstr "" #: ../../source/ref-changelog.md:138 msgid "" -"For backward compatibility, `start_client()` and `start_numpy_client()` " -"will still start in insecure mode by default. In a future release, " -"insecure connections will require user opt-in by passing `insecure=True`." +"For backward compatibility, `start_client()` and `start_numpy_client()` will " +"still start in insecure mode by default. In a future release, insecure " +"connections will require user opt-in by passing `insecure=True`." msgstr "" #: ../../source/ref-changelog.md:140 msgid "" "**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +"[#2390](https://github.com/adap/flower/pull/2390), [#2493](https://github." +"com/adap/flower/pull/2493))" msgstr "" -"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/" +"pull/2303), [#2390](https://github.com/adap/flower/pull/2390), [#2493]" +"(https://github.com/adap/flower/pull/2493))" #: ../../source/ref-changelog.md:142 #, fuzzy msgid "" -"Using the `client_fn`, Flower clients can interchangeably run as " -"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" -" `start_simulation`) without requiring changes to how the client class is" -" defined and instantiated. The `to_client()` function is introduced to " +"Using the `client_fn`, Flower clients can interchangeably run as standalone " +"processes (i.e. via `start_client`) or in simulation (i.e. via " +"`start_simulation`) without requiring changes to how the client class is " +"defined and instantiated. The `to_client()` function is introduced to " "convert a `NumPyClient` to a `Client`." 
msgstr "" -"使用 `client_fn`,Flower 客户端可以作为独立进程(即通过 `start_client`)或在模拟中(即通过 " -"`start_simulation`)交替运行,而无需更改客户端类的定义和实例化方式。调用 `start_numpy_client` 现已过时。" +"使用 `client_fn`,Flower 客户端可以作为独立进程(即通过 `start_client`)或在" +"模拟中(即通过 `start_simulation`)交替运行,而无需更改客户端类的定义和实例化" +"方式。调用 `start_numpy_client` 现已过时。" #: ../../source/ref-changelog.md:144 msgid "" -"**Add new** `Bulyan` **strategy** " -"([#1817](https://github.com/adap/flower/pull/1817), " -"[#1891](https://github.com/adap/flower/pull/1891))" +"**Add new** `Bulyan` **strategy** ([#1817](https://github.com/adap/flower/" +"pull/1817), [#1891](https://github.com/adap/flower/pull/1891))" msgstr "" -"**添加新**\"Bulyan " -"\"**策略**([#1817](https://github.com/adap/flower/pull/1817), " -"[#1891](https://github.com/adap/flower/pull/1891)" +"**添加新**\"Bulyan \"**策略**([#1817](https://github.com/adap/flower/" +"pull/1817), [#1891](https://github.com/adap/flower/pull/1891)" #: ../../source/ref-changelog.md:146 msgid "" -"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " -"2018](https://arxiv.org/abs/1802.07927)" -msgstr "新的 \"Bulyan\"策略通过[El Mhamdi 等人,2018](https://arxiv.org/abs/1802.07927)实现" +"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., 2018]" +"(https://arxiv.org/abs/1802.07927)" +msgstr "" +"新的 \"Bulyan\"策略通过[El Mhamdi 等人,2018](https://arxiv.org/" +"abs/1802.07927)实现" #: ../../source/ref-changelog.md:148 #, fuzzy msgid "" -"**Add new** `XGB Bagging` **strategy** " -"([#2611](https://github.com/adap/flower/pull/2611))" -msgstr "**添加新的`FedProx`策略** ([#1619](https://github.com/adap/flower/pull/1619))" +"**Add new** `XGB Bagging` **strategy** ([#2611](https://github.com/adap/" +"flower/pull/2611))" +msgstr "" +"**添加新的`FedProx`策略** ([#1619](https://github.com/adap/flower/" +"pull/1619))" #: ../../source/ref-changelog.md:150 ../../source/ref-changelog.md:152 #, fuzzy msgid "" -"**Introduce `WorkloadState`** " -"([#2564](https://github.com/adap/flower/pull/2564), " -"[#2632](https://github.com/adap/flower/pull/2632))" +"**Introduce `WorkloadState`** ([#2564](https://github.com/adap/flower/" +"pull/2564), [#2632](https://github.com/adap/flower/pull/2632))" msgstr "" -"**新的内置策略**([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822)" +"**新的内置策略**([#828](https://github.com/adap/flower/pull/828) [#822]" +"(https://github.com/adap/flower/pull/822)" #: ../../source/ref-changelog.md:156 msgid "" -"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " -"[#2286](https://github.com/adap/flower/pull/2286), " -"[#2509](https://github.com/adap/flower/pull/2509))" +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), [#2286](https://" +"github.com/adap/flower/pull/2286), [#2509](https://github.com/adap/flower/" +"pull/2509))" msgstr "" -"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " -"[#2286](https://github.com/adap/flower/pull/2286), " -"[#2509](https://github.com/adap/flower/pull/2509))" +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), [#2286](https://" +"github.com/adap/flower/pull/2286), [#2509](https://github.com/adap/flower/" +"pull/2509))" #: ../../source/ref-changelog.md:158 msgid "" -"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400))" +"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), [#2400]" +"(https://github.com/adap/flower/pull/2400))" msgstr "" -"Baselines文档([#2290](https://github.com/adap/flower/pull/2290), " 
-"[#2400](https://github.com/adap/flower/pull/2400)" +"Baselines文档([#2290](https://github.com/adap/flower/pull/2290), [#2400]" +"(https://github.com/adap/flower/pull/2400)" #: ../../source/ref-changelog.md:160 msgid "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " -"[#2507](https://github.com/adap/flower/pull/2507))" +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), [#2507](https://" +"github.com/adap/flower/pull/2507))" msgstr "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " -"[#2507](https://github.com/adap/flower/pull/2507))" +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), [#2507](https://" +"github.com/adap/flower/pull/2507))" #: ../../source/ref-changelog.md:162 msgid "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), [#2508](https://" +"github.com/adap/flower/pull/2508))" msgstr "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), [#2508](https://" +"github.com/adap/flower/pull/2508))" #: ../../source/ref-changelog.md:164 msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" @@ -15689,165 +15898,142 @@ msgstr "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" #: ../../source/ref-changelog.md:178 msgid "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), [#2615](https://" +"github.com/adap/flower/pull/2615))" msgstr "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), [#2615](https://" +"github.com/adap/flower/pull/2615))" #: ../../source/ref-changelog.md:180 #, fuzzy msgid "" -"**General updates to Flower Examples** " -"([#2384](https://github.com/adap/flower/pull/2384), " -"[#2425](https://github.com/adap/flower/pull/2425), " -"[#2526](https://github.com/adap/flower/pull/2526), " -"[#2302](https://github.com/adap/flower/pull/2302), " -"[#2545](https://github.com/adap/flower/pull/2545))" +"**General updates to Flower Examples** ([#2384](https://github.com/adap/" +"flower/pull/2384), [#2425](https://github.com/adap/flower/pull/2425), [#2526]" +"(https://github.com/adap/flower/pull/2526), [#2302](https://github.com/adap/" +"flower/pull/2302), [#2545](https://github.com/adap/flower/pull/2545))" msgstr "" -"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " -"[#2528](https://github/com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" +"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), [#2528]" +"(https://github/com/adap/flower/pull/2528), [#2523](https://github.com/adap/" +"flower/pull/2523), [#2522](https://github.com/adap/flower/pull/2522))" #: ../../source/ref-changelog.md:182 #, fuzzy msgid "" -"**General updates to Flower Baselines** " -"([#2301](https://github.com/adap/flower/pull/2301), " -"[#2305](https://github.com/adap/flower/pull/2305), " -"[#2307](https://github.com/adap/flower/pull/2307), " -"[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435), " -"[#2462](https://github.com/adap/flower/pull/2462), " 
-"[#2463](https://github.com/adap/flower/pull/2463), " -"[#2461](https://github.com/adap/flower/pull/2461), " -"[#2469](https://github.com/adap/flower/pull/2469), " -"[#2466](https://github.com/adap/flower/pull/2466), " -"[#2471](https://github.com/adap/flower/pull/2471), " -"[#2472](https://github.com/adap/flower/pull/2472), " -"[#2470](https://github.com/adap/flower/pull/2470))" -msgstr "" -"**普通改进**([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " -"[2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349)," -" [#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446) " -"[#2561](https://github.com/adap/flower/pull/2561))" +"**General updates to Flower Baselines** ([#2301](https://github.com/adap/" +"flower/pull/2301), [#2305](https://github.com/adap/flower/pull/2305), [#2307]" +"(https://github.com/adap/flower/pull/2307), [#2327](https://github.com/adap/" +"flower/pull/2327), [#2435](https://github.com/adap/flower/pull/2435), [#2462]" +"(https://github.com/adap/flower/pull/2462), [#2463](https://github.com/adap/" +"flower/pull/2463), [#2461](https://github.com/adap/flower/pull/2461), [#2469]" +"(https://github.com/adap/flower/pull/2469), [#2466](https://github.com/adap/" +"flower/pull/2466), [#2471](https://github.com/adap/flower/pull/2471), [#2472]" +"(https://github.com/adap/flower/pull/2472), [#2470](https://github.com/adap/" +"flower/pull/2470))" +msgstr "" +"**普通改进**([#2309](https://github.com/adap/flower/pull/2309), [#2310]" +"(https://github.com/adap/flower/pull/2310), [2313](https://github.com/adap/" +"flower/pull/2313), [#2316](https://github.com/adap/flower/pull/2316), [2317]" +"(https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/" +"flower/pull/2349), [#2360](https://github.com/adap/flower/pull/2360), [#2402]" +"(https://github.com/adap/flower/pull/2402), [#2446](https://github.com/adap/" +"flower/pull/2446) [#2561](https://github.com/adap/flower/pull/2561))" #: ../../source/ref-changelog.md:184 #, fuzzy msgid "" -"**General updates to the simulation engine** " -"([#2331](https://github.com/adap/flower/pull/2331), " -"[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448), " -"[#2294](https://github.com/adap/flower/pull/2294))" +"**General updates to the simulation engine** ([#2331](https://github.com/" +"adap/flower/pull/2331), [#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448), [#2294](https://github." +"com/adap/flower/pull/2294))" msgstr "" "**模拟引擎的普通更新** ([#2331](https://github.com/adap/flower/pull/2331), " -"[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448))" +"[#2447](https://github.com/adap/flower/pull/2447), [#2448](https://github." 
+"com/adap/flower/pull/2448))" #: ../../source/ref-changelog.md:186 #, fuzzy msgid "" -"**General updates to Flower SDKs** " -"([#2288](https://github.com/adap/flower/pull/2288), " -"[#2429](https://github.com/adap/flower/pull/2429), " -"[#2555](https://github.com/adap/flower/pull/2555), " -"[#2543](https://github.com/adap/flower/pull/2543), " -"[#2544](https://github.com/adap/flower/pull/2544), " -"[#2597](https://github.com/adap/flower/pull/2597), " -"[#2623](https://github.com/adap/flower/pull/2623))" +"**General updates to Flower SDKs** ([#2288](https://github.com/adap/flower/" +"pull/2288), [#2429](https://github.com/adap/flower/pull/2429), [#2555]" +"(https://github.com/adap/flower/pull/2555), [#2543](https://github.com/adap/" +"flower/pull/2543), [#2544](https://github.com/adap/flower/pull/2544), [#2597]" +"(https://github.com/adap/flower/pull/2597), [#2623](https://github.com/adap/" +"flower/pull/2623))" msgstr "" -"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " -"[#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475)))" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), [#1470]" +"(https://github.com/adap/flower/pull/1470), [#1472](https://github.com/adap/" +"flower/pull/1472), [#1473](https://github.com/adap/flower/pull/1473), [#1474]" +"(https://github.com/adap/flower/pull/1474), [#1475](https://github.com/adap/" +"flower/pull/1475)))" #: ../../source/ref-changelog.md:188 msgid "" -"**General improvements** " -"([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[#2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " -"[#2317](https://github.com/adap/flower/pull/2317), " -"[#2349](https://github.com/adap/flower/pull/2349), " -"[#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446), " -"[#2561](https://github.com/adap/flower/pull/2561), " -"[#2273](https://github.com/adap/flower/pull/2273), " -"[#2267](https://github.com/adap/flower/pull/2267), " -"[#2274](https://github.com/adap/flower/pull/2274), " -"[#2275](https://github.com/adap/flower/pull/2275), " -"[#2432](https://github.com/adap/flower/pull/2432), " -"[#2251](https://github.com/adap/flower/pull/2251), " -"[#2321](https://github.com/adap/flower/pull/2321), " -"[#1936](https://github.com/adap/flower/pull/1936), " -"[#2408](https://github.com/adap/flower/pull/2408), " -"[#2413](https://github.com/adap/flower/pull/2413), " -"[#2401](https://github.com/adap/flower/pull/2401), " -"[#2531](https://github.com/adap/flower/pull/2531), " -"[#2534](https://github.com/adap/flower/pull/2534), " -"[#2535](https://github.com/adap/flower/pull/2535), " -"[#2521](https://github.com/adap/flower/pull/2521), " -"[#2553](https://github.com/adap/flower/pull/2553), " -"[#2596](https://github.com/adap/flower/pull/2596))" +"**General improvements** ([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), [#2313](https://github." 
+"com/adap/flower/pull/2313), [#2316](https://github.com/adap/flower/" +"pull/2316), [#2317](https://github.com/adap/flower/pull/2317), [#2349]" +"(https://github.com/adap/flower/pull/2349), [#2360](https://github.com/adap/" +"flower/pull/2360), [#2402](https://github.com/adap/flower/pull/2402), [#2446]" +"(https://github.com/adap/flower/pull/2446), [#2561](https://github.com/adap/" +"flower/pull/2561), [#2273](https://github.com/adap/flower/pull/2273), [#2267]" +"(https://github.com/adap/flower/pull/2267), [#2274](https://github.com/adap/" +"flower/pull/2274), [#2275](https://github.com/adap/flower/pull/2275), [#2432]" +"(https://github.com/adap/flower/pull/2432), [#2251](https://github.com/adap/" +"flower/pull/2251), [#2321](https://github.com/adap/flower/pull/2321), [#1936]" +"(https://github.com/adap/flower/pull/1936), [#2408](https://github.com/adap/" +"flower/pull/2408), [#2413](https://github.com/adap/flower/pull/2413), [#2401]" +"(https://github.com/adap/flower/pull/2401), [#2531](https://github.com/adap/" +"flower/pull/2531), [#2534](https://github.com/adap/flower/pull/2534), [#2535]" +"(https://github.com/adap/flower/pull/2535), [#2521](https://github.com/adap/" +"flower/pull/2521), [#2553](https://github.com/adap/flower/pull/2553), [#2596]" +"(https://github.com/adap/flower/pull/2596))" msgstr "" #: ../../source/ref-changelog.md:190 ../../source/ref-changelog.md:280 #: ../../source/ref-changelog.md:344 ../../source/ref-changelog.md:398 #: ../../source/ref-changelog.md:465 -msgid "Flower received many improvements under the hood, too many to list here." +msgid "" +"Flower received many improvements under the hood, too many to list here." msgstr "Flower 进行了许多改进,这里就不一一列举了。" #: ../../source/ref-changelog.md:194 msgid "" -"**Remove support for Python 3.7** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" -msgstr "" -"**移除对 Python 3.7 的支持** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" +"**Remove support for Python 3.7** ([#2280](https://github.com/adap/flower/" +"pull/2280), [#2299](https://github.com/adap/flower/pull/2299), [#2304]" +"(https://github.com/adap/flower/pull/2304), [#2306](https://github.com/adap/" +"flower/pull/2306), [#2355](https://github.com/adap/flower/pull/2355), [#2356]" +"(https://github.com/adap/flower/pull/2356))" +msgstr "" +"**移除对 Python 3.7 的支持** ([#2280](https://github.com/adap/flower/" +"pull/2280), [#2299](https://github.com/adap/flower/pull/2299), [#2304]" +"(https://github.com/adap/flower/pull/2304), [#2306](https://github.com/adap/" +"flower/pull/2306), [#2355](https://github.com/adap/flower/pull/2355), [#2356]" +"(https://github.com/adap/flower/pull/2356))" #: ../../source/ref-changelog.md:196 msgid "" -"Python 3.7 support was deprecated in Flower 1.5, and this release removes" -" support. Flower now requires Python 3.8." -msgstr "在 Flower 1.5 中,Python 3.7 支持已被弃用,本版本将删除该支持。Flower 现在需要 Python 3.8。" +"Python 3.7 support was deprecated in Flower 1.5, and this release removes " +"support. 
Flower now requires Python 3.8." +msgstr "" +"在 Flower 1.5 中,Python 3.7 支持已被弃用,本版本将删除该支持。Flower 现在需" +"要 Python 3.8。" #: ../../source/ref-changelog.md:198 msgid "" -"**Remove experimental argument** `rest` **from** `start_client` " -"([#2324](https://github.com/adap/flower/pull/2324))" +"**Remove experimental argument** `rest` **from** `start_client` ([#2324]" +"(https://github.com/adap/flower/pull/2324))" msgstr "" -"**从** `start_client` 中移除** `rest` **实验参数 " -"([#2324](https://github.com/adap/flower/pull/2324))" +"**从** `start_client` 中移除** `rest` **实验参数 ([#2324](https://github.com/" +"adap/flower/pull/2324))" #: ../../source/ref-changelog.md:200 msgid "" -"The (still experimental) argument `rest` was removed from `start_client` " -"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " -"experimental REST API instead." +"The (still experimental) argument `rest` was removed from `start_client` and " +"`start_numpy_client`. Use `transport=\"rest\"` to opt into the experimental " +"REST API instead." msgstr "" -"删除了 `start_client` 和 `start_numpy_client` 中的参数 `rest`(仍属试验性质)。请使用 " -"`transport=\"rest\"` 来选择使用试验性 REST API。" +"删除了 `start_client` 和 `start_numpy_client` 中的参数 `rest`(仍属试验性" +"质)。请使用 `transport=\"rest\"` 来选择使用试验性 REST API。" #: ../../source/ref-changelog.md:202 msgid "v1.5.0 (2023-08-31)" @@ -15869,192 +16055,163 @@ msgstr "" #: ../../source/ref-changelog.md:212 msgid "" -"**Introduce new simulation engine** " -"([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +"**Introduce new simulation engine** ([#1969](https://github.com/adap/flower/" +"pull/1969), [#2221](https://github.com/adap/flower/pull/2221), [#2248]" +"(https://github.com/adap/flower/pull/2248))" msgstr "" "**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +"[#2221](https://github.com/adap/flower/pull/2221), [#2248](https://github." +"com/adap/flower/pull/2248))" #: ../../source/ref-changelog.md:214 msgid "" "The new simulation engine has been rewritten from the ground up, yet it " -"remains fully backwards compatible. It offers much improved stability and" -" memory handling, especially when working with GPUs. Simulations " -"transparently adapt to different settings to scale simulation in CPU-" -"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." +"remains fully backwards compatible. It offers much improved stability and " +"memory handling, especially when working with GPUs. Simulations " +"transparently adapt to different settings to scale simulation in CPU-only, " +"CPU+GPU, multi-GPU, or multi-node multi-GPU environments." 
msgstr "" -"新的模拟引擎从头开始重新编写,但仍完全向后兼容。它的稳定性和内存处理能力大大提高,尤其是在使用 GPU 时。仿真可透明地适应不同的设置,以在仅 " -"CPU、CPU+GPU、多 GPU 或多节点多 GPU 环境中扩展模拟。" +"新的模拟引擎从头开始重新编写,但仍完全向后兼容。它的稳定性和内存处理能力大大" +"提高,尤其是在使用 GPU 时。仿真可透明地适应不同的设置,以在仅 CPU、CPU+GPU、" +"多 GPU 或多节点多 GPU 环境中扩展模拟。" #: ../../source/ref-changelog.md:216 msgid "" -"Comprehensive documentation includes a new [how-to run " -"simulations](https://flower.ai/docs/framework/how-to-run-" -"simulations.html) guide, new [simulation-" +"Comprehensive documentation includes a new [how-to run simulations](https://" +"flower.ai/docs/framework/how-to-run-simulations.html) guide, new [simulation-" "pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " "[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial " -"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." +"tensorflow.html) notebooks, and a new [YouTube tutorial series](https://www." +"youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." msgstr "" -"综合文档包括新的[how-to run simulations](https://flower.ai/docs/framework/how-to-" -"run-simulations.html) guide, new [simulation-" -"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial " -"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)。" +"综合文档包括新的[how-to run simulations](https://flower.ai/docs/framework/" +"how-to-run-simulations.html) guide, new [simulation-pytorch](https://flower." +"ai/docs/examples/simulation-pytorch.html) and [simulation-tensorflow]" +"(https://flower.ai/docs/examples/simulation-tensorflow.html) notebooks, and " +"a new [YouTube tutorial series](https://www.youtube.com/watch?" 
+"v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)。" #: ../../source/ref-changelog.md:218 msgid "" -"**Restructure Flower Docs** " -"([#1824](https://github.com/adap/flower/pull/1824), " -"[#1865](https://github.com/adap/flower/pull/1865), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1887](https://github.com/adap/flower/pull/1887), " -"[#1919](https://github.com/adap/flower/pull/1919), " -"[#1922](https://github.com/adap/flower/pull/1922), " -"[#1920](https://github.com/adap/flower/pull/1920), " -"[#1923](https://github.com/adap/flower/pull/1923), " -"[#1924](https://github.com/adap/flower/pull/1924), " -"[#1962](https://github.com/adap/flower/pull/1962), " -"[#2006](https://github.com/adap/flower/pull/2006), " -"[#2133](https://github.com/adap/flower/pull/2133), " -"[#2203](https://github.com/adap/flower/pull/2203), " -"[#2215](https://github.com/adap/flower/pull/2215), " -"[#2122](https://github.com/adap/flower/pull/2122), " -"[#2223](https://github.com/adap/flower/pull/2223), " -"[#2219](https://github.com/adap/flower/pull/2219), " -"[#2232](https://github.com/adap/flower/pull/2232), " -"[#2233](https://github.com/adap/flower/pull/2233), " -"[#2234](https://github.com/adap/flower/pull/2234), " -"[#2235](https://github.com/adap/flower/pull/2235), " -"[#2237](https://github.com/adap/flower/pull/2237), " -"[#2238](https://github.com/adap/flower/pull/2238), " -"[#2242](https://github.com/adap/flower/pull/2242), " -"[#2231](https://github.com/adap/flower/pull/2231), " -"[#2243](https://github.com/adap/flower/pull/2243), " -"[#2227](https://github.com/adap/flower/pull/2227))" +"**Restructure Flower Docs** ([#1824](https://github.com/adap/flower/" +"pull/1824), [#1865](https://github.com/adap/flower/pull/1865), [#1884]" +"(https://github.com/adap/flower/pull/1884), [#1887](https://github.com/adap/" +"flower/pull/1887), [#1919](https://github.com/adap/flower/pull/1919), [#1922]" +"(https://github.com/adap/flower/pull/1922), [#1920](https://github.com/adap/" +"flower/pull/1920), [#1923](https://github.com/adap/flower/pull/1923), [#1924]" +"(https://github.com/adap/flower/pull/1924), [#1962](https://github.com/adap/" +"flower/pull/1962), [#2006](https://github.com/adap/flower/pull/2006), [#2133]" +"(https://github.com/adap/flower/pull/2133), [#2203](https://github.com/adap/" +"flower/pull/2203), [#2215](https://github.com/adap/flower/pull/2215), [#2122]" +"(https://github.com/adap/flower/pull/2122), [#2223](https://github.com/adap/" +"flower/pull/2223), [#2219](https://github.com/adap/flower/pull/2219), [#2232]" +"(https://github.com/adap/flower/pull/2232), [#2233](https://github.com/adap/" +"flower/pull/2233), [#2234](https://github.com/adap/flower/pull/2234), [#2235]" +"(https://github.com/adap/flower/pull/2235), [#2237](https://github.com/adap/" +"flower/pull/2237), [#2238](https://github.com/adap/flower/pull/2238), [#2242]" +"(https://github.com/adap/flower/pull/2242), [#2231](https://github.com/adap/" +"flower/pull/2231), [#2243](https://github.com/adap/flower/pull/2243), [#2227]" +"(https://github.com/adap/flower/pull/2227))" msgstr "" "**重构 Flower 文档** ([#1824](https://github.com/adap/flower/pull/1824), " -"[#1865](https://github.com/adap/flower/pull/1865), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1887](https://github.com/adap/flower/pull/1887), " -"[#1919](https://github.com/adap/flower/pull/1919), " -"[#1922](https://github.com/adap/flower/pull/1922), " -"[#1920](https://github.com/adap/flower/pull/1920), " 
-"[#1923](https://github.com/adap/flower/pull/1923), " -"[#1924](https://github.com/adap/flower/pull/1924), " -"[#1962](https://github.com/adap/flower/pull/1962), " -"[#2006](https://github.com/adap/flower/pull/2006), " -"[#2133](https://github.com/adap/flower/pull/2133), " -"[#2203](https://github.com/adap/flower/pull/2203), " -"[#2215](https://github.com/adap/flower/pull/2215), " -"[#2122](https://github.com/adap/flower/pull/2122), " -"[#2223](https://github.com/adap/flower/pull/2223), " -"[#2219](https://github.com/adap/flower/pull/2219), " -"[#2232](https://github.com/adap/flower/pull/2232), " -"[#2233](https://github.com/adap/flower/pull/2233), " -"[#2234](https://github.com/adap/flower/pull/2234), " -"[#2235](https://github.com/adap/flower/pull/2235), " -"[#2237](https://github.com/adap/flower/pull/2237), " -"[#2238](https://github.com/adap/flower/pull/2238), " -"[#2242](https://github.com/adap/flower/pull/2242), " -"[#2231](https://github.com/adap/flower/pull/2231), " -"[#2243](https://github.com/adap/flower/pull/2243), " -"[#2227](https://github.com/adap/flower/pull/2227))" +"[#1865](https://github.com/adap/flower/pull/1865), [#1884](https://github." +"com/adap/flower/pull/1884), [#1887](https://github.com/adap/flower/" +"pull/1887), [#1919](https://github.com/adap/flower/pull/1919), [#1922]" +"(https://github.com/adap/flower/pull/1922), [#1920](https://github.com/adap/" +"flower/pull/1920), [#1923](https://github.com/adap/flower/pull/1923), [#1924]" +"(https://github.com/adap/flower/pull/1924), [#1962](https://github.com/adap/" +"flower/pull/1962), [#2006](https://github.com/adap/flower/pull/2006), [#2133]" +"(https://github.com/adap/flower/pull/2133), [#2203](https://github.com/adap/" +"flower/pull/2203), [#2215](https://github.com/adap/flower/pull/2215), [#2122]" +"(https://github.com/adap/flower/pull/2122), [#2223](https://github.com/adap/" +"flower/pull/2223), [#2219](https://github.com/adap/flower/pull/2219), [#2232]" +"(https://github.com/adap/flower/pull/2232), [#2233](https://github.com/adap/" +"flower/pull/2233), [#2234](https://github.com/adap/flower/pull/2234), [#2235]" +"(https://github.com/adap/flower/pull/2235), [#2237](https://github.com/adap/" +"flower/pull/2237), [#2238](https://github.com/adap/flower/pull/2238), [#2242]" +"(https://github.com/adap/flower/pull/2242), [#2231](https://github.com/adap/" +"flower/pull/2231), [#2243](https://github.com/adap/flower/pull/2243), [#2227]" +"(https://github.com/adap/flower/pull/2227))" #: ../../source/ref-changelog.md:220 #, fuzzy msgid "" -"Much effort went into a completely restructured Flower docs experience. " -"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " -"divided into Flower Framework, Flower Baselines, Flower Android SDK, " -"Flower iOS SDK, and code example projects." +"Much effort went into a completely restructured Flower docs experience. The " +"documentation on [flower.ai/docs](https://flower.ai/docs) is now divided " +"into Flower Framework, Flower Baselines, Flower Android SDK, Flower iOS SDK, " +"and code example projects." 
msgstr "" -"Flower 文档体验的全面重构耗费了大量精力。现在,[flower.ai/docs](flower.ai/docs)上的文档分为 Flower " -"Framework、Flower Baselines、Flower Android SDK、Flower iOS SDK 和代码示例项目。" +"Flower 文档体验的全面重构耗费了大量精力。现在,[flower.ai/docs](flower.ai/" +"docs)上的文档分为 Flower Framework、Flower Baselines、Flower Android SDK、" +"Flower iOS SDK 和代码示例项目。" #: ../../source/ref-changelog.md:222 msgid "" -"**Introduce Flower Swift SDK** " -"([#1858](https://github.com/adap/flower/pull/1858), " -"[#1897](https://github.com/adap/flower/pull/1897))" +"**Introduce Flower Swift SDK** ([#1858](https://github.com/adap/flower/" +"pull/1858), [#1897](https://github.com/adap/flower/pull/1897))" msgstr "" -"**介绍 Flower Swift SDK** " -"([#1858](https://github.com/adap/flower/pull/1858), " -"[#1897](https://github.com/adap/flower/pull/1897))" +"**介绍 Flower Swift SDK** ([#1858](https://github.com/adap/flower/" +"pull/1858), [#1897](https://github.com/adap/flower/pull/1897))" #: ../../source/ref-changelog.md:224 msgid "" -"This is the first preview release of the Flower Swift SDK. Flower support" -" on iOS is improving, and alongside the Swift SDK and code example, there" -" is now also an iOS quickstart tutorial." +"This is the first preview release of the Flower Swift SDK. Flower support on " +"iOS is improving, and alongside the Swift SDK and code example, there is now " +"also an iOS quickstart tutorial." msgstr "" -"这是 Flower Swift SDK 的首个预览版。Flower 对 iOS 的支持正在不断改进,除了 Swift SDK " -"和代码示例外,现在还有 iOS 快速入门教程。" +"这是 Flower Swift SDK 的首个预览版。Flower 对 iOS 的支持正在不断改进,除了 " +"Swift SDK 和代码示例外,现在还有 iOS 快速入门教程。" #: ../../source/ref-changelog.md:226 msgid "" -"**Introduce Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +"**Introduce Flower Android SDK** ([#2131](https://github.com/adap/flower/" +"pull/2131))" msgstr "" -"**介绍Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +"**介绍Flower Android SDK** ([#2131](https://github.com/adap/flower/" +"pull/2131))" #: ../../source/ref-changelog.md:228 msgid "" -"This is the first preview release of the Flower Kotlin SDK. Flower " -"support on Android is improving, and alongside the Kotlin SDK and code " -"example, there is now also an Android quickstart tutorial." +"This is the first preview release of the Flower Kotlin SDK. Flower support " +"on Android is improving, and alongside the Kotlin SDK and code example, " +"there is now also an Android quickstart tutorial." 
msgstr "" -"这是 Flower Kotlin SDK 的首个预览版。Flower 对 Android 的支持正在不断改进,除了 Kotlin SDK " -"和代码示例,现在还有 Android 快速入门教程。" +"这是 Flower Kotlin SDK 的首个预览版。Flower 对 Android 的支持正在不断改进,除" +"了 Kotlin SDK 和代码示例,现在还有 Android 快速入门教程。" #: ../../source/ref-changelog.md:230 msgid "" -"**Introduce new end-to-end testing infrastructure** " -"([#1842](https://github.com/adap/flower/pull/1842), " -"[#2071](https://github.com/adap/flower/pull/2071), " -"[#2072](https://github.com/adap/flower/pull/2072), " -"[#2068](https://github.com/adap/flower/pull/2068), " -"[#2067](https://github.com/adap/flower/pull/2067), " -"[#2069](https://github.com/adap/flower/pull/2069), " -"[#2073](https://github.com/adap/flower/pull/2073), " -"[#2070](https://github.com/adap/flower/pull/2070), " -"[#2074](https://github.com/adap/flower/pull/2074), " -"[#2082](https://github.com/adap/flower/pull/2082), " -"[#2084](https://github.com/adap/flower/pull/2084), " -"[#2093](https://github.com/adap/flower/pull/2093), " -"[#2109](https://github.com/adap/flower/pull/2109), " -"[#2095](https://github.com/adap/flower/pull/2095), " -"[#2140](https://github.com/adap/flower/pull/2140), " -"[#2137](https://github.com/adap/flower/pull/2137), " -"[#2165](https://github.com/adap/flower/pull/2165))" +"**Introduce new end-to-end testing infrastructure** ([#1842](https://github." +"com/adap/flower/pull/1842), [#2071](https://github.com/adap/flower/" +"pull/2071), [#2072](https://github.com/adap/flower/pull/2072), [#2068]" +"(https://github.com/adap/flower/pull/2068), [#2067](https://github.com/adap/" +"flower/pull/2067), [#2069](https://github.com/adap/flower/pull/2069), [#2073]" +"(https://github.com/adap/flower/pull/2073), [#2070](https://github.com/adap/" +"flower/pull/2070), [#2074](https://github.com/adap/flower/pull/2074), [#2082]" +"(https://github.com/adap/flower/pull/2082), [#2084](https://github.com/adap/" +"flower/pull/2084), [#2093](https://github.com/adap/flower/pull/2093), [#2109]" +"(https://github.com/adap/flower/pull/2109), [#2095](https://github.com/adap/" +"flower/pull/2095), [#2140](https://github.com/adap/flower/pull/2140), [#2137]" +"(https://github.com/adap/flower/pull/2137), [#2165](https://github.com/adap/" +"flower/pull/2165))" msgstr "" "*介绍新的端到端测试** ([#1842](https://github.com/adap/flower/pull/1842), " -"[#2071](https://github.com/adap/flower/pull/2071), " -"[#2072](https://github.com/adap/flower/pull/2072), " -"[#2068](https://github.com/adap/flower/pull/2068), " -"[#2067](https://github.com/adap/flower/pull/2067), " -"[#2069](https://github.com/adap/flower/pull/2069), " -"[#2073](https://github.com/adap/flower/pull/2073), " -"[#2070](https://github.com/adap/flower/pull/2070), " -"[#2074](https://github.com/adap/flower/pull/2074), " -"[#2082](https://github.com/adap/flower/pull/2082), " -"[#2084](https://github.com/adap/flower/pull/2084), " -"[#2093](https://github.com/adap/flower/pull/2093), " -"[#2109](https://github.com/adap/flower/pull/2109), " -"[#2095](https://github.com/adap/flower/pull/2095), " -"[#2140](https://github.com/adap/flower/pull/2140), " -"[#2137](https://github.com/adap/flower/pull/2137), " -"[#2165](https://github.com/adap/flower/pull/2165))" +"[#2071](https://github.com/adap/flower/pull/2071), [#2072](https://github." 
+"com/adap/flower/pull/2072), [#2068](https://github.com/adap/flower/" +"pull/2068), [#2067](https://github.com/adap/flower/pull/2067), [#2069]" +"(https://github.com/adap/flower/pull/2069), [#2073](https://github.com/adap/" +"flower/pull/2073), [#2070](https://github.com/adap/flower/pull/2070), [#2074]" +"(https://github.com/adap/flower/pull/2074), [#2082](https://github.com/adap/" +"flower/pull/2082), [#2084](https://github.com/adap/flower/pull/2084), [#2093]" +"(https://github.com/adap/flower/pull/2093), [#2109](https://github.com/adap/" +"flower/pull/2109), [#2095](https://github.com/adap/flower/pull/2095), [#2140]" +"(https://github.com/adap/flower/pull/2140), [#2137](https://github.com/adap/" +"flower/pull/2137), [#2165](https://github.com/adap/flower/pull/2165))" #: ../../source/ref-changelog.md:232 msgid "" -"A new testing infrastructure ensures that new changes stay compatible " -"with existing framework integrations or strategies." +"A new testing infrastructure ensures that new changes stay compatible with " +"existing framework integrations or strategies." msgstr "新的测试设施可确保新的变更与现有的框架集成或策略保持兼容。" #: ../../source/ref-changelog.md:234 @@ -16063,151 +16220,155 @@ msgstr "** 过时的 Python 3.7**" #: ../../source/ref-changelog.md:236 msgid "" -"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" -" Python 3.7 is now deprecated and will be removed in an upcoming release." -msgstr "由于 Python 3.7 已于 2023-06-27 弃用 (EOL),对 Python 3.7 的支持现已废弃,并将在即将发布的版本中移除。" +"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for " +"Python 3.7 is now deprecated and will be removed in an upcoming release." +msgstr "" +"由于 Python 3.7 已于 2023-06-27 弃用 (EOL),对 Python 3.7 的支持现已废弃,并" +"将在即将发布的版本中移除。" #: ../../source/ref-changelog.md:238 msgid "" -"**Add new** `FedTrimmedAvg` **strategy** " -"([#1769](https://github.com/adap/flower/pull/1769), " -"[#1853](https://github.com/adap/flower/pull/1853))" +"**Add new** `FedTrimmedAvg` **strategy** ([#1769](https://github.com/adap/" +"flower/pull/1769), [#1853](https://github.com/adap/flower/pull/1853))" msgstr "" -"**添加新的**`FedTrimmedAvg`**策略**([#1769](https://github.com/adap/flower/pull/1769)," -" [#1853](https://github.com/adap/flower/pull/1853)" +"**添加新的**`FedTrimmedAvg`**策略**([#1769](https://github.com/adap/flower/" +"pull/1769), [#1853](https://github.com/adap/flower/pull/1853)" #: ../../source/ref-changelog.md:240 msgid "" -"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " -"2018](https://arxiv.org/abs/1803.01498)." +"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, 2018]" +"(https://arxiv.org/abs/1803.01498)." msgstr "" -"新的 \"FedTrimmedAvg \"策略实现了[Dong Yin, " -"2018](https://arxiv.org/abs/1803.01498)的 \"Trimmed Mean\"。" +"新的 \"FedTrimmedAvg \"策略实现了[Dong Yin, 2018](https://arxiv.org/" +"abs/1803.01498)的 \"Trimmed Mean\"。" #: ../../source/ref-changelog.md:242 msgid "" -"**Introduce start_driver** " -"([#1697](https://github.com/adap/flower/pull/1697))" -msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" +"**Introduce start_driver** ([#1697](https://github.com/adap/flower/" +"pull/1697))" +msgstr "" +"**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" #: ../../source/ref-changelog.md:244 msgid "" -"In addition to `start_server` and using the raw Driver API, there is a " -"new `start_driver` function that allows for running `start_server` " -"scripts as a Flower driver with only a single-line code change. 
Check out" -" the `mt-pytorch` code example to see a working example using " -"`start_driver`." +"In addition to `start_server` and using the raw Driver API, there is a new " +"`start_driver` function that allows for running `start_server` scripts as a " +"Flower driver with only a single-line code change. Check out the `mt-" +"pytorch` code example to see a working example using `start_driver`." msgstr "" -"除了 `start_server` 和使用原始驱动 API 之外,还有一个新的 `start_driver` 函数,只需修改一行代码,就能将 " -"`start_server` 脚本作为 Flower 驱动程序运行。请查看 `mt-pytorch` 代码示例,了解使用 " -"`start_driver` 的工作示例。" +"除了 `start_server` 和使用原始驱动 API 之外,还有一个新的 `start_driver` 函" +"数,只需修改一行代码,就能将 `start_server` 脚本作为 Flower 驱动程序运行。请" +"查看 `mt-pytorch` 代码示例,了解使用 `start_driver` 的工作示例。" #: ../../source/ref-changelog.md:246 msgid "" -"**Add parameter aggregation to** `mt-pytorch` **code example** " -"([#1785](https://github.com/adap/flower/pull/1785))" +"**Add parameter aggregation to** `mt-pytorch` **code example** ([#1785]" +"(https://github.com/adap/flower/pull/1785))" msgstr "" -"为 `mt-pytorch` **代码示例**添加参数聚合 " -"([#1785](https://github.com/adap/flower/pull/1785))" +"为 `mt-pytorch` **代码示例**添加参数聚合 ([#1785](https://github.com/adap/" +"flower/pull/1785))" #: ../../source/ref-changelog.md:248 msgid "" -"The `mt-pytorch` example shows how to aggregate parameters when writing a" -" driver script. The included `driver.py` and `server.py` have been " -"aligned to demonstrate both the low-level way and the high-level way of " -"building server-side logic." +"The `mt-pytorch` example shows how to aggregate parameters when writing a " +"driver script. The included `driver.py` and `server.py` have been aligned to " +"demonstrate both the low-level way and the high-level way of building server-" +"side logic." msgstr "" -"`mt-pytorch`示例展示了如何在编写驱动程序脚本时聚合参数。附带的 `driver.py` 和 `server.py` " -"已经进行了调整,以演示构建服务器端逻辑的低级方法和高级方法。" +"`mt-pytorch`示例展示了如何在编写驱动程序脚本时聚合参数。附带的 `driver.py` " +"和 `server.py` 已经进行了调整,以演示构建服务器端逻辑的低级方法和高级方法。" #: ../../source/ref-changelog.md:250 msgid "" -"**Migrate experimental REST API to Starlette** " -"([2171](https://github.com/adap/flower/pull/2171))" +"**Migrate experimental REST API to Starlette** ([2171](https://github.com/" +"adap/flower/pull/2171))" msgstr "" -"**将实验性 REST API 移植到 Starlette** " -"([2171](https://github.com/adap/flower/pull/2171))" +"**将实验性 REST API 移植到 Starlette** ([2171](https://github.com/adap/" +"flower/pull/2171))" #: ../../source/ref-changelog.md:252 msgid "" -"The (experimental) REST API used to be implemented in " -"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" -" use [Starlette](https://www.starlette.io/) directly." +"The (experimental) REST API used to be implemented in [FastAPI](https://" +"fastapi.tiangolo.com/), but it has now been migrated to use [Starlette]" +"(https://www.starlette.io/) directly." msgstr "" -"REST API(试验性)曾在 [FastAPI](https://fastapi.tiangolo.com/) 中实现,但现在已迁移到直接使用 " -"[Starlette](https://www.starlette.io/) 。" +"REST API(试验性)曾在 [FastAPI](https://fastapi.tiangolo.com/) 中实现,但现" +"在已迁移到直接使用 [Starlette](https://www.starlette.io/) 。" #: ../../source/ref-changelog.md:254 msgid "" -"Please note: The REST request-response API is still experimental and will" -" likely change significantly over time." -msgstr "请注意:REST 请求-响应 API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" +"Please note: The REST request-response API is still experimental and will " +"likely change significantly over time." 
+msgstr "" +"请注意:REST 请求-响应 API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" #: ../../source/ref-changelog.md:256 msgid "" -"**Introduce experimental gRPC request-response API** " -"([#1867](https://github.com/adap/flower/pull/1867), " -"[#1901](https://github.com/adap/flower/pull/1901))" +"**Introduce experimental gRPC request-response API** ([#1867](https://github." +"com/adap/flower/pull/1867), [#1901](https://github.com/adap/flower/" +"pull/1901))" msgstr "" -"**引入实验性 gRPC 请求-响应 API** " -"([#1867](https://github.com/adap/flower/pull/1867), " -"[#1901](https://github.com/adap/flower/pull/1901)" +"**引入实验性 gRPC 请求-响应 API** ([#1867](https://github.com/adap/flower/" +"pull/1867), [#1901](https://github.com/adap/flower/pull/1901)" #: ../../source/ref-changelog.md:258 msgid "" -"In addition to the existing gRPC API (based on bidirectional streaming) " -"and the experimental REST API, there is now a new gRPC API that uses a " -"request-response model to communicate with client nodes." +"In addition to the existing gRPC API (based on bidirectional streaming) and " +"the experimental REST API, there is now a new gRPC API that uses a request-" +"response model to communicate with client nodes." msgstr "" -"除了现有的 gRPC 应用程序接口(基于双向流)和试验性 REST 应用程序接口外,现在还有一个新的 gRPC " -"应用程序接口,它使用请求-响应模型与客户端节点通信。" +"除了现有的 gRPC 应用程序接口(基于双向流)和试验性 REST 应用程序接口外,现在" +"还有一个新的 gRPC 应用程序接口,它使用请求-响应模型与客户端节点通信。" #: ../../source/ref-changelog.md:260 msgid "" -"Please note: The gRPC request-response API is still experimental and will" -" likely change significantly over time." -msgstr "请注意:gRPC 请求-响应 API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" +"Please note: The gRPC request-response API is still experimental and will " +"likely change significantly over time." +msgstr "" +"请注意:gRPC 请求-响应 API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" #: ../../source/ref-changelog.md:262 msgid "" "**Replace the experimental** `start_client(rest=True)` **with the new** " -"`start_client(transport=\"rest\")` " -"([#1880](https://github.com/adap/flower/pull/1880))" +"`start_client(transport=\"rest\")` ([#1880](https://github.com/adap/flower/" +"pull/1880))" msgstr "" "**用新的** `start_client(transport=\"rest\")` 替换实验性** " -"`start_client(rest=True)` " -"([#1880](https://github.com/adap/flower/pull/1880))" +"`start_client(rest=True)` ([#1880](https://github.com/adap/flower/pull/1880))" #: ../../source/ref-changelog.md:264 msgid "" -"The (experimental) `start_client` argument `rest` was deprecated in " -"favour of a new argument `transport`. `start_client(transport=\"rest\")` " -"will yield the same behaviour as `start_client(rest=True)` did before. " -"All code should migrate to the new argument `transport`. The deprecated " -"argument `rest` will be removed in a future release." +"The (experimental) `start_client` argument `rest` was deprecated in favour " +"of a new argument `transport`. `start_client(transport=\"rest\")` will yield " +"the same behaviour as `start_client(rest=True)` did before. All code should " +"migrate to the new argument `transport`. The deprecated argument `rest` will " +"be removed in a future release." 
msgstr "" -"已废弃(试验性的)`start_client`参数`rest`,改用新参数`transport`。`start_client(transport=\"rest\")`将产生与以前的`start_client(rest=True)`相同的行为。所有代码都应迁移到新参数" -" `transport`。过时的参数 `rest` 将在今后的版本中删除。" +"已废弃(试验性的)`start_client`参数`rest`,改用新参数`transport`。" +"`start_client(transport=\"rest\")`将产生与以前的`start_client(rest=True)`相同" +"的行为。所有代码都应迁移到新参数 `transport`。过时的参数 `rest` 将在今后的版" +"本中删除。" #: ../../source/ref-changelog.md:266 msgid "" -"**Add a new gRPC option** " -"([#2197](https://github.com/adap/flower/pull/2197))" -msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/pull/2197))" +"**Add a new gRPC option** ([#2197](https://github.com/adap/flower/pull/2197))" +msgstr "" +"** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/" +"pull/2197))" #: ../../source/ref-changelog.md:268 msgid "" -"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" -" option set to 0 by default. This prevents the clients from sending " -"keepalive pings when there is no outstanding stream." +"We now start a gRPC server with the `grpc.keepalive_permit_without_calls` " +"option set to 0 by default. This prevents the clients from sending keepalive " +"pings when there is no outstanding stream." msgstr "" -"现在我们启动一个 gRPC 服务器,并将 `grpc.keepalive_permit_without_calls` 选项默认设置为 " -"0。这将防止客户端在没有未处理数据流时发送 keepalive pings。" +"现在我们启动一个 gRPC 服务器,并将 `grpc.keepalive_permit_without_calls` 选项" +"默认设置为 0。这将防止客户端在没有未处理数据流时发送 keepalive pings。" #: ../../source/ref-changelog.md:270 msgid "" -"**Improve example notebooks** " -"([#2005](https://github.com/adap/flower/pull/2005))" +"**Improve example notebooks** ([#2005](https://github.com/adap/flower/" +"pull/2005))" msgstr "**改进示例笔记** ([#2005](https://github.com/adap/flower/pull/2005))" #: ../../source/ref-changelog.md:272 @@ -16217,57 +16378,48 @@ msgstr "有一个新的 30 分钟的联邦学习 PyTorch 教程!" #: ../../source/ref-changelog.md:274 msgid "" "**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" -msgstr "" -"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +"[#1873](https://github.com/adap/flower/pull/1873), [#1981](https://github." 
+"com/adap/flower/pull/1981), [#1988](https://github.com/adap/flower/" +"pull/1988), [#1984](https://github.com/adap/flower/pull/1984), [#1982]" +"(https://github.com/adap/flower/pull/1982), [#2112](https://github.com/adap/" +"flower/pull/2112), [#2144](https://github.com/adap/flower/pull/2144), [#2174]" +"(https://github.com/adap/flower/pull/2174), [#2225](https://github.com/adap/" +"flower/pull/2225), [#2183](https://github.com/adap/flower/pull/2183))" +msgstr "" +"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), [#1873]" +"(https://github.com/adap/flower/pull/1873), [#1981](https://github.com/adap/" +"flower/pull/1981), [#1988](https://github.com/adap/flower/pull/1988), [#1984]" +"(https://github.com/adap/flower/pull/1984), [#1982](https://github.com/adap/" +"flower/pull/1982), [#2112](https://github.com/adap/flower/pull/2112), [#2144]" +"(https://github.com/adap/flower/pull/2144), [#2174](https://github.com/adap/" +"flower/pull/2174), [#2225](https://github.com/adap/flower/pull/2225), [#2183]" +"(https://github.com/adap/flower/pull/2183))" #: ../../source/ref-changelog.md:276 msgid "" "Many examples have received significant updates, including simplified " "advanced-tensorflow and advanced-pytorch examples, improved macOS " -"compatibility of TensorFlow examples, and code examples for simulation. A" -" major upgrade is that all code examples now have a `requirements.txt` " -"(in addition to `pyproject.toml`)." +"compatibility of TensorFlow examples, and code examples for simulation. A " +"major upgrade is that all code examples now have a `requirements.txt` (in " +"addition to `pyproject.toml`)." msgstr "" -"许多示例都进行了重大更新,包括简化了 advanced-tensorflow 和 advanced-pytorch 示例,改进了 " -"TensorFlow 示例的 macOS 兼容性,以及模拟代码示例。一项重大升级是所有代码示例现在都有了 " -"\"requirements.txt\"(除 \"pyproject.toml \"外)。" +"许多示例都进行了重大更新,包括简化了 advanced-tensorflow 和 advanced-pytorch " +"示例,改进了 TensorFlow 示例的 macOS 兼容性,以及模拟代码示例。一项重大升级是" +"所有代码示例现在都有了 \"requirements.txt\"(除 \"pyproject.toml \"外)。" #: ../../source/ref-changelog.md:278 msgid "" -"**General improvements** " -"([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" -msgstr "" -"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +"**General improvements** ([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), [#1884](https://github." 
+"com/adap/flower/pull/1884), [#1837](https://github.com/adap/flower/" +"pull/1837), [#1477](https://github.com/adap/flower/pull/1477), [#2171]" +"(https://github.com/adap/flower/pull/2171))" +msgstr "" +"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), [#1866]" +"(https://github.com/adap/flower/pull/1866), [#1884](https://github.com/adap/" +"flower/pull/1884), [#1837](https://github.com/adap/flower/pull/1837), [#1477]" +"(https://github.com/adap/flower/pull/1477), [#2171](https://github.com/adap/" +"flower/pull/2171))" #: ../../source/ref-changelog.md:284 ../../source/ref-changelog.md:348 #: ../../source/ref-changelog.md:406 ../../source/ref-changelog.md:475 @@ -16282,165 +16434,159 @@ msgstr "v1.4.0 (2023-04-21)" #: ../../source/ref-changelog.md:292 msgid "" "`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, `Iacob-" +"Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal Sarkhel`, " +"`L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic Lane`, " +"`Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, `Steve " +"Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" msgstr "" "`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, `Iacob-" +"Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal Sarkhel`, " +"`L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic Lane`, " +"`Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, `Steve " +"Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" #: ../../source/ref-changelog.md:296 msgid "" -"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " -"example)** ([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" -msgstr "" -"**引入对XGBoost的支持(**`FedXgbNnAvg` **策略和示例)** " -"([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" +"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and example)** " +"([#1694](https://github.com/adap/flower/pull/1694), [#1709](https://github." 
+"com/adap/flower/pull/1709), [#1715](https://github.com/adap/flower/" +"pull/1715), [#1717](https://github.com/adap/flower/pull/1717), [#1763]" +"(https://github.com/adap/flower/pull/1763), [#1795](https://github.com/adap/" +"flower/pull/1795))" +msgstr "" +"**引入对XGBoost的支持(**`FedXgbNnAvg` **策略和示例)** ([#1694](https://" +"github.com/adap/flower/pull/1694), [#1709](https://github.com/adap/flower/" +"pull/1709), [#1715](https://github.com/adap/flower/pull/1715), [#1717]" +"(https://github.com/adap/flower/pull/1717), [#1763](https://github.com/adap/" +"flower/pull/1763), [#1795](https://github.com/adap/flower/pull/1795))" #: ../../source/ref-changelog.md:298 msgid "" "XGBoost is a tree-based ensemble machine learning algorithm that uses " -"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" -" " -"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" and a [code example](https://github.com/adap/flower/tree/main/examples" -"/xgboost-quickstart) that demonstrates the usage of this new strategy in " -"an XGBoost project." -msgstr "" -"XGBoost 是一种基于树的集合机器学习算法,它使用梯度提升来提高模型的准确性。我们添加了一个新的 " -"\"FedXgbNnAvg\"[策略](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)和一个[代码示例](https://github.com/adap/flower/tree/main/examples" -"/xgboost-quickstart),演示如何在 XGBoost 项目中使用这个新策略。" +"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg` " +"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/" +"strategy/fedxgb_nn_avg.py), and a [code example](https://github.com/adap/" +"flower/tree/main/examples/xgboost-quickstart) that demonstrates the usage of " +"this new strategy in an XGBoost project." +msgstr "" +"XGBoost 是一种基于树的集合机器学习算法,它使用梯度提升来提高模型的准确性。我" +"们添加了一个新的 \"FedXgbNnAvg\"[策略](https://github.com/adap/flower/tree/" +"main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)和一个[代码示例](https://" +"github.com/adap/flower/tree/main/examples/xgboost-quickstart),演示如何在 " +"XGBoost 项目中使用这个新策略。" #: ../../source/ref-changelog.md:300 msgid "" -"**Introduce iOS SDK (preview)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +"**Introduce iOS SDK (preview)** ([#1621](https://github.com/adap/flower/" +"pull/1621), [#1764](https://github.com/adap/flower/pull/1764))" msgstr "" -"**介绍 iOS SDK(预览版)** ([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +"**介绍 iOS SDK(预览版)** ([#1621](https://github.com/adap/flower/" +"pull/1621), [#1764](https://github.com/adap/flower/pull/1764))" #: ../../source/ref-changelog.md:302 msgid "" -"This is a major update for anyone wanting to implement Federated Learning" -" on iOS mobile devices. We now have a swift iOS SDK present under " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" that will facilitate greatly the app creating process. To showcase its " -"use, the [iOS " +"This is a major update for anyone wanting to implement Federated Learning on " +"iOS mobile devices. We now have a swift iOS SDK present under [src/swift/" +"flwr](https://github.com/adap/flower/tree/main/src/swift/flwr) that will " +"facilitate greatly the app creating process. To showcase its use, the [iOS " "example](https://github.com/adap/flower/tree/main/examples/ios) has also " "been updated!" 
msgstr "" -"对于想要在 iOS 移动设备上实施联邦学习的人来说,这是一次重大更新。现在,我们在 " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" 下提供了一个迅捷的 iOS SDK,这将大大方便应用程序的创建过程。为了展示其使用情况,我们还更新了 [iOS " -"示例](https://github.com/adap/flower/tree/main/examples/ios)!" +"对于想要在 iOS 移动设备上实施联邦学习的人来说,这是一次重大更新。现在,我们" +"在 [src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr) " +"下提供了一个迅捷的 iOS SDK,这将大大方便应用程序的创建过程。为了展示其使用情" +"况,我们还更新了 [iOS 示例](https://github.com/adap/flower/tree/main/" +"examples/ios)!" #: ../../source/ref-changelog.md:304 msgid "" -"**Introduce new \"What is Federated Learning?\" tutorial** " -"([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721))" +"**Introduce new \"What is Federated Learning?\" tutorial** ([#1657](https://" +"github.com/adap/flower/pull/1657), [#1721](https://github.com/adap/flower/" +"pull/1721))" msgstr "" -"**引入新的 " -"\"什么是联邦学习?\"教程**([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721)" +"**引入新的 \"什么是联邦学习?\"教程**([#1657](https://github.com/adap/" +"flower/pull/1657), [#1721](https://github.com/adap/flower/pull/1721)" #: ../../source/ref-changelog.md:306 msgid "" -"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" -"what-is-federated-learning.html) in our documentation explains the basics" -" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" -" Learning to start their journey with Flower. Forward it to anyone who's " +"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-what-" +"is-federated-learning.html) in our documentation explains the basics of " +"Fedetated Learning. It enables anyone who's unfamiliar with Federated " +"Learning to start their journey with Flower. Forward it to anyone who's " "interested in Federated Learning!" msgstr "" -"我们的文档中新增了一个[入门级教程](https://flower.ai/docs/framework/tutorial-what-is-" -"federated-learning.html),解释了联邦学习的基础知识。它让任何不熟悉联邦学习的人都能开始 Flower " -"之旅。请转发给对联邦学习感兴趣的人!" +"我们的文档中新增了一个[入门级教程](https://flower.ai/docs/framework/tutorial-" +"what-is-federated-learning.html),解释了联邦学习的基础知识。它让任何不熟悉联" +"邦学习的人都能开始 Flower 之旅。请转发给对联邦学习感兴趣的人!" #: ../../source/ref-changelog.md:308 msgid "" -"**Introduce new Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" +"**Introduce new Flower Baseline: FedProx MNIST** ([#1513](https://github.com/" +"adap/flower/pull/1513), [#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), [#1679](https://github." +"com/adap/flower/pull/1679))" msgstr "" -"**引入新的 Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679)" +"**引入新的 Flower Baseline: FedProx MNIST** ([#1513](https://github.com/" +"adap/flower/pull/1513), [#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), [#1679](https://github." +"com/adap/flower/pull/1679)" #: ../../source/ref-changelog.md:310 msgid "" -"This new baseline replicates the MNIST+CNN task from the paper [Federated" -" Optimization in Heterogeneous Networks (Li et al., " -"2018)](https://arxiv.org/abs/1812.06127). 
It uses the `FedProx` strategy," -" which aims at making convergence more robust in heterogeneous settings." +"This new baseline replicates the MNIST+CNN task from the paper [Federated " +"Optimization in Heterogeneous Networks (Li et al., 2018)](https://arxiv.org/" +"abs/1812.06127). It uses the `FedProx` strategy, which aims at making " +"convergence more robust in heterogeneous settings." msgstr "" -"这条新Baseline复现了论文[Federated Optimization in Heterogeneous Networks (Li et " -"al., 2018)](https://arxiv.org/abs/1812.06127)中的 MNIST+CNN 任务。它使用 " -"\"FedProx \"策略,旨在使收敛在异构环境中更加稳健。" +"这条新Baseline复现了论文[Federated Optimization in Heterogeneous Networks " +"(Li et al., 2018)](https://arxiv.org/abs/1812.06127)中的 MNIST+CNN 任务。它使" +"用 \"FedProx \"策略,旨在使收敛在异构环境中更加稳健。" #: ../../source/ref-changelog.md:312 msgid "" -"**Introduce new Flower Baseline: FedAvg FEMNIST** " -"([#1655](https://github.com/adap/flower/pull/1655))" +"**Introduce new Flower Baseline: FedAvg FEMNIST** ([#1655](https://github." +"com/adap/flower/pull/1655))" msgstr "" -"**引入新的 Flower Baseline: FedAvg FEMNIST** " -"([#1655](https://github.com/adap/flower/pull/1655))" +"**引入新的 Flower Baseline: FedAvg FEMNIST** ([#1655](https://github.com/" +"adap/flower/pull/1655))" #: ../../source/ref-changelog.md:314 msgid "" -"This new baseline replicates an experiment evaluating the performance of " -"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " -"Benchmark for Federated Settings (Caldas et al., " -"2018)](https://arxiv.org/abs/1812.01097)." +"This new baseline replicates an experiment evaluating the performance of the " +"FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A Benchmark " +"for Federated Settings (Caldas et al., 2018)](https://arxiv.org/" +"abs/1812.01097)." 
msgstr "" -"这一新Baseline复现了论文[LEAF: A Benchmark for Federated Settings(Caldas 等人,2018 " -"年)](https://arxiv.org/abs/1812.01097)中评估 FedAvg 算法在 FEMNIST 数据集上性能的实验。" +"这一新Baseline复现了论文[LEAF: A Benchmark for Federated Settings(Caldas 等" +"人,2018 年)](https://arxiv.org/abs/1812.01097)中评估 FedAvg 算法在 FEMNIST " +"数据集上性能的实验。" #: ../../source/ref-changelog.md:316 msgid "" -"**Introduce (experimental) REST API** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" -msgstr "" -"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +"**Introduce (experimental) REST API** ([#1594](https://github.com/adap/" +"flower/pull/1594), [#1690](https://github.com/adap/flower/pull/1690), [#1695]" +"(https://github.com/adap/flower/pull/1695), [#1712](https://github.com/adap/" +"flower/pull/1712), [#1802](https://github.com/adap/flower/pull/1802), [#1770]" +"(https://github.com/adap/flower/pull/1770), [#1733](https://github.com/adap/" +"flower/pull/1733))" +msgstr "" +"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/" +"pull/1594), [#1690](https://github.com/adap/flower/pull/1690), [#1695]" +"(https://github.com/adap/flower/pull/1695), [#1712](https://github.com/adap/" +"flower/pull/1712), [#1802](https://github.com/adap/flower/pull/1802), [#1770]" +"(https://github.com/adap/flower/pull/1770), [#1733](https://github.com/adap/" +"flower/pull/1733))" #: ../../source/ref-changelog.md:318 msgid "" "A new REST API has been introduced as an alternative to the gRPC-based " "communication stack. In this initial version, the REST API only supports " "anonymous clients." 
-msgstr "作为基于 gRPC 的通信栈的替代方案,我们引入了新的 REST API。在初始版本中,REST API 仅支持匿名客户端。" +msgstr "" +"作为基于 gRPC 的通信栈的替代方案,我们引入了新的 REST API。在初始版本中," +"REST API 仅支持匿名客户端。" #: ../../source/ref-changelog.md:320 msgid "" @@ -16450,203 +16596,171 @@ msgstr "请注意:REST API 仍处于试验阶段,随着时间的推移可能 #: ../../source/ref-changelog.md:322 msgid "" -"**Improve the (experimental) Driver API** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" -msgstr "" -"**改进(试验性)驱动程序应用程序接口** ([#1663](https://github.com/adap/flower/pull/1663)," -" [#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +"**Improve the (experimental) Driver API** ([#1663](https://github.com/adap/" +"flower/pull/1663), [#1666](https://github.com/adap/flower/pull/1666), [#1667]" +"(https://github.com/adap/flower/pull/1667), [#1664](https://github.com/adap/" +"flower/pull/1664), [#1675](https://github.com/adap/flower/pull/1675), [#1676]" +"(https://github.com/adap/flower/pull/1676), [#1693](https://github.com/adap/" +"flower/pull/1693), [#1662](https://github.com/adap/flower/pull/1662), [#1794]" +"(https://github.com/adap/flower/pull/1794))" +msgstr "" +"**改进(试验性)驱动程序应用程序接口** ([#1663](https://github.com/adap/" +"flower/pull/1663), [#1666](https://github.com/adap/flower/pull/1666), [#1667]" +"(https://github.com/adap/flower/pull/1667), [#1664](https://github.com/adap/" +"flower/pull/1664), [#1675](https://github.com/adap/flower/pull/1675), [#1676]" +"(https://github.com/adap/flower/pull/1676), [#1693](https://github.com/adap/" +"flower/pull/1693), [#1662](https://github.com/adap/flower/pull/1662), [#1794]" +"(https://github.com/adap/flower/pull/1794))" #: ../../source/ref-changelog.md:324 msgid "" -"The Driver API is still an experimental feature, but this release " -"introduces some major upgrades. One of the main improvements is the " -"introduction of an SQLite database to store server state on disk (instead" -" of in-memory). Another improvement is that tasks (instructions or " -"results) that have been delivered will now be deleted. This greatly " -"improves the memory efficiency of a long-running Flower server." +"The Driver API is still an experimental feature, but this release introduces " +"some major upgrades. One of the main improvements is the introduction of an " +"SQLite database to store server state on disk (instead of in-memory). " +"Another improvement is that tasks (instructions or results) that have been " +"delivered will now be deleted. This greatly improves the memory efficiency " +"of a long-running Flower server." 
msgstr "" -"驱动程序应用程序接口(Driver API)仍是一项试验性功能,但这一版本引入了一些重大升级。主要改进之一是引入了 SQLite " -"数据库,将服务器状态存储在磁盘上(而不是内存中)。另一项改进是,已交付的任务(指令或结果)现在将被删除。这大大提高了长期运行的 Flower " -"服务器的内存效率。" +"驱动程序应用程序接口(Driver API)仍是一项试验性功能,但这一版本引入了一些重" +"大升级。主要改进之一是引入了 SQLite 数据库,将服务器状态存储在磁盘上(而不是" +"内存中)。另一项改进是,已交付的任务(指令或结果)现在将被删除。这大大提高了" +"长期运行的 Flower 服务器的内存效率。" #: ../../source/ref-changelog.md:326 msgid "" -"**Fix spilling issues related to Ray during simulations** " -"([#1698](https://github.com/adap/flower/pull/1698))" -msgstr "**修复模拟过程中与Ray有关的溢出问题** ([#1698](https://github.com/adap/flower/pull/1698))" +"**Fix spilling issues related to Ray during simulations** ([#1698](https://" +"github.com/adap/flower/pull/1698))" +msgstr "" +"**修复模拟过程中与Ray有关的溢出问题** ([#1698](https://github.com/adap/" +"flower/pull/1698))" #: ../../source/ref-changelog.md:328 msgid "" -"While running long simulations, `ray` was sometimes spilling huge amounts" -" of data that would make the training unable to continue. This is now " -"fixed! 🎉" -msgstr "在运行长时间模拟时,`ray` 有时会溢出大量数据,导致训练无法继续。现在这个问题已经解决!🎉" +"While running long simulations, `ray` was sometimes spilling huge amounts of " +"data that would make the training unable to continue. This is now fixed! 🎉" +msgstr "" +"在运行长时间模拟时,`ray` 有时会溢出大量数据,导致训练无法继续。现在这个问题" +"已经解决!🎉" #: ../../source/ref-changelog.md:330 msgid "" -"**Add new example using** `TabNet` **and Flower** " -"([#1725](https://github.com/adap/flower/pull/1725))" +"**Add new example using** `TabNet` **and Flower** ([#1725](https://github." +"com/adap/flower/pull/1725))" msgstr "" -"** 添加使用** `TabNet` ** 的新示例** " -"([#1725](https://github.com/adap/flower/pull/1725))" +"** 添加使用** `TabNet` ** 的新示例** ([#1725](https://github.com/adap/flower/" +"pull/1725))" #: ../../source/ref-changelog.md:332 msgid "" -"TabNet is a powerful and flexible framework for training machine learning" -" models on tabular data. We now have a federated example using Flower: " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" -"/quickstart-tabnet)." +"TabNet is a powerful and flexible framework for training machine learning " +"models on tabular data. We now have a federated example using Flower: " +"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples/" +"quickstart-tabnet)." msgstr "" -"TabNet 是一个强大而灵活的框架,用于在表格数据上训练机器学习模型。我们现在有一个使用 Flower 的联邦示例:[quickstart-" -"tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-" -"tabnet)。" +"TabNet 是一个强大而灵活的框架,用于在表格数据上训练机器学习模型。我们现在有一" +"个使用 Flower 的联邦示例:[quickstart-tabnet](https://github.com/adap/flower/" +"tree/main/examples/quickstart-tabnet)。" #: ../../source/ref-changelog.md:334 msgid "" -"**Add new how-to guide for monitoring simulations** " -"([#1649](https://github.com/adap/flower/pull/1649))" -msgstr "** 添加新的模拟监控指南** ([#1649](https://github.com/adap/flower/pull/1649))" +"**Add new how-to guide for monitoring simulations** ([#1649](https://github." +"com/adap/flower/pull/1649))" +msgstr "" +"** 添加新的模拟监控指南** ([#1649](https://github.com/adap/flower/pull/1649))" #: ../../source/ref-changelog.md:336 msgid "" -"We now have a documentation guide to help users monitor their performance" -" during simulations." +"We now have a documentation guide to help users monitor their performance " +"during simulations." 
msgstr "我们现在有一份文档指南,可帮助用户在模拟过程中监控其性能。" #: ../../source/ref-changelog.md:338 msgid "" -"**Add training metrics to** `History` **object during simulations** " -"([#1696](https://github.com/adap/flower/pull/1696))" +"**Add training metrics to** `History` **object during simulations** ([#1696]" +"(https://github.com/adap/flower/pull/1696))" msgstr "" -"**在模拟过程中为***`历史`***对象添加训练指标*** " -"([#1696](https://github.com/adap/flower/pull/1696))" +"**在模拟过程中为***`历史`***对象添加训练指标*** ([#1696](https://github.com/" +"adap/flower/pull/1696))" #: ../../source/ref-changelog.md:340 msgid "" -"The `fit_metrics_aggregation_fn` can be used to aggregate training " -"metrics, but previous releases did not save the results in the `History` " -"object. This is now the case!" +"The `fit_metrics_aggregation_fn` can be used to aggregate training metrics, " +"but previous releases did not save the results in the `History` object. This " +"is now the case!" msgstr "" -"`fit_metrics_aggregation_fn`可用于汇总训练指标,但以前的版本不会将结果保存在 \"History " -"\"对象中。现在可以了!" +"`fit_metrics_aggregation_fn`可用于汇总训练指标,但以前的版本不会将结果保存在 " +"\"History \"对象中。现在可以了!" #: ../../source/ref-changelog.md:342 msgid "" -"**General improvements** " -"([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " -"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/adap/flower/pull/1705), " -"[#1708](https://github.com/adap/flower/pull/1708), " -"[#1711](https://github.com/adap/flower/pull/1711), " -"[#1713](https://github.com/adap/flower/pull/1713), " -"[#1714](https://github.com/adap/flower/pull/1714), " -"[#1718](https://github.com/adap/flower/pull/1718), " -"[#1716](https://github.com/adap/flower/pull/1716), " -"[#1723](https://github.com/adap/flower/pull/1723), " -"[#1735](https://github.com/adap/flower/pull/1735), " -"[#1678](https://github.com/adap/flower/pull/1678), " -"[#1750](https://github.com/adap/flower/pull/1750), " -"[#1753](https://github.com/adap/flower/pull/1753), " -"[#1736](https://github.com/adap/flower/pull/1736), " -"[#1766](https://github.com/adap/flower/pull/1766), " -"[#1760](https://github.com/adap/flower/pull/1760), " -"[#1775](https://github.com/adap/flower/pull/1775), " -"[#1776](https://github.com/adap/flower/pull/1776), " -"[#1777](https://github.com/adap/flower/pull/1777), " -"[#1779](https://github.com/adap/flower/pull/1779), " -"[#1784](https://github.com/adap/flower/pull/1784), " -"[#1773](https://github.com/adap/flower/pull/1773), " -"[#1755](https://github.com/adap/flower/pull/1755), " -"[#1789](https://github.com/adap/flower/pull/1789), " -"[#1788](https://github.com/adap/flower/pull/1788), " 
-"[#1798](https://github.com/adap/flower/pull/1798), " -"[#1799](https://github.com/adap/flower/pull/1799), " -"[#1739](https://github.com/adap/flower/pull/1739), " -"[#1800](https://github.com/adap/flower/pull/1800), " -"[#1804](https://github.com/adap/flower/pull/1804), " -"[#1805](https://github.com/adap/flower/pull/1805))" -msgstr "" -"**普通改进** ([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " -"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/adap/flower/pull/1705), " -"[#1708](https://github.com/adap/flower/pull/1708), " -"[#1711](https://github.com/adap/flower/pull/1711), " -"[#1713](https://github.com/adap/flower/pull/1713), " -"[#1714](https://github.com/adap/flower/pull/1714), " -"[#1718](https://github.com/adap/flower/pull/1718), " -"[#1716](https://github.com/adap/flower/pull/1716), " -"[#1723](https://github.com/adap/flower/pull/1723), " -"[#1735](https://github.com/adap/flower/pull/1735), " -"[#1678](https://github.com/adap/flower/pull/1678), " -"[#1750](https://github.com/adap/flower/pull/1750), " -"[#1753](https://github.com/adap/flower/pull/1753), " -"[#1736](https://github.com/adap/flower/pull/1736), " -"[#1766](https://github.com/adap/flower/pull/1766), " -"[#1760](https://github.com/adap/flower/pull/1760), " -"[#1775](https://github.com/adap/flower/pull/1775), " -"[#1776](https://github.com/adap/flower/pull/1776), " -"[#1777](https://github.com/adap/flower/pull/1777), " -"[#1779](https://github.com/adap/flower/pull/1779), " -"[#1784](https://github.com/adap/flower/pull/1784), " -"[#1773](https://github.com/adap/flower/pull/1773), " -"[#1755](https://github.com/adap/flower/pull/1755), " -"[#1789](https://github.com/adap/flower/pull/1789), " -"[#1788](https://github.com/adap/flower/pull/1788), " -"[#1798](https://github.com/adap/flower/pull/1798), " -"[#1799](https://github.com/adap/flower/pull/1799), " -"[#1739](https://github.com/adap/flower/pull/1739), " -"[#1800](https://github.com/adap/flower/pull/1800), " -"[#1804](https://github.com/adap/flower/pull/1804), " -"[#1805](https://github.com/adap/flower/pull/1805))" +"**General improvements** ([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), [#1647](https://github." 
+"com/adap/flower/pull/1647), [#1471](https://github.com/adap/flower/" +"pull/1471), [#1648](https://github.com/adap/flower/pull/1648), [#1651]" +"(https://github.com/adap/flower/pull/1651), [#1652](https://github.com/adap/" +"flower/pull/1652), [#1653](https://github.com/adap/flower/pull/1653), [#1659]" +"(https://github.com/adap/flower/pull/1659), [#1665](https://github.com/adap/" +"flower/pull/1665), [#1670](https://github.com/adap/flower/pull/1670), [#1672]" +"(https://github.com/adap/flower/pull/1672), [#1677](https://github.com/adap/" +"flower/pull/1677), [#1684](https://github.com/adap/flower/pull/1684), [#1683]" +"(https://github.com/adap/flower/pull/1683), [#1686](https://github.com/adap/" +"flower/pull/1686), [#1682](https://github.com/adap/flower/pull/1682), [#1685]" +"(https://github.com/adap/flower/pull/1685), [#1692](https://github.com/adap/" +"flower/pull/1692), [#1705](https://github.com/adap/flower/pull/1705), [#1708]" +"(https://github.com/adap/flower/pull/1708), [#1711](https://github.com/adap/" +"flower/pull/1711), [#1713](https://github.com/adap/flower/pull/1713), [#1714]" +"(https://github.com/adap/flower/pull/1714), [#1718](https://github.com/adap/" +"flower/pull/1718), [#1716](https://github.com/adap/flower/pull/1716), [#1723]" +"(https://github.com/adap/flower/pull/1723), [#1735](https://github.com/adap/" +"flower/pull/1735), [#1678](https://github.com/adap/flower/pull/1678), [#1750]" +"(https://github.com/adap/flower/pull/1750), [#1753](https://github.com/adap/" +"flower/pull/1753), [#1736](https://github.com/adap/flower/pull/1736), [#1766]" +"(https://github.com/adap/flower/pull/1766), [#1760](https://github.com/adap/" +"flower/pull/1760), [#1775](https://github.com/adap/flower/pull/1775), [#1776]" +"(https://github.com/adap/flower/pull/1776), [#1777](https://github.com/adap/" +"flower/pull/1777), [#1779](https://github.com/adap/flower/pull/1779), [#1784]" +"(https://github.com/adap/flower/pull/1784), [#1773](https://github.com/adap/" +"flower/pull/1773), [#1755](https://github.com/adap/flower/pull/1755), [#1789]" +"(https://github.com/adap/flower/pull/1789), [#1788](https://github.com/adap/" +"flower/pull/1788), [#1798](https://github.com/adap/flower/pull/1798), [#1799]" +"(https://github.com/adap/flower/pull/1799), [#1739](https://github.com/adap/" +"flower/pull/1739), [#1800](https://github.com/adap/flower/pull/1800), [#1804]" +"(https://github.com/adap/flower/pull/1804), [#1805](https://github.com/adap/" +"flower/pull/1805))" +msgstr "" +"**普通改进** ([#1659](https://github.com/adap/flower/pull/1659), [#1646]" +"(https://github.com/adap/flower/pull/1646), [#1647](https://github.com/adap/" +"flower/pull/1647), [#1471](https://github.com/adap/flower/pull/1471), [#1648]" +"(https://github.com/adap/flower/pull/1648), [#1651](https://github.com/adap/" +"flower/pull/1651), [#1652](https://github.com/adap/flower/pull/1652), [#1653]" +"(https://github.com/adap/flower/pull/1653), [#1659](https://github.com/adap/" +"flower/pull/1659), [#1665](https://github.com/adap/flower/pull/1665), [#1670]" +"(https://github.com/adap/flower/pull/1670), [#1672](https://github.com/adap/" +"flower/pull/1672), [#1677](https://github.com/adap/flower/pull/1677), [#1684]" +"(https://github.com/adap/flower/pull/1684), [#1683](https://github.com/adap/" +"flower/pull/1683), [#1686](https://github.com/adap/flower/pull/1686), [#1682]" +"(https://github.com/adap/flower/pull/1682), [#1685](https://github.com/adap/" +"flower/pull/1685), [#1692](https://github.com/adap/flower/pull/1692), [#1705]" 
+"(https://github.com/adap/flower/pull/1705), [#1708](https://github.com/adap/" +"flower/pull/1708), [#1711](https://github.com/adap/flower/pull/1711), [#1713]" +"(https://github.com/adap/flower/pull/1713), [#1714](https://github.com/adap/" +"flower/pull/1714), [#1718](https://github.com/adap/flower/pull/1718), [#1716]" +"(https://github.com/adap/flower/pull/1716), [#1723](https://github.com/adap/" +"flower/pull/1723), [#1735](https://github.com/adap/flower/pull/1735), [#1678]" +"(https://github.com/adap/flower/pull/1678), [#1750](https://github.com/adap/" +"flower/pull/1750), [#1753](https://github.com/adap/flower/pull/1753), [#1736]" +"(https://github.com/adap/flower/pull/1736), [#1766](https://github.com/adap/" +"flower/pull/1766), [#1760](https://github.com/adap/flower/pull/1760), [#1775]" +"(https://github.com/adap/flower/pull/1775), [#1776](https://github.com/adap/" +"flower/pull/1776), [#1777](https://github.com/adap/flower/pull/1777), [#1779]" +"(https://github.com/adap/flower/pull/1779), [#1784](https://github.com/adap/" +"flower/pull/1784), [#1773](https://github.com/adap/flower/pull/1773), [#1755]" +"(https://github.com/adap/flower/pull/1755), [#1789](https://github.com/adap/" +"flower/pull/1789), [#1788](https://github.com/adap/flower/pull/1788), [#1798]" +"(https://github.com/adap/flower/pull/1798), [#1799](https://github.com/adap/" +"flower/pull/1799), [#1739](https://github.com/adap/flower/pull/1739), [#1800]" +"(https://github.com/adap/flower/pull/1800), [#1804](https://github.com/adap/" +"flower/pull/1804), [#1805](https://github.com/adap/flower/pull/1805))" #: ../../source/ref-changelog.md:350 msgid "v1.3.0 (2023-02-06)" @@ -16654,48 +16768,49 @@ msgstr "v1.3.0 (2023-02-06)" #: ../../source/ref-changelog.md:356 msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, `Daniel " +"J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" msgstr "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, `Daniel " +"J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" #: ../../source/ref-changelog.md:360 msgid "" "**Add support for** `workload_id` **and** `group_id` **in Driver API** " "([#1595](https://github.com/adap/flower/pull/1595))" msgstr "" -"**在驱动程序应用程序接口中添加对** `workload_id` **和** `group_id` **的支持** " -"([#1595](https://github.com/adap/flower/pull/1595))" +"**在驱动程序应用程序接口中添加对** `workload_id` **和** `group_id` **的支持" +"** ([#1595](https://github.com/adap/flower/pull/1595))" #: ../../source/ref-changelog.md:362 msgid "" -"The (experimental) Driver API now supports a `workload_id` that can be " -"used to identify which workload a task belongs to. It also supports a new" -" `group_id` that can be used, for example, to indicate the current " -"training round. Both the `workload_id` and `group_id` enable client nodes" -" to decide whether they want to handle a task or not." +"The (experimental) Driver API now supports a `workload_id` that can be used " +"to identify which workload a task belongs to. It also supports a new " +"`group_id` that can be used, for example, to indicate the current training " +"round. Both the `workload_id` and `group_id` enable client nodes to decide " +"whether they want to handle a task or not." 
msgstr "" -"驱动程序 API(试验性)现在支持 `workload_id`,可用于识别任务所属的工作量。它还支持新的 " -"`group_id`,例如,可用于指示当前的训练轮次。通过 `workload_id` 和 `group_id` " -"客户端节点可以决定是否要处理某个任务。" +"驱动程序 API(试验性)现在支持 `workload_id`,可用于识别任务所属的工作量。它" +"还支持新的 `group_id`,例如,可用于指示当前的训练轮次。通过 `workload_id` 和 " +"`group_id` 客户端节点可以决定是否要处理某个任务。" #: ../../source/ref-changelog.md:364 msgid "" -"**Make Driver API and Fleet API address configurable** " -"([#1637](https://github.com/adap/flower/pull/1637))" +"**Make Driver API and Fleet API address configurable** ([#1637](https://" +"github.com/adap/flower/pull/1637))" msgstr "" -"**使Driver API 和Fleet " -"API地址可配置**([#1637](https://github.com/adap/flower/pull/1637))" +"**使Driver API 和Fleet API地址可配置**([#1637](https://github.com/adap/" +"flower/pull/1637))" #: ../../source/ref-changelog.md:366 msgid "" -"The (experimental) long-running Flower server (Driver API and Fleet API) " -"can now configure the server address of both Driver API (via `--driver-" -"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" +"The (experimental) long-running Flower server (Driver API and Fleet API) can " +"now configure the server address of both Driver API (via `--driver-api-" +"address`) and Fleet API (via `--fleet-api-address`) when starting:" msgstr "" -"长期运行的 Flower 服务器(Driver API 和 Fleet API)现在可以在启动时配置 Driver API(通过 " -"`--driver-api-address`)和 Fleet API(通过 `-fleet-api-address`)的服务器地址:" +"长期运行的 Flower 服务器(Driver API 和 Fleet API)现在可以在启动时配置 " +"Driver API(通过 `--driver-api-address`)和 Fleet API(通过 `-fleet-api-" +"address`)的服务器地址:" #: ../../source/ref-changelog.md:368 #, fuzzy @@ -16712,68 +16827,69 @@ msgstr "支持 IPv4 和 IPv6 地址。" #: ../../source/ref-changelog.md:372 msgid "" -"**Add new example of Federated Learning using fastai and Flower** " -"([#1598](https://github.com/adap/flower/pull/1598))" +"**Add new example of Federated Learning using fastai and Flower** ([#1598]" +"(https://github.com/adap/flower/pull/1598))" msgstr "" -"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " -"([#1598](https://github.com/adap/flower/pull/1598))" +"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** ([#1598](https://github." +"com/adap/flower/pull/1598))" #: ../../source/ref-changelog.md:374 msgid "" "A new code example (`quickstart-fastai`) demonstrates federated learning " "with [fastai](https://www.fast.ai/) and Flower. You can find it here: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" -"/quickstart-fastai)." +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples/" +"quickstart-fastai)." msgstr "" -"一个新的代码示例(`quickstart-fastai`)演示了使用 [fastai](https://www.fast.ai/) 和 " -"Flower 的联邦学习。您可以在这里找到它: [quickstart-" -"fastai](https://github.com/adap/flower/tree/main/examples/quickstart-" -"fastai)。" +"一个新的代码示例(`quickstart-fastai`)演示了使用 [fastai](https://www.fast." 
+"ai/) 和 Flower 的联邦学习。您可以在这里找到它: [quickstart-fastai](https://" +"github.com/adap/flower/tree/main/examples/quickstart-fastai)。" #: ../../source/ref-changelog.md:376 msgid "" -"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" -" versions of Android** " -"([#1603](https://github.com/adap/flower/pull/1603))" +"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest " +"versions of Android** ([#1603](https://github.com/adap/flower/pull/1603))" msgstr "" -"**使安卓示例兼容** `flwr >= 1.0.0` **和最新版本的安卓** " -"([#1603](https://github.com/adap/flower/pull/1603))" +"**使安卓示例兼容** `flwr >= 1.0.0` **和最新版本的安卓** ([#1603](https://" +"github.com/adap/flower/pull/1603))" #: ../../source/ref-changelog.md:378 msgid "" -"The Android code example has received a substantial update: the project " -"is compatible with Flower 1.0 (and later), the UI received a full " -"refresh, and the project is updated to be compatible with newer Android " -"tooling." +"The Android code example has received a substantial update: the project is " +"compatible with Flower 1.0 (and later), the UI received a full refresh, and " +"the project is updated to be compatible with newer Android tooling." msgstr "" -"Android 代码示例已进行了大幅更新:项目兼容 Flower 1.0(及更高版本),用户界面已全面刷新,项目已更新为兼容较新的 Android" -" 工具。" +"Android 代码示例已进行了大幅更新:项目兼容 Flower 1.0(及更高版本),用户界面" +"已全面刷新,项目已更新为兼容较新的 Android 工具。" #: ../../source/ref-changelog.md:380 msgid "" -"**Add new `FedProx` strategy** " -"([#1619](https://github.com/adap/flower/pull/1619))" -msgstr "**添加新的`FedProx`策略** ([#1619](https://github.com/adap/flower/pull/1619))" +"**Add new `FedProx` strategy** ([#1619](https://github.com/adap/flower/" +"pull/1619))" +msgstr "" +"**添加新的`FedProx`策略** ([#1619](https://github.com/adap/flower/" +"pull/1619))" #: ../../source/ref-changelog.md:382 msgid "" -"This " -"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" -" is almost identical to " -"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," -" but helps users replicate what is described in this " -"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " -"parameter called `proximal_mu` to regularize the local models with " -"respect to the global models." +"This [strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/" +"strategy/fedprox.py) is almost identical to [`FedAvg`](https://github.com/" +"adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py), but helps " +"users replicate what is described in this [paper](https://arxiv.org/" +"abs/1812.06127). It essentially adds a parameter called `proximal_mu` to " +"regularize the local models with respect to the global models." 
msgstr "" -"该[策略](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)与[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)几乎相同,但可以帮助用户复现本[论文](https://arxiv.org/abs/1812.06127)中的描述。它的本质是添加一个名为" -" `proximal_mu`的参数,使局部模型与全局模型正则化。" +"该[策略](https://github.com/adap/flower/blob/main/src/py/flwr/server/" +"strategy/fedprox.py)与[`FedAvg`](https://github.com/adap/flower/blob/main/" +"src/py/flwr/server/strategy/fedavg.py)几乎相同,但可以帮助用户复现本[论文]" +"(https://arxiv.org/abs/1812.06127)中的描述。它的本质是添加一个名为 " +"`proximal_mu`的参数,使局部模型与全局模型正则化。" #: ../../source/ref-changelog.md:384 msgid "" -"**Add new metrics to telemetry events** " -"([#1640](https://github.com/adap/flower/pull/1640))" -msgstr "**为遥测事件添加新指标**([#1640](https://github.com/adap/flower/pull/1640))" +"**Add new metrics to telemetry events** ([#1640](https://github.com/adap/" +"flower/pull/1640))" +msgstr "" +"**为遥测事件添加新指标**([#1640](https://github.com/adap/flower/pull/1640))" #: ../../source/ref-changelog.md:386 msgid "" @@ -16783,134 +16899,114 @@ msgstr "例如,更新后的事件结构可以将同一工作负载中的事件 #: ../../source/ref-changelog.md:388 msgid "" -"**Add new custom strategy tutorial section** " -"[#1623](https://github.com/adap/flower/pull/1623)" -msgstr "**添加新的自定义策略教程部分** [#1623](https://github.com/adap/flower/pull/1623)" +"**Add new custom strategy tutorial section** [#1623](https://github.com/adap/" +"flower/pull/1623)" +msgstr "" +"**添加新的自定义策略教程部分** [#1623](https://github.com/adap/flower/" +"pull/1623)" #: ../../source/ref-changelog.md:390 msgid "" -"The Flower tutorial now has a new section that covers implementing a " -"custom strategy from scratch: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +"The Flower tutorial now has a new section that covers implementing a custom " +"strategy from scratch: [Open in Colab](https://colab.research.google.com/" +"github/adap/flower/blob/main/doc/source/tutorial-build-a-strategy-from-" +"scratch-pytorch.ipynb)" msgstr "" -"Flower 教程新增了一个章节,介绍如何从零开始实施自定义策略: [在 Colab " -"中打开](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +"Flower 教程新增了一个章节,介绍如何从零开始实施自定义策略: [在 Colab 中打开]" +"(https://colab.research.google.com/github/adap/flower/blob/main/doc/source/" +"tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" #: ../../source/ref-changelog.md:392 msgid "" -"**Add new custom serialization tutorial section** " -"([#1622](https://github.com/adap/flower/pull/1622))" -msgstr "** 添加新的自定义序列化教程部分** ([#1622](https://github.com/adap/flower/pull/1622))" +"**Add new custom serialization tutorial section** ([#1622](https://github." 
+"com/adap/flower/pull/1622))" +msgstr "" +"** 添加新的自定义序列化教程部分** ([#1622](https://github.com/adap/flower/" +"pull/1622))" #: ../../source/ref-changelog.md:394 msgid "" -"The Flower tutorial now has a new section that covers custom " -"serialization: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-customize-the-client-pytorch.ipynb)" +"The Flower tutorial now has a new section that covers custom serialization: " +"[Open in Colab](https://colab.research.google.com/github/adap/flower/blob/" +"main/doc/source/tutorial-customize-the-client-pytorch.ipynb)" msgstr "" -"Flower 教程现在新增了一个章节,介绍自定义序列化: [在 Colab " -"中打开](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-customize-the-client-pytorch.ipynb)" +"Flower 教程现在新增了一个章节,介绍自定义序列化: [在 Colab 中打开](https://" +"colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial-" +"customize-the-client-pytorch.ipynb)" #: ../../source/ref-changelog.md:396 msgid "" -"**General improvements** " -"([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " -"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github.com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " -"[#1590](https://github.com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/adap/flower/pull/1600), " -"[#1601](https://github.com/adap/flower/pull/1601), " -"[#1597](https://github.com/adap/flower/pull/1597), " -"[#1595](https://github.com/adap/flower/pull/1595), " -"[#1591](https://github.com/adap/flower/pull/1591), " -"[#1588](https://github.com/adap/flower/pull/1588), " -"[#1589](https://github.com/adap/flower/pull/1589), " -"[#1587](https://github.com/adap/flower/pull/1587), " -"[#1573](https://github.com/adap/flower/pull/1573), " -"[#1581](https://github.com/adap/flower/pull/1581), " -"[#1578](https://github.com/adap/flower/pull/1578), " -"[#1574](https://github.com/adap/flower/pull/1574), " -"[#1572](https://github.com/adap/flower/pull/1572), " -"[#1586](https://github.com/adap/flower/pull/1586))" -msgstr "" -"**普通改进** ([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " -"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github. 
com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " -"[#1590](https://github. com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/adap/flower/pull/1600), " -"[#1601](https://github.com/adap/flower/pull/1601), " -"[#1597](https://github.com/adap/flower/pull/1597), " -"[#1595](https://github.com/adap/flower/pull/1595), " -"[#1591](https://github.com/adap/flower/pull/1591), " -"[#1588](https://github. com/adap/flower/pull/1588), " -"[#1589](https://github.com/adap/flower/pull/1589), " -"[#1587](https://github.com/adap/flower/pull/1587), " -"[#1573](https://github.com/adap/flower/pull/1573), " -"[#1581](https://github.com/adap/flower/pull/1581), " -"[#1578](https://github.com/adap/flower/pull/1578), " -"[#1574](https://github.com/adap/flower/pull/1574), " -"[#1572](https://github.com/adap/flower/pull/1572), " -"[#1586](https://github.com/adap/flower/pull/1586))" +"**General improvements** ([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), [#1636](https://github." +"com/adap/flower/pull/1636), [#1635](https://github.com/adap/flower/" +"pull/1635), [#1633](https://github.com/adap/flower/pull/1633), [#1632]" +"(https://github.com/adap/flower/pull/1632), [#1631](https://github.com/adap/" +"flower/pull/1631), [#1630](https://github.com/adap/flower/pull/1630), [#1627]" +"(https://github.com/adap/flower/pull/1627), [#1593](https://github.com/adap/" +"flower/pull/1593), [#1616](https://github.com/adap/flower/pull/1616), [#1615]" +"(https://github.com/adap/flower/pull/1615), [#1607](https://github.com/adap/" +"flower/pull/1607), [#1609](https://github.com/adap/flower/pull/1609), [#1608]" +"(https://github.com/adap/flower/pull/1608), [#1603](https://github.com/adap/" +"flower/pull/1603), [#1590](https://github.com/adap/flower/pull/1590), [#1580]" +"(https://github.com/adap/flower/pull/1580), [#1599](https://github.com/adap/" +"flower/pull/1599), [#1600](https://github.com/adap/flower/pull/1600), [#1601]" +"(https://github.com/adap/flower/pull/1601), [#1597](https://github.com/adap/" +"flower/pull/1597), [#1595](https://github.com/adap/flower/pull/1595), [#1591]" +"(https://github.com/adap/flower/pull/1591), [#1588](https://github.com/adap/" +"flower/pull/1588), [#1589](https://github.com/adap/flower/pull/1589), [#1587]" +"(https://github.com/adap/flower/pull/1587), [#1573](https://github.com/adap/" +"flower/pull/1573), [#1581](https://github.com/adap/flower/pull/1581), [#1578]" +"(https://github.com/adap/flower/pull/1578), [#1574](https://github.com/adap/" +"flower/pull/1574), [#1572](https://github.com/adap/flower/pull/1572), [#1586]" +"(https://github.com/adap/flower/pull/1586))" +msgstr "" +"**普通改进** ([#1638](https://github.com/adap/flower/pull/1638), [#1634]" +"(https://github.com/adap/flower/pull/1634), [#1636](https://github.com/adap/" +"flower/pull/1636), [#1635](https://github.com/adap/flower/pull/1635), [#1633]" +"(https://github.com/adap/flower/pull/1633), [#1632](https://github.com/adap/" +"flower/pull/1632), [#1631](https://github.com/adap/flower/pull/1631), 
[#1630]" +"(https://github.com/adap/flower/pull/1630), [#1627](https://github. com/adap/" +"flower/pull/1627), [#1593](https://github.com/adap/flower/pull/1593), [#1616]" +"(https://github.com/adap/flower/pull/1616), [#1615](https://github.com/adap/" +"flower/pull/1615), [#1607](https://github.com/adap/flower/pull/1607), [#1609]" +"(https://github.com/adap/flower/pull/1609), [#1608](https://github.com/adap/" +"flower/pull/1608), [#1603](https://github.com/adap/flower/pull/1603), [#1590]" +"(https://github. com/adap/flower/pull/1590), [#1580](https://github.com/adap/" +"flower/pull/1580), [#1599](https://github.com/adap/flower/pull/1599), [#1600]" +"(https://github.com/adap/flower/pull/1600), [#1601](https://github.com/adap/" +"flower/pull/1601), [#1597](https://github.com/adap/flower/pull/1597), [#1595]" +"(https://github.com/adap/flower/pull/1595), [#1591](https://github.com/adap/" +"flower/pull/1591), [#1588](https://github. com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), [#1587](https://github." +"com/adap/flower/pull/1587), [#1573](https://github.com/adap/flower/" +"pull/1573), [#1581](https://github.com/adap/flower/pull/1581), [#1578]" +"(https://github.com/adap/flower/pull/1578), [#1574](https://github.com/adap/" +"flower/pull/1574), [#1572](https://github.com/adap/flower/pull/1572), [#1586]" +"(https://github.com/adap/flower/pull/1586))" #: ../../source/ref-changelog.md:400 msgid "" -"**Updated documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" -msgstr "" -"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614)))" +"**Updated documentation** ([#1629](https://github.com/adap/flower/" +"pull/1629), [#1628](https://github.com/adap/flower/pull/1628), [#1620]" +"(https://github.com/adap/flower/pull/1620), [#1618](https://github.com/adap/" +"flower/pull/1618), [#1617](https://github.com/adap/flower/pull/1617), [#1613]" +"(https://github.com/adap/flower/pull/1613), [#1614](https://github.com/adap/" +"flower/pull/1614))" +msgstr "" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), [#1628]" +"(https://github.com/adap/flower/pull/1628), [#1620](https://github.com/adap/" +"flower/pull/1620), [#1618](https://github.com/adap/flower/pull/1618), [#1617]" +"(https://github.com/adap/flower/pull/1617), [#1613](https://github.com/adap/" +"flower/pull/1613), [#1614](https://github.com/adap/flower/pull/1614)))" #: ../../source/ref-changelog.md:402 ../../source/ref-changelog.md:469 msgid "" -"As usual, the documentation has improved quite a bit. It is another step " -"in our effort to make the Flower documentation the best documentation of " -"any project. Stay tuned and as always, feel free to provide feedback!" -msgstr "和往常一样,我们的文档有了很大的改进。这是我们努力使 Flower 文档成为所有项目中最好文档的又一步骤。请继续关注,并随时提供反馈意见!" +"As usual, the documentation has improved quite a bit. 
It is another step in " +"our effort to make the Flower documentation the best documentation of any " +"project. Stay tuned and as always, feel free to provide feedback!" +msgstr "" +"和往常一样,我们的文档有了很大的改进。这是我们努力使 Flower 文档成为所有项目" +"中最好文档的又一步骤。请继续关注,并随时提供反馈意见!" #: ../../source/ref-changelog.md:408 msgid "v1.2.0 (2023-01-13)" @@ -16918,21 +17014,19 @@ msgstr "v1.2.0 (2023-01-13)" #: ../../source/ref-changelog.md:414 msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." -" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L. " +"Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" msgstr "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." -" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L. " +"Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" #: ../../source/ref-changelog.md:418 msgid "" -"**Introduce new Flower Baseline: FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" +"**Introduce new Flower Baseline: FedAvg MNIST** ([#1497](https://github.com/" +"adap/flower/pull/1497), [#1552](https://github.com/adap/flower/pull/1552))" msgstr "" -"**引入新的 Flower Baseline: FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" +"**引入新的 Flower Baseline: FedAvg MNIST** ([#1497](https://github.com/adap/" +"flower/pull/1497), [#1552](https://github.com/adap/flower/pull/1552))" #: ../../source/ref-changelog.md:420 msgid "" @@ -16940,283 +17034,272 @@ msgid "" "implementations useful especially to FL newcomers. They will typically " "revisit well known papers from the literature, and be suitable for " "integration in your own application or for experimentation, in order to " -"deepen your knowledge of FL in general. Today's release is the first in " -"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" -"pack-fedavg-mnist-cnn/)" +"deepen your knowledge of FL in general. Today's release is the first in this " +"series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-pack-" +"fedavg-mnist-cnn/)" msgstr "" -"在未来几周内,我们将发布一些新的参考,特别是对 FL " -"新手有用的方法。它们通常会重温文献中的知名论文,适合集成到您自己的应用程序中或用于实验,以加深您对 FL " -"的总体了解。今天发布的是该系列中的第一篇。[阅读全文](https://flower.ai/blog/2023-01-12-fl-starter-" -"pack-fedavg-mnist-cnn/)" +"在未来几周内,我们将发布一些新的参考,特别是对 FL 新手有用的方法。它们通常会" +"重温文献中的知名论文,适合集成到您自己的应用程序中或用于实验,以加深您对 FL " +"的总体了解。今天发布的是该系列中的第一篇。[阅读全文](https://flower.ai/" +"blog/2023-01-12-fl-starter-pack-fedavg-mnist-cnn/)" #: ../../source/ref-changelog.md:422 msgid "" -"**Improve GPU support in simulations** " -"([#1555](https://github.com/adap/flower/pull/1555))" -msgstr "**改进模拟中的 GPU 支持**([#1555](https://github.com/adap/flower/pull/1555))" +"**Improve GPU support in simulations** ([#1555](https://github.com/adap/" +"flower/pull/1555))" +msgstr "" +"**改进模拟中的 GPU 支持**([#1555](https://github.com/adap/flower/" +"pull/1555))" #: ../../source/ref-changelog.md:424 msgid "" -"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" -" to improve GPU support. The update includes some of the hard-earned " -"lessons from scaling simulations in GPU cluster environments. New " -"defaults make running GPU-based simulations substantially more robust." +"The Ray-based Virtual Client Engine (`start_simulation`) has been updated to " +"improve GPU support. 
The update includes some of the hard-earned lessons " +"from scaling simulations in GPU cluster environments. New defaults make " +"running GPU-based simulations substantially more robust." msgstr "" -"基于 Ray 的虚拟客户端引擎 (`start_simulation`)已更新,以改进对 GPU 的支持。此次更新包含了在 GPU " -"集群环境中扩展模拟的一些经验教训。新的默认设置使基于 GPU 的模拟运行更加稳健。" +"基于 Ray 的虚拟客户端引擎 (`start_simulation`)已更新,以改进对 GPU 的支持。此" +"次更新包含了在 GPU 集群环境中扩展模拟的一些经验教训。新的默认设置使基于 GPU " +"的模拟运行更加稳健。" #: ../../source/ref-changelog.md:426 msgid "" -"**Improve GPU support in Jupyter Notebook tutorials** " -"([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" +"**Improve GPU support in Jupyter Notebook tutorials** ([#1527](https://" +"github.com/adap/flower/pull/1527), [#1558](https://github.com/adap/flower/" +"pull/1558))" msgstr "" -"**改进 Jupyter Notebook 教程中的 GPU 支持** " -"([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" +"**改进 Jupyter Notebook 教程中的 GPU 支持** ([#1527](https://github.com/adap/" +"flower/pull/1527), [#1558](https://github.com/adap/flower/pull/1558))" #: ../../source/ref-changelog.md:428 msgid "" -"Some users reported that Jupyter Notebooks have not always been easy to " -"use on GPU instances. We listened and made improvements to all of our " -"Jupyter notebooks! Check out the updated notebooks here:" +"Some users reported that Jupyter Notebooks have not always been easy to use " +"on GPU instances. We listened and made improvements to all of our Jupyter " +"notebooks! Check out the updated notebooks here:" msgstr "" -"一些用户报告说,在 GPU 实例上使用 Jupyter 笔记本并不是很方便。我们听取了他们的意见,并对所有 Jupyter " -"笔记本进行了改进!点击这里查看更新后的笔记本:" +"一些用户报告说,在 GPU 实例上使用 Jupyter 笔记本并不是很方便。我们听取了他们" +"的意见,并对所有 Jupyter 笔记本进行了改进!点击这里查看更新后的笔记本:" #: ../../source/ref-changelog.md:430 msgid "" -"[An Introduction to Federated Learning](https://flower.ai/docs/framework" -"/tutorial-get-started-with-flower-pytorch.html)" +"[An Introduction to Federated Learning](https://flower.ai/docs/framework/" +"tutorial-get-started-with-flower-pytorch.html)" msgstr "" "[联邦学习简介](https://flower.ai/docs/framework/tutorial-get-started-with-" "flower-pytorch.html)" #: ../../source/ref-changelog.md:431 msgid "" -"[Strategies in Federated Learning](https://flower.ai/docs/framework" -"/tutorial-use-a-federated-learning-strategy-pytorch.html)" +"[Strategies in Federated Learning](https://flower.ai/docs/framework/tutorial-" +"use-a-federated-learning-strategy-pytorch.html)" msgstr "" "[联邦学习策略](https://flower.ai/docs/framework/tutorial-use-a-federated-" "learning-strategy-pytorch.html)" #: ../../source/ref-changelog.md:432 msgid "" -"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" -"-strategy-from-scratch-pytorch.html)" +"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a-" +"strategy-from-scratch-pytorch.html)" msgstr "" "[制定策略](https://flower.ai/docs/framework/tutorial-build-a-strategy-from-" "scratch-pytorch.html)" #: ../../source/ref-changelog.md:433 msgid "" -"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" -"customize-the-client-pytorch.html)" +"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-customize-" +"the-client-pytorch.html)" msgstr "" "[客户端和 NumPyClient](https://flower.ai/docs/framework/tutorial-customize-" "the-client-pytorch.html)" #: ../../source/ref-changelog.md:435 msgid "" -"**Introduce optional telemetry** " -"([#1533](https://github.com/adap/flower/pull/1533), " 
-"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" +"**Introduce optional telemetry** ([#1533](https://github.com/adap/flower/" +"pull/1533), [#1544](https://github.com/adap/flower/pull/1544), [#1584]" +"(https://github.com/adap/flower/pull/1584))" msgstr "" -"**引入可选遥测**([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584)" +"**引入可选遥测**([#1533](https://github.com/adap/flower/pull/1533), [#1544]" +"(https://github.com/adap/flower/pull/1544), [#1584](https://github.com/adap/" +"flower/pull/1584)" #: ../../source/ref-changelog.md:437 msgid "" -"After a [request for " -"feedback](https://github.com/adap/flower/issues/1534) from the community," -" the Flower open-source project introduces optional collection of " -"*anonymous* usage metrics to make well-informed decisions to improve " -"Flower. Doing this enables the Flower team to understand how Flower is " -"used and what challenges users might face." +"After a [request for feedback](https://github.com/adap/flower/issues/1534) " +"from the community, the Flower open-source project introduces optional " +"collection of *anonymous* usage metrics to make well-informed decisions to " +"improve Flower. Doing this enables the Flower team to understand how Flower " +"is used and what challenges users might face." msgstr "" "在社区发出[反馈请求](https://github.com/adap/flower/issues/1534)之后,Flower " -"开放源码项目引入了可选的*匿名*使用指标收集,以便在充分知情的情况下做出改进 Flower 的决定。这样做能让 Flower 团队了解 " -"Flower 的使用情况以及用户可能面临的挑战。" +"开放源码项目引入了可选的*匿名*使用指标收集,以便在充分知情的情况下做出改进 " +"Flower 的决定。这样做能让 Flower 团队了解 Flower 的使用情况以及用户可能面临的" +"挑战。" #: ../../source/ref-changelog.md:439 msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users who do not want to share anonymous usage metrics. " -"[Read more.](https://flower.ai/docs/telemetry.html)." +"**Flower is a friendly framework for collaborative AI and data science.** " +"Staying true to this statement, Flower makes it easy to disable telemetry " +"for users who do not want to share anonymous usage metrics. [Read more.]" +"(https://flower.ai/docs/telemetry.html)." 
msgstr "" -"**Flower 是一个用于协作式人工智能和数据科学的友好框架。** Flower " -"遵循这一声明,让不想分享匿名使用指标的用户可以轻松禁用遥测技术。[阅读全文](https://flower.ai/docs/telemetry.html)。" +"**Flower 是一个用于协作式人工智能和数据科学的友好框架。** Flower 遵循这一声" +"明,让不想分享匿名使用指标的用户可以轻松禁用遥测技术。[阅读全文](https://" +"flower.ai/docs/telemetry.html)。" #: ../../source/ref-changelog.md:441 msgid "" -"**Introduce (experimental) Driver API** " -"([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" -msgstr "" -"**引入(试验性)Driver API** ([#1520](https://github.com/adap/flower/pull/1520)," -" [#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +"**Introduce (experimental) Driver API** ([#1520](https://github.com/adap/" +"flower/pull/1520), [#1525](https://github.com/adap/flower/pull/1525), [#1545]" +"(https://github.com/adap/flower/pull/1545), [#1546](https://github.com/adap/" +"flower/pull/1546), [#1550](https://github.com/adap/flower/pull/1550), [#1551]" +"(https://github.com/adap/flower/pull/1551), [#1567](https://github.com/adap/" +"flower/pull/1567))" +msgstr "" +"**引入(试验性)Driver API** ([#1520](https://github.com/adap/flower/" +"pull/1520), [#1525](https://github.com/adap/flower/pull/1525), [#1545]" +"(https://github.com/adap/flower/pull/1545), [#1546](https://github.com/adap/" +"flower/pull/1546), [#1550](https://github.com/adap/flower/pull/1550), [#1551]" +"(https://github.com/adap/flower/pull/1551), [#1567](https://github.com/adap/" +"flower/pull/1567))" #: ../../source/ref-changelog.md:443 msgid "" "Flower now has a new (experimental) Driver API which will enable fully " "programmable, async, and multi-tenant Federated Learning and Federated " -"Analytics applications. Phew, that's a lot! Going forward, the Driver API" -" will be the abstraction that many upcoming features will be built on - " -"and you can start building those things now, too." +"Analytics applications. Phew, that's a lot! Going forward, the Driver API " +"will be the abstraction that many upcoming features will be built on - and " +"you can start building those things now, too." msgstr "" -"Flower 现在有了一个新的(试验性的)驱动程序应用程序接口(Driver " -"API),它将支持完全可编程、异步和多租户的联邦学习(Federated Learning)和联邦分析(Federated " -"Analytics)应用程序。展望未来,Driver API 将成为许多即将推出的功能的抽象基础,您现在就可以开始构建这些功能。" +"Flower 现在有了一个新的(试验性的)驱动程序应用程序接口(Driver API),它将支" +"持完全可编程、异步和多租户的联邦学习(Federated Learning)和联邦分析" +"(Federated Analytics)应用程序。展望未来,Driver API 将成为许多即将推出的功" +"能的抽象基础,您现在就可以开始构建这些功能。" #: ../../source/ref-changelog.md:445 msgid "" -"The Driver API also enables a new execution mode in which the server runs" -" indefinitely. Multiple individual workloads can run concurrently and " -"start and stop their execution independent of the server. This is " -"especially useful for users who want to deploy Flower in production." +"The Driver API also enables a new execution mode in which the server runs " +"indefinitely. Multiple individual workloads can run concurrently and start " +"and stop their execution independent of the server. 
This is especially " +"useful for users who want to deploy Flower in production." msgstr "" -"驱动程序应用程序接口还支持一种新的执行模式,在这种模式下,服务器可无限期运行。多个单独的工作负载可以同时运行,并独立于服务器启动和停止执行。这对于希望在生产中部署" -" Flower 的用户来说尤其有用。" +"驱动程序应用程序接口还支持一种新的执行模式,在这种模式下,服务器可无限期运" +"行。多个单独的工作负载可以同时运行,并独立于服务器启动和停止执行。这对于希望" +"在生产中部署 Flower 的用户来说尤其有用。" #: ../../source/ref-changelog.md:447 msgid "" -"To learn more, check out the `mt-pytorch` code example. We look forward " -"to you feedback!" +"To learn more, check out the `mt-pytorch` code example. We look forward to " +"you feedback!" msgstr "要了解更多信息,请查看 `mt-pytorch` 代码示例。我们期待您的反馈!" #: ../../source/ref-changelog.md:449 msgid "" -"Please note: *The Driver API is still experimental and will likely change" -" significantly over time.*" +"Please note: *The Driver API is still experimental and will likely change " +"significantly over time.*" msgstr "请注意:Driver API仍处于试验阶段,随着时间的推移可能会发生重大变化。*" #: ../../source/ref-changelog.md:451 msgid "" -"**Add new Federated Analytics with Pandas example** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" +"**Add new Federated Analytics with Pandas example** ([#1469](https://github." +"com/adap/flower/pull/1469), [#1535](https://github.com/adap/flower/" +"pull/1535))" msgstr "" -"** 添加新的使用 Pandas " -"的联邦分析示例**([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535)" +"** 添加新的使用 Pandas 的联邦分析示例**([#1469](https://github.com/adap/" +"flower/pull/1469), [#1535](https://github.com/adap/flower/pull/1535)" #: ../../source/ref-changelog.md:453 msgid "" -"A new code example (`quickstart-pandas`) demonstrates federated analytics" -" with Pandas and Flower. You can find it here: [quickstart-" -"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" -"pandas)." +"A new code example (`quickstart-pandas`) demonstrates federated analytics " +"with Pandas and Flower. You can find it here: [quickstart-pandas](https://" +"github.com/adap/flower/tree/main/examples/quickstart-pandas)." msgstr "" -"新代码示例(`quickstart-pandas`)演示了使用 Pandas 和 Flower 进行联邦分析。您可以在此处找到它: " -"[quickstart-pandas](https://github.com/adap/flower/tree/main/examples" -"/quickstart-pandas)。" +"新代码示例(`quickstart-pandas`)演示了使用 Pandas 和 Flower 进行联邦分析。您" +"可以在此处找到它: [quickstart-pandas](https://github.com/adap/flower/tree/" +"main/examples/quickstart-pandas)。" #: ../../source/ref-changelog.md:455 msgid "" -"**Add new strategies: Krum and MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" +"**Add new strategies: Krum and MultiKrum** ([#1481](https://github.com/adap/" +"flower/pull/1481))" msgstr "" -"**添加新策略: Krum 和 MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" +"**添加新策略: Krum 和 MultiKrum** ([#1481](https://github.com/adap/flower/" +"pull/1481))" #: ../../source/ref-changelog.md:457 msgid "" "Edoardo, a computer science student at the Sapienza University of Rome, " -"contributed a new `Krum` strategy that enables users to easily use Krum " -"and MultiKrum in their workloads." +"contributed a new `Krum` strategy that enables users to easily use Krum and " +"MultiKrum in their workloads." 
msgstr "" -"罗马萨皮恩扎大学(Sapienza University)计算机科学专业的学生埃多尔多(Edoardo)提出了一种新的 \"Krum " -"\"策略,使用户能够在其工作负载中轻松使用 Krum 和 MultiKrum。" +"罗马萨皮恩扎大学(Sapienza University)计算机科学专业的学生埃多尔多" +"(Edoardo)提出了一种新的 \"Krum \"策略,使用户能够在其工作负载中轻松使用 " +"Krum 和 MultiKrum。" #: ../../source/ref-changelog.md:459 msgid "" -"**Update C++ example to be compatible with Flower v1.2.0** " -"([#1495](https://github.com/adap/flower/pull/1495))" +"**Update C++ example to be compatible with Flower v1.2.0** ([#1495](https://" +"github.com/adap/flower/pull/1495))" msgstr "" -"** 更新 C++ 示例,与 Flower v1.2.0 兼容** " -"([#1495](https://github.com/adap/flower/pull/1495))" +"** 更新 C++ 示例,与 Flower v1.2.0 兼容** ([#1495](https://github.com/adap/" +"flower/pull/1495))" #: ../../source/ref-changelog.md:461 msgid "" -"The C++ code example has received a substantial update to make it " -"compatible with the latest version of Flower." +"The C++ code example has received a substantial update to make it compatible " +"with the latest version of Flower." msgstr "为了与最新版本的 Flower 兼容,C++ 示例代码进行了大幅更新。" #: ../../source/ref-changelog.md:463 msgid "" -"**General improvements** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" -msgstr "" -"**普通改进** ([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github. com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" +"**General improvements** ([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), [#1506](https://github." 
+"com/adap/flower/pull/1506), [#1514](https://github.com/adap/flower/" +"pull/1514), [#1522](https://github.com/adap/flower/pull/1522), [#1523]" +"(https://github.com/adap/flower/pull/1523), [#1526](https://github.com/adap/" +"flower/pull/1526), [#1528](https://github.com/adap/flower/pull/1528), [#1547]" +"(https://github.com/adap/flower/pull/1547), [#1549](https://github.com/adap/" +"flower/pull/1549), [#1560](https://github.com/adap/flower/pull/1560), [#1564]" +"(https://github.com/adap/flower/pull/1564), [#1566](https://github.com/adap/" +"flower/pull/1566))" +msgstr "" +"**普通改进** ([#1491](https://github.com/adap/flower/pull/1491), [#1504]" +"(https://github.com/adap/flower/pull/1504), [#1506](https://github.com/adap/" +"flower/pull/1506), [#1514](https://github.com/adap/flower/pull/1514), [#1522]" +"(https://github.com/adap/flower/pull/1522), [#1523](https://github.com/adap/" +"flower/pull/1523), [#1526](https://github. com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), [#1547](https://github." +"com/adap/flower/pull/1547), [#1549](https://github.com/adap/flower/" +"pull/1549), [#1560](https://github.com/adap/flower/pull/1560), [#1564]" +"(https://github.com/adap/flower/pull/1564), [#1566](https://github.com/adap/" +"flower/pull/1566))" #: ../../source/ref-changelog.md:467 msgid "" -"**Updated documentation** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" -msgstr "" -"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +"**Updated documentation** ([#1494](https://github.com/adap/flower/" +"pull/1494), [#1496](https://github.com/adap/flower/pull/1496), [#1500]" +"(https://github.com/adap/flower/pull/1500), [#1503](https://github.com/adap/" +"flower/pull/1503), [#1505](https://github.com/adap/flower/pull/1505), [#1524]" +"(https://github.com/adap/flower/pull/1524), [#1518](https://github.com/adap/" +"flower/pull/1518), [#1519](https://github.com/adap/flower/pull/1519), [#1515]" +"(https://github.com/adap/flower/pull/1515))" +msgstr "" +"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), [#1496]" +"(https://github.com/adap/flower/pull/1496), [#1500](https://github.com/adap/" +"flower/pull/1500), [#1503](https://github.com/adap/flower/pull/1503), [#1505]" +"(https://github.com/adap/flower/pull/1505), [#1524](https://github.com/adap/" +"flower/pull/1524), [#1518](https://github.com/adap/flower/pull/1518), [#1519]" +"(https://github.com/adap/flower/pull/1519), [#1515](https://github.com/adap/" +"flower/pull/1515))" #: ../../source/ref-changelog.md:471 msgid "" -"One highlight is the new [first time contributor " 
-"guide](https://flower.ai/docs/first-time-contributors.html): if you've " -"never contributed on GitHub before, this is the perfect place to start!" +"One highlight is the new [first time contributor guide](https://flower.ai/" +"docs/first-time-contributors.html): if you've never contributed on GitHub " +"before, this is the perfect place to start!" msgstr "" "其中一个亮点是新的[首次贡献者指南](https://flower.ai/docs/first-time-" -"contributors.html):如果你以前从未在 GitHub 上做过贡献,这将是一个完美的开始!" +"contributors.html):如果你以前从未在 GitHub 上做过贡献,这将是一个完美的开" +"始!" #: ../../source/ref-changelog.md:477 msgid "v1.1.0 (2022-10-31)" @@ -17226,183 +17309,184 @@ msgstr "v1.1.0 (2022-10-31)" msgid "" "We would like to give our **special thanks** to all the contributors who " "made the new version of Flower possible (in `git shortlog` order):" -msgstr "在此,我们向所有促成 Flower 新版本的贡献者致以**特别的谢意(按 \"git shortlog \"顺序排列):" +msgstr "" +"在此,我们向所有促成 Flower 新版本的贡献者致以**特别的谢意(按 \"git " +"shortlog \"顺序排列):" #: ../../source/ref-changelog.md:483 msgid "" "`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " "Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, `danielnugraha`, " +"`edogab33`" msgstr "" "`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " "Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, `danielnugraha`, " +"`edogab33`" #: ../../source/ref-changelog.md:487 msgid "" -"**Introduce Differential Privacy wrappers (preview)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" +"**Introduce Differential Privacy wrappers (preview)** ([#1357](https://" +"github.com/adap/flower/pull/1357), [#1460](https://github.com/adap/flower/" +"pull/1460))" msgstr "" -"**引入差分隐私包装器(预览)** ([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" +"**引入差分隐私包装器(预览)** ([#1357](https://github.com/adap/flower/" +"pull/1357), [#1460](https://github.com/adap/flower/pull/1460))" #: ../../source/ref-changelog.md:489 msgid "" -"The first (experimental) preview of pluggable Differential Privacy " -"wrappers enables easy configuration and usage of differential privacy " -"(DP). The pluggable DP wrappers enable framework-agnostic **and** " -"strategy-agnostic usage of both client-side DP and server-side DP. Head " -"over to the Flower docs, a new explainer goes into more detail." +"The first (experimental) preview of pluggable Differential Privacy wrappers " +"enables easy configuration and usage of differential privacy (DP). The " +"pluggable DP wrappers enable framework-agnostic **and** strategy-agnostic " +"usage of both client-side DP and server-side DP. Head over to the Flower " +"docs, a new explainer goes into more detail." 
msgstr "" -"可插拔差分隐私封装器的首个(实验性)预览版可轻松配置和使用差分隐私(DP)。可插拔的差分隐私封装器可实现客户端差分隐私和服务器端差分隐私的框架无关**以及**策略无关的使用。请访问" -" Flower 文档,新的解释器会提供更多细节。" +"可插拔差分隐私封装器的首个(实验性)预览版可轻松配置和使用差分隐私(DP)。可" +"插拔的差分隐私封装器可实现客户端差分隐私和服务器端差分隐私的框架无关**以及**" +"策略无关的使用。请访问 Flower 文档,新的解释器会提供更多细节。" #: ../../source/ref-changelog.md:491 msgid "" -"**New iOS CoreML code example** " -"([#1289](https://github.com/adap/flower/pull/1289))" -msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" +"**New iOS CoreML code example** ([#1289](https://github.com/adap/flower/" +"pull/1289))" +msgstr "" +"**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/" +"pull/1289))" #: ../../source/ref-changelog.md:493 msgid "" -"Flower goes iOS! A massive new code example shows how Flower clients can " -"be built for iOS. The code example contains both Flower iOS SDK " -"components that can be used for many tasks, and one task example running " -"on CoreML." +"Flower goes iOS! A massive new code example shows how Flower clients can be " +"built for iOS. The code example contains both Flower iOS SDK components that " +"can be used for many tasks, and one task example running on CoreML." msgstr "" -"Flower 进入 iOS!大量新代码示例展示了如何为 iOS 构建 Flower 客户端。该代码示例包含可用于多种任务的 Flower iOS " -"SDK 组件,以及在 CoreML 上运行的一个任务示例。" +"Flower 进入 iOS!大量新代码示例展示了如何为 iOS 构建 Flower 客户端。该代码示" +"例包含可用于多种任务的 Flower iOS SDK 组件,以及在 CoreML 上运行的一个任务示" +"例。" #: ../../source/ref-changelog.md:495 msgid "" -"**New FedMedian strategy** " -"([#1461](https://github.com/adap/flower/pull/1461))" -msgstr "**新的联邦医疗策略** ([#1461](https://github.com/adap/flower/pull/1461))" +"**New FedMedian strategy** ([#1461](https://github.com/adap/flower/" +"pull/1461))" +msgstr "" +"**新的联邦医疗策略** ([#1461](https://github.com/adap/flower/pull/1461))" #: ../../source/ref-changelog.md:497 msgid "" -"The new `FedMedian` strategy implements Federated Median (FedMedian) by " -"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." +"The new `FedMedian` strategy implements Federated Median (FedMedian) by [Yin " +"et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." msgstr "" -"新的 \"FedMedian \"战略实现了[Yin " -"等人,2018]的联邦中值(FedMedian)(https://arxiv.org/pdf/1803.01498v1.pdf)。" +"新的 \"FedMedian \"战略实现了[Yin 等人,2018]的联邦中值(FedMedian)(https://" +"arxiv.org/pdf/1803.01498v1.pdf)。" #: ../../source/ref-changelog.md:499 msgid "" -"**Log** `Client` **exceptions in Virtual Client Engine** " -"([#1493](https://github.com/adap/flower/pull/1493))" -msgstr "**虚拟客户端引擎中的**日志**`客户端`**异常([#1493](https://github.com/adap/flower/pull/1493))" +"**Log** `Client` **exceptions in Virtual Client Engine** ([#1493](https://" +"github.com/adap/flower/pull/1493))" +msgstr "" +"**虚拟客户端引擎中的**日志**`客户端`**异常([#1493](https://github.com/adap/" +"flower/pull/1493))" #: ../../source/ref-changelog.md:501 msgid "" -"All `Client` exceptions happening in the VCE are now logged by default " -"and not just exposed to the configured `Strategy` (via the `failures` " -"argument)." -msgstr "VCE 中发生的所有 \"客户端 \"异常现在都会被默认记录下来,而不只是暴露给配置的 `Strategy`(通过 `failures`参数)。" +"All `Client` exceptions happening in the VCE are now logged by default and " +"not just exposed to the configured `Strategy` (via the `failures` argument)." 
+msgstr "" +"VCE 中发生的所有 \"客户端 \"异常现在都会被默认记录下来,而不只是暴露给配置的 " +"`Strategy`(通过 `failures`参数)。" #: ../../source/ref-changelog.md:503 msgid "" -"**Improve Virtual Client Engine internals** " -"([#1401](https://github.com/adap/flower/pull/1401), " -"[#1453](https://github.com/adap/flower/pull/1453))" -msgstr "**改进虚拟客户端引擎内部**([#1401](https://github.com/adap/flower/pull/1401)、[#1453](https://github.com/adap/flower/pull/1453))" +"**Improve Virtual Client Engine internals** ([#1401](https://github.com/adap/" +"flower/pull/1401), [#1453](https://github.com/adap/flower/pull/1453))" +msgstr "" +"**改进虚拟客户端引擎内部**([#1401](https://github.com/adap/flower/" +"pull/1401)、[#1453](https://github.com/adap/flower/pull/1453))" #: ../../source/ref-changelog.md:505 msgid "" -"Some internals of the Virtual Client Engine have been revamped. The VCE " -"now uses Ray 2.0 under the hood, the value type of the `client_resources`" -" dictionary changed to `float` to allow fractions of resources to be " +"Some internals of the Virtual Client Engine have been revamped. The VCE now " +"uses Ray 2.0 under the hood, the value type of the `client_resources` " +"dictionary changed to `float` to allow fractions of resources to be " "allocated." msgstr "" -"虚拟客户端引擎的部分内部结构已进行了修改。VCE 现在使用 Ray 2.0,\"client_resources \"字典的值类型改为 " -"\"float\",以允许分配分数资源。" +"虚拟客户端引擎的部分内部结构已进行了修改。VCE 现在使用 Ray " +"2.0,\"client_resources \"字典的值类型改为 \"float\",以允许分配分数资源。" #: ../../source/ref-changelog.md:507 msgid "" -"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " -"Client Engine**" +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual Client " +"Engine**" msgstr "**支持虚拟客户端引擎中的可选** `Client`**/**`NumPyClient` **方法**" #: ../../source/ref-changelog.md:509 msgid "" -"The Virtual Client Engine now has full support for optional `Client` (and" -" `NumPyClient`) methods." +"The Virtual Client Engine now has full support for optional `Client` (and " +"`NumPyClient`) methods." msgstr "虚拟客户端引擎现在完全支持可选的 `Client`(和 `NumPyClient`)方法。" #: ../../source/ref-changelog.md:511 msgid "" -"**Provide type information to packages using** `flwr` " -"([#1377](https://github.com/adap/flower/pull/1377))" +"**Provide type information to packages using** `flwr` ([#1377](https://" +"github.com/adap/flower/pull/1377))" msgstr "" -"**使用** `flwr`向软件包提供类型信息 " -"([#1377](https://github.com/adap/flower/pull/1377))" +"**使用** `flwr`向软件包提供类型信息 ([#1377](https://github.com/adap/flower/" +"pull/1377))" #: ../../source/ref-changelog.md:513 msgid "" -"The package `flwr` is now bundled with a `py.typed` file indicating that " -"the package is typed. This enables typing support for projects or " -"packages that use `flwr` by enabling them to improve their code using " -"static type checkers like `mypy`." +"The package `flwr` is now bundled with a `py.typed` file indicating that the " +"package is typed. This enables typing support for projects or packages that " +"use `flwr` by enabling them to improve their code using static type checkers " +"like `mypy`." 
msgstr "" -"软件包 `flwr` 现在捆绑了一个 `py.typed` 文件,表明该软件包是类型化的。这样,使用 `flwr` 的项目或软件包就可以使用 " -"`mypy` 等静态类型检查器改进代码,从而获得类型支持。" +"软件包 `flwr` 现在捆绑了一个 `py.typed` 文件,表明该软件包是类型化的。这样," +"使用 `flwr` 的项目或软件包就可以使用 `mypy` 等静态类型检查器改进代码,从而获" +"得类型支持。" #: ../../source/ref-changelog.md:515 msgid "" -"**Updated code example** " -"([#1344](https://github.com/adap/flower/pull/1344), " +"**Updated code example** ([#1344](https://github.com/adap/flower/pull/1344), " "[#1347](https://github.com/adap/flower/pull/1347))" msgstr "" -"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), [#1347]" +"(https://github.com/adap/flower/pull/1347))" #: ../../source/ref-changelog.md:517 msgid "" "The code examples covering scikit-learn and PyTorch Lightning have been " "updated to work with the latest version of Flower." -msgstr "涵盖 scikit-learn 和 PyTorch Lightning 的代码示例已更新,以便与最新版本的 Flower 配合使用。" +msgstr "" +"涵盖 scikit-learn 和 PyTorch Lightning 的代码示例已更新,以便与最新版本的 " +"Flower 配合使用。" #: ../../source/ref-changelog.md:519 msgid "" -"**Updated documentation** " -"([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github.com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" -msgstr "" -"**更新文档** ([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github. 
com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" +"**Updated documentation** ([#1355](https://github.com/adap/flower/" +"pull/1355), [#1558](https://github.com/adap/flower/pull/1558), [#1379]" +"(https://github.com/adap/flower/pull/1379), [#1380](https://github.com/adap/" +"flower/pull/1380), [#1381](https://github.com/adap/flower/pull/1381), [#1332]" +"(https://github.com/adap/flower/pull/1332), [#1391](https://github.com/adap/" +"flower/pull/1391), [#1403](https://github.com/adap/flower/pull/1403), [#1364]" +"(https://github.com/adap/flower/pull/1364), [#1409](https://github.com/adap/" +"flower/pull/1409), [#1419](https://github.com/adap/flower/pull/1419), [#1444]" +"(https://github.com/adap/flower/pull/1444), [#1448](https://github.com/adap/" +"flower/pull/1448), [#1417](https://github.com/adap/flower/pull/1417), [#1449]" +"(https://github.com/adap/flower/pull/1449), [#1465](https://github.com/adap/" +"flower/pull/1465), [#1467](https://github.com/adap/flower/pull/1467))" +msgstr "" +"**更新文档** ([#1355](https://github.com/adap/flower/pull/1355), [#1558]" +"(https://github.com/adap/flower/pull/1558), [#1379](https://github.com/adap/" +"flower/pull/1379), [#1380](https://github.com/adap/flower/pull/1380), [#1381]" +"(https://github.com/adap/flower/pull/1381), [#1332](https://github.com/adap/" +"flower/pull/1332), [#1391](https://github.com/adap/flower/pull/1391), [#1403]" +"(https://github.com/adap/flower/pull/1403), [#1364](https://github. com/adap/" +"flower/pull/1364), [#1409](https://github.com/adap/flower/pull/1409), [#1419]" +"(https://github.com/adap/flower/pull/1419), [#1444](https://github.com/adap/" +"flower/pull/1444), [#1448](https://github.com/adap/flower/pull/1448), [#1417]" +"(https://github.com/adap/flower/pull/1417), [#1449](https://github.com/adap/" +"flower/pull/1449), [#1465](https://github.com/adap/flower/pull/1465), [#1467]" +"(https://github.com/adap/flower/pull/1467))" #: ../../source/ref-changelog.md:521 msgid "" @@ -17412,58 +17496,60 @@ msgstr "文档更新的数量之多,甚至没有必要逐一列出。" #: ../../source/ref-changelog.md:523 msgid "" -"**Restructured documentation** " -"([#1387](https://github.com/adap/flower/pull/1387))" +"**Restructured documentation** ([#1387](https://github.com/adap/flower/" +"pull/1387))" msgstr "**重构文档**([#1387](https://github.com/adap/flower/pull/1387))" #: ../../source/ref-changelog.md:525 msgid "" -"The documentation has been restructured to make it easier to navigate. " -"This is just the first step in a larger effort to make the Flower " -"documentation the best documentation of any project ever. Stay tuned!" -msgstr "我们对文档进行了重组,使其更易于浏览。这只是让 Flower 文档成为所有项目中最好文档的第一步。敬请期待!" +"The documentation has been restructured to make it easier to navigate. This " +"is just the first step in a larger effort to make the Flower documentation " +"the best documentation of any project ever. Stay tuned!" +msgstr "" +"我们对文档进行了重组,使其更易于浏览。这只是让 Flower 文档成为所有项目中最好" +"文档的第一步。敬请期待!" 
#: ../../source/ref-changelog.md:527 msgid "" -"**Open in Colab button** " -"([#1389](https://github.com/adap/flower/pull/1389))" -msgstr "**在 Colab 中打开按钮** ([#1389](https://github.com/adap/flower/pull/1389))" +"**Open in Colab button** ([#1389](https://github.com/adap/flower/pull/1389))" +msgstr "" +"**在 Colab 中打开按钮** ([#1389](https://github.com/adap/flower/pull/1389))" #: ../../source/ref-changelog.md:529 msgid "" -"The four parts of the Flower Federated Learning Tutorial now come with a " -"new `Open in Colab` button. No need to install anything on your local " -"machine, you can now use and learn about Flower in your browser, it's " -"only a single click away." +"The four parts of the Flower Federated Learning Tutorial now come with a new " +"`Open in Colab` button. No need to install anything on your local machine, " +"you can now use and learn about Flower in your browser, it's only a single " +"click away." msgstr "" -"Flower 联邦学习教程的四个部分现在都带有一个新的 \"在 Colab 中打开 " -"\"按钮。现在,您无需在本地计算机上安装任何软件,只需点击一下,就可以在浏览器中使用和学习 Flower。" +"Flower 联邦学习教程的四个部分现在都带有一个新的 \"在 Colab 中打开 \"按钮。现" +"在,您无需在本地计算机上安装任何软件,只需点击一下,就可以在浏览器中使用和学" +"习 Flower。" #: ../../source/ref-changelog.md:531 msgid "" -"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" +"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), [#1472](https://github." +"com/adap/flower/pull/1472), [#1473](https://github.com/adap/flower/" +"pull/1473), [#1474](https://github.com/adap/flower/pull/1474), [#1475]" +"(https://github.com/adap/flower/pull/1475))" msgstr "" -"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " -"[#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475)))" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), [#1470]" +"(https://github.com/adap/flower/pull/1470), [#1472](https://github.com/adap/" +"flower/pull/1472), [#1473](https://github.com/adap/flower/pull/1473), [#1474]" +"(https://github.com/adap/flower/pull/1474), [#1475](https://github.com/adap/" +"flower/pull/1475)))" #: ../../source/ref-changelog.md:533 msgid "" "The Flower Federated Learning Tutorial has two brand-new parts covering " "custom strategies (still WIP) and the distinction between `Client` and " -"`NumPyClient`. The existing parts one and two have also been improved " -"(many small changes and fixes)." +"`NumPyClient`. The existing parts one and two have also been improved (many " +"small changes and fixes)." 
msgstr "" -"Flower 联邦学习教程有两个全新的部分,涉及自定义策略(仍处于 WIP 阶段)和 `Client` 与 `NumPyClient` " -"之间的区别。现有的第一和第二部分也得到了改进(许多小改动和修正)。" +"Flower 联邦学习教程有两个全新的部分,涉及自定义策略(仍处于 WIP 阶段)和 " +"`Client` 与 `NumPyClient` 之间的区别。现有的第一和第二部分也得到了改进(许多" +"小改动和修正)。" #: ../../source/ref-changelog.md:539 msgid "v1.0.0 (2022-07-28)" @@ -17487,141 +17573,120 @@ msgstr "可配置的`get_parameters`" #: ../../source/ref-changelog.md:546 msgid "" -"Tons of small API cleanups resulting in a more coherent developer " -"experience" +"Tons of small API cleanups resulting in a more coherent developer experience" msgstr "对大量小型应用程序接口进行了清理,使开发人员的体验更加一致" #: ../../source/ref-changelog.md:550 msgid "" "We would like to give our **special thanks** to all the contributors who " -"made Flower 1.0 possible (in reverse [GitHub " -"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +"made Flower 1.0 possible (in reverse [GitHub Contributors](https://github." +"com/adap/flower/graphs/contributors) order):" msgstr "" -"在此,我们谨向所有促成 Flower 1.0 的贡献者致以**特别的谢意(按[GitHub " -"贡献者](https://github.com/adap/flower/graphs/contributors) 倒序排列):" +"在此,我们谨向所有促成 Flower 1.0 的贡献者致以**特别的谢意(按[GitHub 贡献者]" +"(https://github.com/adap/flower/graphs/contributors) 倒序排列):" #: ../../source/ref-changelog.md:552 msgid "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), " -"[@mrinaald](https://github.com/mrinaald), " -"[@zliel](https://github.com/zliel), " -"[@MeiruiJiang](https://github.com/MeiruiJiang), " -"[@sancarlim](https://github.com/sancarlim), " -"[@gubertoli](https://github.com/gubertoli), " -"[@Vingt100](https://github.com/Vingt100), " -"[@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), " -"[@jafermarq](https://github.com/jafermarq), " -"[@sisco0](https://github.com/sisco0), " -"[@akhilmathurs](https://github.com/akhilmathurs), " -"[@CanTuerk](https://github.com/CanTuerk), " -"[@mariaboerner1987](https://github.com/mariaboerner1987), " -"[@pedropgusmao](https://github.com/pedropgusmao), " -"[@tanertopal](https://github.com/tanertopal), " -"[@danieljanes](https://github.com/danieljanes)." 
-msgstr "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), " -"[@mrinaald](https://github.com/mrinaald), " -"[@zliel](https://github.com/zliel), " -"[@MeiruiJiang](https://github.com/MeiruiJiang), " -"[@sancarlim](https://github.com/sancarlim), " -"[@gubertoli](https://github.com/gubertoli), " -"[@Vingt100](https://github.com/Vingt100), " -"[@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), " -"[@jafermarq](https://github.com/jafermarq), " -"[@sisco0](https://github.com/sisco0), " -"[@akhilmathurs](https://github.com/akhilmathurs), " -"[@CanTuerk](https://github.com/CanTuerk), " -"[@mariaboerner1987](https://github.com/mariaboerner1987), " -"[@pedropgusmao](https://github.com/pedropgusmao), " -"[@tanertopal](https://github.com/tanertopal), " -"[@danieljanes](https://github.com/danieljanes)." +"[@rtaiello](https://github.com/rtaiello), [@g-pichler](https://github.com/g-" +"pichler), [@rob-luke](https://github.com/rob-luke), [@andreea-zaharia]" +"(https://github.com/andreea-zaharia), [@kinshukdua](https://github.com/" +"kinshukdua), [@nfnt](https://github.com/nfnt), [@tatiana-s](https://github." +"com/tatiana-s), [@TParcollet](https://github.com/TParcollet), [@vballoli]" +"(https://github.com/vballoli), [@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), [@hei411](https://github." +"com/hei411), [@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), [@Rubiel1](https://github." +"com/Rubiel1), [@FANTOME-PAN](https://github.com/FANTOME-PAN), [@Rono-BC]" +"(https://github.com/Rono-BC), [@lbhm](https://github.com/lbhm), [@sishtiaq]" +"(https://github.com/sishtiaq), [@remde](https://github.com/remde), [@Jueun-" +"Park](https://github.com/Jueun-Park), [@architjen](https://github.com/" +"architjen), [@PratikGarai](https://github.com/PratikGarai), [@mrinaald]" +"(https://github.com/mrinaald), [@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), [@sancarlim](https://github." 
+"com/sancarlim), [@gubertoli](https://github.com/gubertoli), [@Vingt100]" +"(https://github.com/Vingt100), [@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), [@jafermarq](https://github.com/" +"jafermarq), [@sisco0](https://github.com/sisco0), [@akhilmathurs](https://" +"github.com/akhilmathurs), [@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), [@pedropgusmao]" +"(https://github.com/pedropgusmao), [@tanertopal](https://github.com/" +"tanertopal), [@danieljanes](https://github.com/danieljanes)." +msgstr "" +"[@rtaiello](https://github.com/rtaiello), [@g-pichler](https://github.com/g-" +"pichler), [@rob-luke](https://github.com/rob-luke), [@andreea-zaharia]" +"(https://github.com/andreea-zaharia), [@kinshukdua](https://github.com/" +"kinshukdua), [@nfnt](https://github.com/nfnt), [@tatiana-s](https://github." +"com/tatiana-s), [@TParcollet](https://github.com/TParcollet), [@vballoli]" +"(https://github.com/vballoli), [@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), [@hei411](https://github." +"com/hei411), [@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), [@Rubiel1](https://github." +"com/Rubiel1), [@FANTOME-PAN](https://github.com/FANTOME-PAN), [@Rono-BC]" +"(https://github.com/Rono-BC), [@lbhm](https://github.com/lbhm), [@sishtiaq]" +"(https://github.com/sishtiaq), [@remde](https://github.com/remde), [@Jueun-" +"Park](https://github.com/Jueun-Park), [@architjen](https://github.com/" +"architjen), [@PratikGarai](https://github.com/PratikGarai), [@mrinaald]" +"(https://github.com/mrinaald), [@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), [@sancarlim](https://github." +"com/sancarlim), [@gubertoli](https://github.com/gubertoli), [@Vingt100]" +"(https://github.com/Vingt100), [@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), [@jafermarq](https://github.com/" +"jafermarq), [@sisco0](https://github.com/sisco0), [@akhilmathurs](https://" +"github.com/akhilmathurs), [@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), [@pedropgusmao]" +"(https://github.com/pedropgusmao), [@tanertopal](https://github.com/" +"tanertopal), [@danieljanes](https://github.com/danieljanes)." #: ../../source/ref-changelog.md:556 msgid "" -"**All arguments must be passed as keyword arguments** " -"([#1338](https://github.com/adap/flower/pull/1338))" -msgstr "** 所有参数必须作为关键字参数传递** ([#1338](https://github.com/adap/flower/pull/1338))" +"**All arguments must be passed as keyword arguments** ([#1338](https://" +"github.com/adap/flower/pull/1338))" +msgstr "" +"** 所有参数必须作为关键字参数传递** ([#1338](https://github.com/adap/flower/" +"pull/1338))" #: ../../source/ref-changelog.md:558 msgid "" -"Pass all arguments as keyword arguments, positional arguments are not " -"longer supported. Code that uses positional arguments (e.g., " -"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " -"for each positional argument (e.g., " -"`start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())`)." +"Pass all arguments as keyword arguments, positional arguments are not longer " +"supported. 
Code that uses positional arguments (e.g., " +"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword for " +"each positional argument (e.g., " +"`start_client(server_address=\"127.0.0.1:8080\", client=FlowerClient())`)." msgstr "" -"以关键字参数传递所有参数,不再支持位置参数。使用位置参数的代码(例如,`start_client(\"127.0.0.1:8080\", " -"FlowerClient())`)必须为每个位置参数添加关键字(例如,`start_client(server_address=\"127.0.0.1:8080\"," -" client=FlowerClient())`)。" +"以关键字参数传递所有参数,不再支持位置参数。使用位置参数的代码(例如," +"`start_client(\"127.0.0.1:8080\", FlowerClient())`)必须为每个位置参数添加关" +"键字(例如,`start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)。" #: ../../source/ref-changelog.md:560 msgid "" "**Introduce configuration object** `ServerConfig` **in** `start_server` " -"**and** `start_simulation` " -"([#1317](https://github.com/adap/flower/pull/1317))" +"**and** `start_simulation` ([#1317](https://github.com/adap/flower/" +"pull/1317))" msgstr "" "**在*** `start_server` ***和*** `start_simulation` 中引入配置对象*** " "`ServerConfig` ([#1317](https://github.com/adap/flower/pull/1317))" #: ../../source/ref-changelog.md:562 msgid "" -"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " -"600.0}`, `start_server` and `start_simulation` now expect a configuration" -" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" -" arguments that as the previous config dict, but it makes writing type-" -"safe code easier and the default parameters values more transparent." +"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": 600.0}" +"`, `start_server` and `start_simulation` now expect a configuration object " +"of type `flwr.server.ServerConfig`. `ServerConfig` takes the same arguments " +"that as the previous config dict, but it makes writing type-safe code easier " +"and the default parameters values more transparent." msgstr "" -"并非配置字典`{\"num_rounds\": 3, \"round_timeout\": 600.0}`, `start_server`和 " -"`start_simulation`现在用一个类型为 " -"`flwr.server.ServerConfig`的配置对象。`ServerConfig`接收的参数与之前的 config dict " -"相同,但它使编写类型安全代码变得更容易,默认参数值也更加透明。" +"并非配置字典`{\"num_rounds\": 3, \"round_timeout\": 600.0}`, `start_server`" +"和 `start_simulation`现在用一个类型为 `flwr.server.ServerConfig`的配置对象。" +"`ServerConfig`接收的参数与之前的 config dict 相同,但它使编写类型安全代码变得" +"更容易,默认参数值也更加透明。" #: ../../source/ref-changelog.md:564 msgid "" -"**Rename built-in strategy parameters for clarity** " -"([#1334](https://github.com/adap/flower/pull/1334))" -msgstr "**重新命名内置策略参数,使其更加清晰** ([#1334](https://github.com/adap/flower/pull/1334))" +"**Rename built-in strategy parameters for clarity** ([#1334](https://github." +"com/adap/flower/pull/1334))" +msgstr "" +"**重新命名内置策略参数,使其更加清晰** ([#1334](https://github.com/adap/" +"flower/pull/1334))" #: ../../source/ref-changelog.md:566 msgid "" @@ -17643,20 +17708,23 @@ msgstr "`eval_fn` --> `evaluate_fn`" #: ../../source/ref-changelog.md:572 msgid "" -"**Update default arguments of built-in strategies** " -"([#1278](https://github.com/adap/flower/pull/1278))" -msgstr "**更新内置策略的默认参数** ([#1278](https://github.com/adap/flower/pull/1278))" +"**Update default arguments of built-in strategies** ([#1278](https://github." +"com/adap/flower/pull/1278))" +msgstr "" +"**更新内置策略的默认参数** ([#1278](https://github.com/adap/flower/" +"pull/1278))" #: ../../source/ref-changelog.md:574 msgid "" "All built-in strategies now use `fraction_fit=1.0` and " -"`fraction_evaluate=1.0`, which means they select *all* currently " -"available clients for training and evaluation. 
Projects that relied on " -"the previous default values can get the previous behaviour by " -"initializing the strategy in the following way:" +"`fraction_evaluate=1.0`, which means they select *all* currently available " +"clients for training and evaluation. Projects that relied on the previous " +"default values can get the previous behaviour by initializing the strategy " +"in the following way:" msgstr "" -"所有内置策略现在都使用 \"fraction_fit=1.0 \"和 " -"\"fraction_evaluate=1.0\",这意味着它们会选择*所有*当前可用的客户端进行训练和评估。依赖以前默认值的项目可以通过以下方式初始化策略,获得以前的行为:" +"所有内置策略现在都使用 \"fraction_fit=1.0 \"和 \"fraction_evaluate=1.0\",这" +"意味着它们会选择*所有*当前可用的客户端进行训练和评估。依赖以前默认值的项目可" +"以通过以下方式初始化策略,获得以前的行为:" #: ../../source/ref-changelog.md:576 msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" @@ -17664,17 +17732,18 @@ msgstr "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" #: ../../source/ref-changelog.md:578 msgid "" -"**Add** `server_round` **to** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"**Add** `server_round` **to** `Strategy.evaluate` ([#1334](https://github." +"com/adap/flower/pull/1334))" msgstr "" -"**添加*** `server_round` ***到*** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"**添加*** `server_round` ***到*** `Strategy.evaluate` ([#1334](https://" +"github.com/adap/flower/pull/1334))" #: ../../source/ref-changelog.md:580 msgid "" -"The `Strategy` method `evaluate` now receives the current round of " -"federated learning/evaluation as the first parameter." -msgstr "`Strategy`的`evaluate` 方法现在会接收当前一轮联邦学习/评估作为第一个参数。" +"The `Strategy` method `evaluate` now receives the current round of federated " +"learning/evaluation as the first parameter." +msgstr "" +"`Strategy`的`evaluate` 方法现在会接收当前一轮联邦学习/评估作为第一个参数。" #: ../../source/ref-changelog.md:582 msgid "" @@ -17688,48 +17757,51 @@ msgstr "" msgid "" "The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " "three parameters: (1) The current round of federated learning/evaluation " -"(`server_round`), (2) the model parameters to evaluate (`parameters`), " -"and (3) a config dictionary (`config`)." +"(`server_round`), (2) the model parameters to evaluate (`parameters`), and " +"(3) a config dictionary (`config`)." msgstr "" -"传递给内置策略(如 `FedAvg`)的 `evaluate_fn` 现在需要三个参数:(1) 当前一轮联邦学习/评估 " -"(`server_round`),(2) 要评估的模型参数 (`parameters`),(3) 配置字典 (`config`)。" +"传递给内置策略(如 `FedAvg`)的 `evaluate_fn` 现在需要三个参数:(1) 当前一轮" +"联邦学习/评估 (`server_round`),(2) 要评估的模型参数 (`parameters`),(3) 配置" +"字典 (`config`)。" #: ../../source/ref-changelog.md:586 msgid "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"**Rename** `rnd` **to** `server_round` ([#1321](https://github.com/adap/" +"flower/pull/1321))" msgstr "" -"**重新命名** `rnd` ** to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"**重新命名** `rnd` ** to** `server_round` ([#1321](https://github.com/adap/" +"flower/pull/1321))" #: ../../source/ref-changelog.md:588 msgid "" "Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " "`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " -"current round of federated learning/evaluation as their first parameter. " -"To improve reaability and avoid confusion with *random*, this parameter " -"has been renamed from `rnd` to `server_round`." +"current round of federated learning/evaluation as their first parameter. 
To " +"improve reaability and avoid confusion with *random*, this parameter has " +"been renamed from `rnd` to `server_round`." msgstr "" -"几个 Flower " -"方法和函数(`evaluate_fn`、`configure_fit`、`aggregate_fit`、`configure_evaluate`、`aggregate_evaluate`)的第一个参数是当前一轮的联邦学习/评估。为提高可重复性并避免与" -" *random* 混淆,该参数已从 `rnd` 更名为 `server_round`。" +"几个 Flower 方法和函数(`evaluate_fn`、`configure_fit`、`aggregate_fit`、" +"`configure_evaluate`、`aggregate_evaluate`)的第一个参数是当前一轮的联邦学习/" +"评估。为提高可重复性并避免与 *random* 混淆,该参数已从 `rnd` 更名为 " +"`server_round`。" #: ../../source/ref-changelog.md:590 msgid "" -"**Move** `flwr.dataset` **to** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" +"**Move** `flwr.dataset` **to** `flwr_baselines` ([#1273](https://github.com/" +"adap/flower/pull/1273))" msgstr "" -"**移动*** `flwr.dataset` **到*** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" +"**移动*** `flwr.dataset` **到*** `flwr_baselines` ([#1273](https://github." +"com/adap/flower/pull/1273))" #: ../../source/ref-changelog.md:592 -msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." +msgid "" +"The experimental package `flwr.dataset` was migrated to Flower Baselines." msgstr "实验软件包 `flwr.dataset` 已迁移至 Flower Baselines。" #: ../../source/ref-changelog.md:594 msgid "" -"**Remove experimental strategies** " -"([#1280](https://github.com/adap/flower/pull/1280))" +"**Remove experimental strategies** ([#1280](https://github.com/adap/flower/" +"pull/1280))" msgstr "**删除实验策略** ([#1280](https://github.com/adap/flower/pull/1280))" #: ../../source/ref-changelog.md:596 @@ -17740,45 +17812,45 @@ msgstr "移除未维护的试验性策略(`FastAndSlow`、`FedFSv0`、`FedFSv1 #: ../../source/ref-changelog.md:598 msgid "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**Rename** `Weights` **to** `NDArrays` ([#1258](https://github.com/adap/" +"flower/pull/1258), [#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -"**重新命名** `Weights` **到** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**重新命名** `Weights` **到** `NDArrays` ([#1258](https://github.com/adap/" +"flower/pull/1258), [#1259](https://github.com/adap/flower/pull/1259))" #: ../../source/ref-changelog.md:600 msgid "" "`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " "capture what this type is all about." -msgstr "flwr.common.Weights \"更名为 \"flwr.common.NDArrays\",以更好地反映该类型的含义。" +msgstr "" +"flwr.common.Weights \"更名为 \"flwr.common.NDArrays\",以更好地反映该类型的含" +"义。" #: ../../source/ref-changelog.md:602 msgid "" -"**Remove antiquated** `force_final_distributed_eval` **from** " -"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**Remove antiquated** `force_final_distributed_eval` **from** `start_server` " +"([#1258](https://github.com/adap/flower/pull/1258), [#1259](https://github." 
+"com/adap/flower/pull/1259))" msgstr "" -"**从** `start_server` 中移除过时的** `force_final_distributed_eval` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**从** `start_server` 中移除过时的** `force_final_distributed_eval` ([#1258]" +"(https://github.com/adap/flower/pull/1258), [#1259](https://github.com/adap/" +"flower/pull/1259))" #: ../../source/ref-changelog.md:604 msgid "" -"The `start_server` parameter `force_final_distributed_eval` has long been" -" a historic artefact, in this release it is finally gone for good." +"The `start_server` parameter `force_final_distributed_eval` has long been a " +"historic artefact, in this release it is finally gone for good." msgstr "" -"start_server \"参数 \"force_final_distributed_eval " -"\"长期以来一直是个历史遗留问题,在此版本中终于永远消失了。" +"start_server \"参数 \"force_final_distributed_eval \"长期以来一直是个历史遗留" +"问题,在此版本中终于永远消失了。" #: ../../source/ref-changelog.md:606 msgid "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" +"**Make** `get_parameters` **configurable** ([#1242](https://github.com/adap/" +"flower/pull/1242))" msgstr "" -"**使** `get_parameters` **可配置** " -"([#1242](https://github.com/adap/flower/pull/1242))" +"**使** `get_parameters` **可配置** ([#1242](https://github.com/adap/flower/" +"pull/1242))" #: ../../source/ref-changelog.md:608 msgid "" @@ -17799,79 +17871,82 @@ msgstr "" #: ../../source/ref-changelog.md:612 msgid "" "The `start_simulation` function now accepts a configuration dictionary " -"`config` instead of the `num_rounds` integer. This improves the " -"consistency between `start_simulation` and `start_server` and makes " -"transitioning between the two easier." +"`config` instead of the `num_rounds` integer. This improves the consistency " +"between `start_simulation` and `start_server` and makes transitioning " +"between the two easier." msgstr "" -"现在,`start_simulation`(开始模拟)` 函数接受配置字典 `config` 而不是 `num_rounds` 整数。这改进了 " -"`start_simulation` 和 `start_server` 之间的一致性,并使两者之间的转换更容易。" +"现在,`start_simulation`(开始模拟)` 函数接受配置字典 `config` 而不是 " +"`num_rounds` 整数。这改进了 `start_simulation` 和 `start_server` 之间的一致" +"性,并使两者之间的转换更容易。" #: ../../source/ref-changelog.md:616 msgid "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" -msgstr "** 支持 Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" +"**Support Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" +msgstr "" +"** 支持 Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" #: ../../source/ref-changelog.md:618 msgid "" -"The previous Flower release introduced experimental support for Python " -"3.10, this release declares Python 3.10 support as stable." -msgstr "上一个 Flower 版本引入了对 Python 3.10 的实验支持,而本版本则宣布对 Python 3.10 的支持为稳定支持。" +"The previous Flower release introduced experimental support for Python 3.10, " +"this release declares Python 3.10 support as stable." 
+msgstr "" +"上一个 Flower 版本引入了对 Python 3.10 的实验支持,而本版本则宣布对 Python " +"3.10 的支持为稳定支持。" #: ../../source/ref-changelog.md:620 msgid "" -"**Make all** `Client` **and** `NumPyClient` **methods optional** " -"([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" +"**Make all** `Client` **and** `NumPyClient` **methods optional** ([#1260]" +"(https://github.com/adap/flower/pull/1260), [#1277](https://github.com/adap/" +"flower/pull/1277))" msgstr "" -"**使所有** `Client` **和** `NumPyClient` **方法成为可选** " -"([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" +"**使所有** `Client` **和** `NumPyClient` **方法成为可选** ([#1260](https://" +"github.com/adap/flower/pull/1260), [#1277](https://github.com/adap/flower/" +"pull/1277))" #: ../../source/ref-changelog.md:622 msgid "" "The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " -"`fit`, and `evaluate` are all optional. This enables writing clients that" -" implement, for example, only `fit`, but no other method. No need to " +"`fit`, and `evaluate` are all optional. This enables writing clients that " +"implement, for example, only `fit`, but no other method. No need to " "implement `evaluate` when using centralized evaluation!" msgstr "" "`Client`/`NumPyClient`的 \"get_properties\"、\"get_parameters\"、\"fit \"和 " -"\"evaluate \"方法都是可选的。这样就可以编写只实现 `fit` 而不实现其他方法的客户端。使用集中评估时,无需实现 " -"`evaluate`!" +"\"evaluate \"方法都是可选的。这样就可以编写只实现 `fit` 而不实现其他方法的客" +"户端。使用集中评估时,无需实现 `evaluate`!" #: ../../source/ref-changelog.md:624 msgid "" -"**Enable passing a** `Server` **instance to** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" +"**Enable passing a** `Server` **instance to** `start_simulation` ([#1281]" +"(https://github.com/adap/flower/pull/1281))" msgstr "" -"**启用向** `start_simulation` 传递** `Server` 实例 " -"([#1281](https://github.com/adap/flower/pull/1281))" +"**启用向** `start_simulation` 传递** `Server` 实例 ([#1281](https://github." +"com/adap/flower/pull/1281))" #: ../../source/ref-changelog.md:626 msgid "" -"Similar to `start_server`, `start_simulation` now accepts a full `Server`" -" instance. This enables users to heavily customize the execution of " -"eperiments and opens the door to running, for example, async FL using the" -" Virtual Client Engine." +"Similar to `start_server`, `start_simulation` now accepts a full `Server` " +"instance. This enables users to heavily customize the execution of " +"eperiments and opens the door to running, for example, async FL using the " +"Virtual Client Engine." msgstr "" -"与 `start_server` 类似,`start_simulation` 现在也接受一个完整的 `Server` " -"实例。这使得用户可以对实验的执行进行大量自定义,并为使用虚拟客户端引擎运行异步 FL 等打开了大门。" +"与 `start_server` 类似,`start_simulation` 现在也接受一个完整的 `Server` 实" +"例。这使得用户可以对实验的执行进行大量自定义,并为使用虚拟客户端引擎运行异步 " +"FL 等打开了大门。" #: ../../source/ref-changelog.md:628 msgid "" -"**Update code examples** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +"**Update code examples** ([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), [#1282](https://github." 
+"com/adap/flower/pull/1282))" msgstr "" -"**更新代码示例** ([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +"**更新代码示例** ([#1291](https://github.com/adap/flower/pull/1291), [#1286]" +"(https://github.com/adap/flower/pull/1286), [#1282](https://github.com/adap/" +"flower/pull/1282))" #: ../../source/ref-changelog.md:630 msgid "" -"Many code examples received small or even large maintenance updates, " -"among them are" +"Many code examples received small or even large maintenance updates, among " +"them are" msgstr "许多代码示例都进行了小规模甚至大规模的维护更新,其中包括" #: ../../source/ref-changelog.md:632 @@ -17900,9 +17975,10 @@ msgstr "`advanced_tensorflow`" #: ../../source/ref-changelog.md:639 msgid "" -"**Remove the obsolete simulation example** " -"([#1328](https://github.com/adap/flower/pull/1328))" -msgstr "**删除过时的模拟示例** ([#1328](https://github.com/adap/flower/pull/1328))" +"**Remove the obsolete simulation example** ([#1328](https://github.com/adap/" +"flower/pull/1328))" +msgstr "" +"**删除过时的模拟示例** ([#1328](https://github.com/adap/flower/pull/1328))" #: ../../source/ref-changelog.md:641 msgid "" @@ -17915,41 +17991,36 @@ msgstr "" #: ../../source/ref-changelog.md:643 msgid "" -"**Update documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" -msgstr "" -"**更新文档** ([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" +"**Update documentation** ([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), [#1251](https://github." 
+"com/adap/flower/pull/1251), [#1257](https://github.com/adap/flower/" +"pull/1257), [#1267](https://github.com/adap/flower/pull/1267), [#1268]" +"(https://github.com/adap/flower/pull/1268), [#1300](https://github.com/adap/" +"flower/pull/1300), [#1304](https://github.com/adap/flower/pull/1304), [#1305]" +"(https://github.com/adap/flower/pull/1305), [#1307](https://github.com/adap/" +"flower/pull/1307))" +msgstr "" +"**更新文档** ([#1223](https://github.com/adap/flower/pull/1223), [#1209]" +"(https://github.com/adap/flower/pull/1209), [#1251](https://github.com/adap/" +"flower/pull/1251), [#1257](https://github.com/adap/flower/pull/1257), [#1267]" +"(https://github.com/adap/flower/pull/1267), [#1268](https://github.com/adap/" +"flower/pull/1268), [#1300](https://github.com/adap/flower/pull/1300), [#1304]" +"(https://github.com/adap/flower/pull/1304), [#1305](https://github.com/adap/" +"flower/pull/1305), [#1307](https://github.com/adap/flower/pull/1307))" #: ../../source/ref-changelog.md:645 msgid "" "One substantial documentation update fixes multiple smaller rendering " "issues, makes titles more succinct to improve navigation, removes a " -"deprecated library, updates documentation dependencies, includes the " -"`flwr.common` module in the API reference, includes support for markdown-" -"based documentation, migrates the changelog from `.rst` to `.md`, and " -"fixes a number of smaller details!" +"deprecated library, updates documentation dependencies, includes the `flwr." +"common` module in the API reference, includes support for markdown-based " +"documentation, migrates the changelog from `.rst` to `.md`, and fixes a " +"number of smaller details!" msgstr "" -"其中一个实质性的文档更新修复了多个较小的渲染问题,使标题更加简洁以改善导航,删除了一个已废弃的库,更新了文档依赖关系,在 API 参考中包含了 " -"`flwr.common` 模块,包含了对基于 markdown 的文档的支持,将更新日志从 `.rst` 移植到了 " -"`.md`,并修复了一些较小的细节!" +"其中一个实质性的文档更新修复了多个较小的渲染问题,使标题更加简洁以改善导航," +"删除了一个已废弃的库,更新了文档依赖关系,在 API 参考中包含了 `flwr.common` " +"模块,包含了对基于 markdown 的文档的支持,将更新日志从 `.rst` 移植到了 `." +"md`,并修复了一些较小的细节!" #: ../../source/ref-changelog.md:647 ../../source/ref-changelog.md:702 #: ../../source/ref-changelog.md:771 ../../source/ref-changelog.md:810 @@ -17958,41 +18029,40 @@ msgstr "**小规模更新**" #: ../../source/ref-changelog.md:649 msgid "" -"Add round number to fit and evaluate log messages " -"([#1266](https://github.com/adap/flower/pull/1266))" -msgstr "添加四舍五入数字,以适应和评估日志信息([#1266](https://github.com/adap/flower/pull/1266))" +"Add round number to fit and evaluate log messages ([#1266](https://github." +"com/adap/flower/pull/1266))" +msgstr "" +"添加四舍五入数字,以适应和评估日志信息([#1266](https://github.com/adap/" +"flower/pull/1266))" #: ../../source/ref-changelog.md:650 msgid "" -"Add secure gRPC connection to the `advanced_tensorflow` code example " -"([#847](https://github.com/adap/flower/pull/847))" +"Add secure gRPC connection to the `advanced_tensorflow` code example ([#847]" +"(https://github.com/adap/flower/pull/847))" msgstr "" -"为 `advanced_tensorflow` 代码示例添加安全 gRPC 连接 " -"([#847](https://github.com/adap/flower/pull/847))" +"为 `advanced_tensorflow` 代码示例添加安全 gRPC 连接 ([#847](https://github." 
+"com/adap/flower/pull/847))" #: ../../source/ref-changelog.md:651 msgid "" -"Update developer tooling " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" +"Update developer tooling ([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), [#1301](https://github." +"com/adap/flower/pull/1301), [#1310](https://github.com/adap/flower/" +"pull/1310))" msgstr "" -"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310)" +"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), [#1276]" +"(https://github.com/adap/flower/pull/1276), [#1301](https://github.com/adap/" +"flower/pull/1301), [#1310](https://github.com/adap/flower/pull/1310)" #: ../../source/ref-changelog.md:652 msgid "" -"Rename ProtoBuf messages to improve consistency " -"([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " +"Rename ProtoBuf messages to improve consistency ([#1214](https://github.com/" +"adap/flower/pull/1214), [#1258](https://github.com/adap/flower/pull/1258), " "[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -"重命名 ProtoBuf 消息以提高一致性([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259)" +"重命名 ProtoBuf 消息以提高一致性([#1214](https://github.com/adap/flower/" +"pull/1214), [#1258](https://github.com/adap/flower/pull/1258), [#1259]" +"(https://github.com/adap/flower/pull/1259)" #: ../../source/ref-changelog.md:654 msgid "v0.19.0 (2022-05-18)" @@ -18000,158 +18070,162 @@ msgstr "v0.19.0 (2022-05-18)" #: ../../source/ref-changelog.md:658 msgid "" -"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" +"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** ([#919](https://" +"github.com/adap/flower/pull/919), [#1127](https://github.com/adap/flower/" +"pull/1127), [#914](https://github.com/adap/flower/pull/914))" msgstr "" -"**Flower Baselines(预览): FedOpt、FedBN、FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" +"**Flower Baselines(预览): FedOpt、FedBN、FedAvgM** ([#919](https://github." +"com/adap/flower/pull/919), [#1127](https://github.com/adap/flower/" +"pull/1127), [#914](https://github.com/adap/flower/pull/914))" #: ../../source/ref-changelog.md:660 #, fuzzy msgid "" "The first preview release of Flower Baselines has arrived! We're " "kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " -"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " -"With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" -"contribute-baselines.html)." 
-msgstr "" -"Flower Baselines 的第一个预览版已经发布!我们通过实现 " -"FedOpt(FedYogi、FedAdam、FedAdagrad)、FedBN 和 FedAvgM 来启动 Flower " -"Baselines。请查阅文档了解如何使用 [Flower Baselines](https://flower.ai/docs/using-" -"baselines.html)。在首次发布预览版时,我们还邀请社区成员[贡献自己的Baselines](https://flower.ai/docs" -"/contributing-baselines.html)。" +"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how to " +"use [Flower Baselines](https://flower.ai/docs/using-baselines.html). With " +"this first preview release we're also inviting the community to [contribute " +"their own baselines](https://flower.ai/docs/baselines/how-to-contribute-" +"baselines.html)." +msgstr "" +"Flower Baselines 的第一个预览版已经发布!我们通过实现 FedOpt(FedYogi、" +"FedAdam、FedAdagrad)、FedBN 和 FedAvgM 来启动 Flower Baselines。请查阅文档了" +"解如何使用 [Flower Baselines](https://flower.ai/docs/using-baselines.html)。" +"在首次发布预览版时,我们还邀请社区成员[贡献自己的Baselines](https://flower." +"ai/docs/contributing-baselines.html)。" #: ../../source/ref-changelog.md:662 msgid "" -"**C++ client SDK (preview) and code example** " -"([#1111](https://github.com/adap/flower/pull/1111))" -msgstr "**C++客户端SDK(预览版)和代码示例**([#1111](https://github.com/adap/flower/pull/1111))" +"**C++ client SDK (preview) and code example** ([#1111](https://github.com/" +"adap/flower/pull/1111))" +msgstr "" +"**C++客户端SDK(预览版)和代码示例**([#1111](https://github.com/adap/flower/" +"pull/1111))" #: ../../source/ref-changelog.md:664 msgid "" -"Preview support for Flower clients written in C++. The C++ preview " -"includes a Flower client SDK and a quickstart code example that " -"demonstrates a simple C++ client using the SDK." +"Preview support for Flower clients written in C++. The C++ preview includes " +"a Flower client SDK and a quickstart code example that demonstrates a simple " +"C++ client using the SDK." msgstr "" -"预览版支持用 C++ 编写的 Flower 客户端。C++ 预览版包括一个 Flower 客户端 SDK 和一个快速入门代码示例,使用 SDK " -"演示了一个简单的 C++ 客户端。" +"预览版支持用 C++ 编写的 Flower 客户端。C++ 预览版包括一个 Flower 客户端 SDK " +"和一个快速入门代码示例,使用 SDK 演示了一个简单的 C++ 客户端。" #: ../../source/ref-changelog.md:666 msgid "" -"**Add experimental support for Python 3.10 and Python 3.11** " -"([#1135](https://github.com/adap/flower/pull/1135))" +"**Add experimental support for Python 3.10 and Python 3.11** ([#1135]" +"(https://github.com/adap/flower/pull/1135))" msgstr "" -"** 增加对 Python 3.10 和 Python 3.11 的实验支持** " -"([#1135](https://github.com/adap/flower/pull/1135))" +"** 增加对 Python 3.10 和 Python 3.11 的实验支持** ([#1135](https://github." +"com/adap/flower/pull/1135))" #: ../../source/ref-changelog.md:668 msgid "" -"Python 3.10 is the latest stable release of Python and Python 3.11 is due" -" to be released in October. This Flower release adds experimental support" -" for both Python versions." +"Python 3.10 is the latest stable release of Python and Python 3.11 is due to " +"be released in October. This Flower release adds experimental support for " +"both Python versions." 
msgstr "" -"Python 3.10 是 Python 的最新稳定版本,Python 3.11 将于 10 月份发布。Flower 版本增加了对这两个 " -"Python 版本的实验支持。" +"Python 3.10 是 Python 的最新稳定版本,Python 3.11 将于 10 月份发布。Flower 版" +"本增加了对这两个 Python 版本的实验支持。" #: ../../source/ref-changelog.md:670 msgid "" -"**Aggregate custom metrics through user-provided functions** " -"([#1144](https://github.com/adap/flower/pull/1144))" -msgstr "**通过用户提供的函数聚合自定义指标**([#1144](https://github.com/adap/flower/pull/1144))" +"**Aggregate custom metrics through user-provided functions** ([#1144]" +"(https://github.com/adap/flower/pull/1144))" +msgstr "" +"**通过用户提供的函数聚合自定义指标**([#1144](https://github.com/adap/flower/" +"pull/1144))" #: ../../source/ref-changelog.md:672 msgid "" -"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" -" customize the strategy. Built-in strategies support two new arguments, " +"Custom metrics (e.g., `accuracy`) can now be aggregated without having to " +"customize the strategy. Built-in strategies support two new arguments, " "`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " "allow passing custom metric aggregation functions." msgstr "" -"现在无需定制策略即可聚合自定义度量(如`准确度`)。内置策略支持两个新参数:`fit_metrics_aggregation_fn` " -"和`evaluate_metrics_aggregation_fn`,允许传递自定义度量聚合函数。" +"现在无需定制策略即可聚合自定义度量(如`准确度`)。内置策略支持两个新参数:" +"`fit_metrics_aggregation_fn` 和`evaluate_metrics_aggregation_fn`,允许传递自" +"定义度量聚合函数。" #: ../../source/ref-changelog.md:674 msgid "" -"**User-configurable round timeout** " -"([#1162](https://github.com/adap/flower/pull/1162))" -msgstr "**用户可配置的回合超时**([#1162](https://github.com/adap/flower/pull/1162))" +"**User-configurable round timeout** ([#1162](https://github.com/adap/flower/" +"pull/1162))" +msgstr "" +"**用户可配置的回合超时**([#1162](https://github.com/adap/flower/pull/1162))" #: ../../source/ref-changelog.md:676 msgid "" "A new configuration value allows the round timeout to be set for " -"`start_server` and `start_simulation`. If the `config` dictionary " -"contains a `round_timeout` key (with a `float` value in seconds), the " -"server will wait *at least* `round_timeout` seconds before it closes the " -"connection." +"`start_server` and `start_simulation`. If the `config` dictionary contains a " +"`round_timeout` key (with a `float` value in seconds), the server will wait " +"*at least* `round_timeout` seconds before it closes the connection." msgstr "" -"新的配置值允许为 `start_server` 和 `start_simulation` 设置回合超时。如果 `config` 字典中包含一个 " -"`round_timeout` 键(以秒为单位的 `float`值),服务器将至少等待 ** `round_timeout` 秒后才关闭连接。" +"新的配置值允许为 `start_server` 和 `start_simulation` 设置回合超时。如果 " +"`config` 字典中包含一个 `round_timeout` 键(以秒为单位的 `float`值),服务器" +"将至少等待 ** `round_timeout` 秒后才关闭连接。" #: ../../source/ref-changelog.md:678 msgid "" -"**Enable both federated evaluation and centralized evaluation to be used " -"at the same time in all built-in strategies** " -"([#1091](https://github.com/adap/flower/pull/1091))" +"**Enable both federated evaluation and centralized evaluation to be used at " +"the same time in all built-in strategies** ([#1091](https://github.com/adap/" +"flower/pull/1091))" msgstr "" -"**允许在所有内置策略中同时使用联邦评价和集中评估** " -"([#1091](https://github.com/adap/flower/pull/1091))" +"**允许在所有内置策略中同时使用联邦评价和集中评估** ([#1091](https://github." +"com/adap/flower/pull/1091))" #: ../../source/ref-changelog.md:680 msgid "" -"Built-in strategies can now perform both federated evaluation (i.e., " -"client-side) and centralized evaluation (i.e., server-side) in the same " -"round. 
Federated evaluation can be disabled by setting `fraction_eval` to" -" `0.0`." +"Built-in strategies can now perform both federated evaluation (i.e., client-" +"side) and centralized evaluation (i.e., server-side) in the same round. " +"Federated evaluation can be disabled by setting `fraction_eval` to `0.0`." msgstr "" -"内置策略现在可以在同一轮中同时执行联邦评估(即客户端)和集中评估(即服务器端)。可以通过将 `fraction_eval` 设置为 " -"`0.0`来禁用联邦评估。" +"内置策略现在可以在同一轮中同时执行联邦评估(即客户端)和集中评估(即服务器" +"端)。可以通过将 `fraction_eval` 设置为 `0.0`来禁用联邦评估。" #: ../../source/ref-changelog.md:682 msgid "" -"**Two new Jupyter Notebook tutorials** " -"([#1141](https://github.com/adap/flower/pull/1141))" +"**Two new Jupyter Notebook tutorials** ([#1141](https://github.com/adap/" +"flower/pull/1141))" msgstr "" -"**两本新的 Jupyter Notebook 教程** " -"([#1141](https://github.com/adap/flower/pull/1141))" +"**两本新的 Jupyter Notebook 教程** ([#1141](https://github.com/adap/flower/" +"pull/1141))" #: ../../source/ref-changelog.md:684 msgid "" -"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " -"basic and intermediate Flower features:" -msgstr "两本 Jupyter Notebook 教程(与 Google Colab 兼容)介绍了 Flower 的基本和中级功能:" +"Two Jupyter Notebook tutorials (compatible with Google Colab) explain basic " +"and intermediate Flower features:" +msgstr "" +"两本 Jupyter Notebook 教程(与 Google Colab 兼容)介绍了 Flower 的基本和中级" +"功能:" #: ../../source/ref-changelog.md:686 msgid "" -"*An Introduction to Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" +"*An Introduction to Federated Learning*: [Open in Colab](https://colab." +"research.google.com/github/adap/flower/blob/main/tutorials/Flower-1-Intro-to-" +"FL-PyTorch.ipynb)" msgstr "" -"*联邦学习简介*: [在 Colab " -"中打开](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" +"*联邦学习简介*: [在 Colab 中打开](https://colab.research.google.com/github/" +"adap/flower/blob/main/tutorials/Flower-1-Intro-to-FL-PyTorch.ipynb)" #: ../../source/ref-changelog.md:688 msgid "" -"*Using Strategies in Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +"*Using Strategies in Federated Learning*: [Open in Colab](https://colab." +"research.google.com/github/adap/flower/blob/main/tutorials/Flower-2-" +"Strategies-in-FL-PyTorch.ipynb)" msgstr "" -"*在联邦学习中使用策略*: [在 Colab " -"中打开](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +"*在联邦学习中使用策略*: [在 Colab 中打开](https://colab.research.google.com/" +"github/adap/flower/blob/main/tutorials/Flower-2-Strategies-in-FL-PyTorch." 
+"ipynb)" #: ../../source/ref-changelog.md:690 msgid "" -"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " -"([#1076](https://github.com/adap/flower/pull/1076))" +"**New FedAvgM strategy (Federated Averaging with Server Momentum)** ([#1076]" +"(https://github.com/adap/flower/pull/1076))" msgstr "" -"**新的 FedAvgM 策略(带服务器动量的联邦平均)** " -"([#1076](https://github.com/adap/flower/pull/1076))" +"**新的 FedAvgM 策略(带服务器动量的联邦平均)** ([#1076](https://github.com/" +"adap/flower/pull/1076))" #: ../../source/ref-changelog.md:692 msgid "" @@ -18161,9 +18235,11 @@ msgstr "新的 \"FedAvgM \"策略实现了带服务器动量的联邦平均[Hsu #: ../../source/ref-changelog.md:694 msgid "" -"**New advanced PyTorch code example** " -"([#1007](https://github.com/adap/flower/pull/1007))" -msgstr "**新的 PyTorch 高级代码示例** ([#1007](https://github.com/adap/flower/pull/1007))" +"**New advanced PyTorch code example** ([#1007](https://github.com/adap/" +"flower/pull/1007))" +msgstr "" +"**新的 PyTorch 高级代码示例** ([#1007](https://github.com/adap/flower/" +"pull/1007))" #: ../../source/ref-changelog.md:696 msgid "" @@ -18173,8 +18249,7 @@ msgstr "新代码示例 (`advanced_pytorch`) 演示了 PyTorch 的高级 Flower #: ../../source/ref-changelog.md:698 msgid "" -"**New JAX code example** " -"([#906](https://github.com/adap/flower/pull/906), " +"**New JAX code example** ([#906](https://github.com/adap/flower/pull/906), " "[#1143](https://github.com/adap/flower/pull/1143))" msgstr "" "**新的 JAX 代码示例**([#906](https://github.com/adap/flower/pull/906), " @@ -18184,107 +18259,114 @@ msgstr "" msgid "" "A new code example (`jax_from_centralized_to_federated`) shows federated " "learning with JAX and Flower." -msgstr "新代码示例(`jax_from_centralized_to_federated`)展示了使用 JAX 和 Flower 的联邦学习。" +msgstr "" +"新代码示例(`jax_from_centralized_to_federated`)展示了使用 JAX 和 Flower 的" +"联邦学习。" #: ../../source/ref-changelog.md:704 msgid "" "New option to keep Ray running if Ray was already initialized in " "`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" msgstr "" -"新增选项,用于在 \"start_simulation\"(开始模拟)中已初始化 Ray 的情况下保持 Ray " -"运行([#1177](https://github.com/adap/flower/pull/1177))" +"新增选项,用于在 \"start_simulation\"(开始模拟)中已初始化 Ray 的情况下保持 " +"Ray 运行([#1177](https://github.com/adap/flower/pull/1177))" #: ../../source/ref-changelog.md:705 msgid "" "Add support for custom `ClientManager` as a `start_simulation` parameter " "([#1171](https://github.com/adap/flower/pull/1171))" msgstr "" -"添加对自定义 \"客户端管理器 \"作为 \"start_simulation " -"\"参数的支持([#1171](https://github.com/adap/flower/pull/1171))" +"添加对自定义 \"客户端管理器 \"作为 \"start_simulation \"参数的支持([#1171]" +"(https://github.com/adap/flower/pull/1171))" #: ../../source/ref-changelog.md:706 msgid "" -"New documentation for [implementing " -"strategies](https://flower.ai/docs/framework/how-to-implement-" -"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175))" +"New documentation for [implementing strategies](https://flower.ai/docs/" +"framework/how-to-implement-strategies.html) ([#1097](https://github.com/adap/" +"flower/pull/1097), [#1175](https://github.com/adap/flower/pull/1175))" msgstr "" -"[实施战略](https://flower.ai/docs/framework/how-to-implement-strategies.html)" -" 的新文件([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175)" +"[实施战略](https://flower.ai/docs/framework/how-to-implement-strategies." 
+"html) 的新文件([#1097](https://github.com/adap/flower/pull/1097), [#1175]" +"(https://github.com/adap/flower/pull/1175)" #: ../../source/ref-changelog.md:707 msgid "" -"New mobile-friendly documentation theme " -"([#1174](https://github.com/adap/flower/pull/1174))" -msgstr "新的移动友好型文档主题 ([#1174](https://github.com/adap/flower/pull/1174))" +"New mobile-friendly documentation theme ([#1174](https://github.com/adap/" +"flower/pull/1174))" +msgstr "" +"新的移动友好型文档主题 ([#1174](https://github.com/adap/flower/pull/1174))" #: ../../source/ref-changelog.md:708 msgid "" "Limit version range for (optional) `ray` dependency to include only " -"compatible releases (`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" +"compatible releases (`>=1.9.2,<1.12.0`) ([#1205](https://github.com/adap/" +"flower/pull/1205))" msgstr "" "限制(可选)`ray`依赖的版本范围,使其仅包含兼容版本(`>=1.9.2,<1.12.0`) " "([#1205](https://github.com/adap/flower/pull/1205))" #: ../../source/ref-changelog.md:712 msgid "" -"**Remove deprecated support for Python 3.6** " -"([#871](https://github.com/adap/flower/pull/871))" -msgstr "**删除对 Python 3.6 的过时支持** ([#871](https://github.com/adap/flower/pull/871))" +"**Remove deprecated support for Python 3.6** ([#871](https://github.com/adap/" +"flower/pull/871))" +msgstr "" +"**删除对 Python 3.6 的过时支持** ([#871](https://github.com/adap/flower/" +"pull/871))" #: ../../source/ref-changelog.md:713 msgid "" -"**Remove deprecated KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" -msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" +"**Remove deprecated KerasClient** ([#857](https://github.com/adap/flower/" +"pull/857))" +msgstr "" +"**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" #: ../../source/ref-changelog.md:714 msgid "" -"**Remove deprecated no-op extra installs** " -"([#973](https://github.com/adap/flower/pull/973))" -msgstr "**移除过时的不操作额外安装** ([#973](https://github.com/adap/flower/pull/973))" +"**Remove deprecated no-op extra installs** ([#973](https://github.com/adap/" +"flower/pull/973))" +msgstr "" +"**移除过时的不操作额外安装** ([#973](https://github.com/adap/flower/" +"pull/973))" #: ../../source/ref-changelog.md:715 msgid "" "**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " "([#869](https://github.com/adap/flower/pull/869))" msgstr "" -"**从** `FitRes` **和** `EvaluateRes` 中移除已废弃的 proto 字段 " -"([#869](https://github.com/adap/flower/pull/869))" +"**从** `FitRes` **和** `EvaluateRes` 中移除已废弃的 proto 字段 ([#869]" +"(https://github.com/adap/flower/pull/869))" #: ../../source/ref-changelog.md:716 msgid "" -"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " -"([#1107](https://github.com/adap/flower/pull/1107))" +"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** ([#1107]" +"(https://github.com/adap/flower/pull/1107))" msgstr "" -"**移除过时的 QffedAvg 策略(由 QFedAvg 取代)** " -"([#1107](https://github.com/adap/flower/pull/1107))" +"**移除过时的 QffedAvg 策略(由 QFedAvg 取代)** ([#1107](https://github.com/" +"adap/flower/pull/1107))" #: ../../source/ref-changelog.md:717 msgid "" -"**Remove deprecated DefaultStrategy strategy** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"**Remove deprecated DefaultStrategy strategy** ([#1142](https://github.com/" +"adap/flower/pull/1142))" msgstr "" -"**删除过时的 DefaultStrategy 策略** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"**删除过时的 DefaultStrategy 策略** ([#1142](https://github.com/adap/flower/" +"pull/1142))" #: ../../source/ref-changelog.md:718 msgid "" 
-"**Remove deprecated support for eval_fn accuracy return value** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"**Remove deprecated support for eval_fn accuracy return value** ([#1142]" +"(https://github.com/adap/flower/pull/1142))" msgstr "" -"**删除已过时的对 eval_fn 返回值准确性的支持** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"**删除已过时的对 eval_fn 返回值准确性的支持** ([#1142](https://github.com/" +"adap/flower/pull/1142))" #: ../../source/ref-changelog.md:719 msgid "" "**Remove deprecated support for passing initial parameters as NumPy " "ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -"**移除对以 NumPy ndarrays 传递初始参数的过时支持** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"**移除对以 NumPy ndarrays 传递初始参数的过时支持** ([#1142](https://github." +"com/adap/flower/pull/1142))" #: ../../source/ref-changelog.md:721 msgid "v0.18.0 (2022-02-28)" @@ -18293,197 +18375,201 @@ msgstr "v0.18.0 (2022-02-28)" #: ../../source/ref-changelog.md:725 msgid "" "**Improved Virtual Client Engine compatibility with Jupyter Notebook / " -"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" +"Google Colab** ([#866](https://github.com/adap/flower/pull/866), [#872]" +"(https://github.com/adap/flower/pull/872), [#833](https://github.com/adap/" +"flower/pull/833), [#1036](https://github.com/adap/flower/pull/1036))" msgstr "" -"**改进了虚拟客户端引擎与 Jupyter Notebook / Google Colab 的兼容性** " -"([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" +"**改进了虚拟客户端引擎与 Jupyter Notebook / Google Colab 的兼容性** ([#866]" +"(https://github.com/adap/flower/pull/866), [#872](https://github.com/adap/" +"flower/pull/872), [#833](https://github.com/adap/flower/pull/833), [#1036]" +"(https://github.com/adap/flower/pull/1036))" #: ../../source/ref-changelog.md:727 msgid "" -"Simulations (using the Virtual Client Engine through `start_simulation`) " -"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " +"Simulations (using the Virtual Client Engine through `start_simulation`) now " +"work more smoothly on Jupyter Notebooks (incl. Google Colab) after " "installing Flower with the `simulation` extra (`pip install " "flwr[simulation]`)." msgstr "" -"通过 `start_simulation` 在 Jupyter 笔记本(包括 Google Colab)上安装 Flower 并附加 " -"`simulation` (`pip install flwr[simulation]`)后,模拟(通过 `start_simulation` " -"使用虚拟客户端引擎)现在可以更流畅地运行。" +"通过 `start_simulation` 在 Jupyter 笔记本(包括 Google Colab)上安装 Flower " +"并附加 `simulation` (`pip install flwr[simulation]`)后,模拟(通过 " +"`start_simulation` 使用虚拟客户端引擎)现在可以更流畅地运行。" #: ../../source/ref-changelog.md:729 msgid "" -"**New Jupyter Notebook code example** " -"([#833](https://github.com/adap/flower/pull/833))" +"**New Jupyter Notebook code example** ([#833](https://github.com/adap/flower/" +"pull/833))" msgstr "" -"**新的 Jupyter Notebook 代码示例** " -"([#833](https://github.com/adap/flower/pull/833))" +"**新的 Jupyter Notebook 代码示例** ([#833](https://github.com/adap/flower/" +"pull/833))" #: ../../source/ref-changelog.md:731 msgid "" -"A new code example (`quickstart_simulation`) demonstrates Flower " -"simulations using the Virtual Client Engine through Jupyter Notebook " -"(incl. Google Colab)." 
+"A new code example (`quickstart_simulation`) demonstrates Flower simulations " +"using the Virtual Client Engine through Jupyter Notebook (incl. Google " +"Colab)." msgstr "" "新代码示例(`quickstart_simulation`)通过 Jupyter Notebook(包括 Google " "Colab)演示了使用虚拟客户端引擎进行 Flower 模拟。" #: ../../source/ref-changelog.md:733 msgid "" -"**Client properties (feature preview)** " -"([#795](https://github.com/adap/flower/pull/795))" -msgstr "**客户端属性(功能预览)** ([#795](https://github.com/adap/flower/pull/795))" +"**Client properties (feature preview)** ([#795](https://github.com/adap/" +"flower/pull/795))" +msgstr "" +"**客户端属性(功能预览)** ([#795](https://github.com/adap/flower/pull/795))" #: ../../source/ref-changelog.md:735 msgid "" -"Clients can implement a new method `get_properties` to enable server-side" -" strategies to query client properties." -msgstr "客户端可以实现一个新方法 `get_properties`,以启用服务器端策略来查询客户端属性。" +"Clients can implement a new method `get_properties` to enable server-side " +"strategies to query client properties." +msgstr "" +"客户端可以实现一个新方法 `get_properties`,以启用服务器端策略来查询客户端属" +"性。" #: ../../source/ref-changelog.md:737 msgid "" -"**Experimental Android support with TFLite** " -"([#865](https://github.com/adap/flower/pull/865))" -msgstr "** 使用 TFLite 实验性支持安卓系统** ([#865](https://github.com/adap/flower/pull/865))" +"**Experimental Android support with TFLite** ([#865](https://github.com/adap/" +"flower/pull/865))" +msgstr "" +"** 使用 TFLite 实验性支持安卓系统** ([#865](https://github.com/adap/flower/" +"pull/865))" #: ../../source/ref-changelog.md:739 msgid "" "Android support has finally arrived in `main`! Flower is both client-" "agnostic and framework-agnostic by design. One can integrate arbitrary " -"client platforms and with this release, using Flower on Android has " -"become a lot easier." +"client platforms and with this release, using Flower on Android has become a " +"lot easier." msgstr "" -"`main`终于支持 Android 了!Flower 的设计与客户端和框架无关。我们可以集成任意客户端平台,有了这个版本,在安卓系统上使用 " -"Flower 就变得更容易了。" +"`main`终于支持 Android 了!Flower 的设计与客户端和框架无关。我们可以集成任意" +"客户端平台,有了这个版本,在安卓系统上使用 Flower 就变得更容易了。" #: ../../source/ref-changelog.md:741 msgid "" -"The example uses TFLite on the client side, along with a new " -"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " -"still experimental, but they are a first step towards a fully-fledged " -"Android SDK and a unified `FedAvg` implementation that integrated the new" -" functionality from `FedAvgAndroid`." +"The example uses TFLite on the client side, along with a new `FedAvgAndroid` " +"strategy. The Android client and `FedAvgAndroid` are still experimental, but " +"they are a first step towards a fully-fledged Android SDK and a unified " +"`FedAvg` implementation that integrated the new functionality from " +"`FedAvgAndroid`." 
msgstr "" "该示例在客户端使用了 TFLite 以及新的 `FedAvgAndroid`策略。Android 客户端和 " -"`FedAvgAndroid`仍处于试验阶段,但这是向成熟的 Android SDK 和集成了 `FedAvgAndroid`新功能的统一 " -"`FedAvg`实现迈出的第一步。" +"`FedAvgAndroid`仍处于试验阶段,但这是向成熟的 Android SDK 和集成了 " +"`FedAvgAndroid`新功能的统一 `FedAvg`实现迈出的第一步。" #: ../../source/ref-changelog.md:743 msgid "" -"**Make gRPC keepalive time user-configurable and decrease default " -"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" +"**Make gRPC keepalive time user-configurable and decrease default keepalive " +"time** ([#1069](https://github.com/adap/flower/pull/1069))" msgstr "" -"**使 gRPC 保持连接时间可由用户配置,并缩短默认保持连接时间** " -"([#1069](https://github.com/adap/flower/pull/1069))" +"**使 gRPC 保持连接时间可由用户配置,并缩短默认保持连接时间** ([#1069]" +"(https://github.com/adap/flower/pull/1069))" #: ../../source/ref-changelog.md:745 msgid "" "The default gRPC keepalive time has been reduced to increase the " -"compatibility of Flower with more cloud environments (for example, " -"Microsoft Azure). Users can configure the keepalive time to customize the" -" gRPC stack based on specific requirements." +"compatibility of Flower with more cloud environments (for example, Microsoft " +"Azure). Users can configure the keepalive time to customize the gRPC stack " +"based on specific requirements." msgstr "" -"为提高 Flower 与更多云环境(如 Microsoft Azure)的兼容性,缩短了默认 gRPC 保持时间。用户可以根据具体要求配置 " -"keepalive 时间,自定义 gRPC 堆栈。" +"为提高 Flower 与更多云环境(如 Microsoft Azure)的兼容性,缩短了默认 gRPC 保" +"持时间。用户可以根据具体要求配置 keepalive 时间,自定义 gRPC 堆栈。" #: ../../source/ref-changelog.md:747 msgid "" -"**New differential privacy example using Opacus and PyTorch** " -"([#805](https://github.com/adap/flower/pull/805))" +"**New differential privacy example using Opacus and PyTorch** ([#805]" +"(https://github.com/adap/flower/pull/805))" msgstr "" -"**使用 Opacus 和 PyTorch 的新差分隐私示例** " -"([#805](https://github.com/adap/flower/pull/805))" +"**使用 Opacus 和 PyTorch 的新差分隐私示例** ([#805](https://github.com/adap/" +"flower/pull/805))" #: ../../source/ref-changelog.md:749 msgid "" -"A new code example (`opacus`) demonstrates differentially-private " -"federated learning with Opacus, PyTorch, and Flower." -msgstr "一个新的代码示例(\"opacus\")演示了使用 Opacus、PyTorch 和 Flower 进行差分隐私的联邦学习。" +"A new code example (`opacus`) demonstrates differentially-private federated " +"learning with Opacus, PyTorch, and Flower." +msgstr "" +"一个新的代码示例(\"opacus\")演示了使用 Opacus、PyTorch 和 Flower 进行差分隐" +"私的联邦学习。" #: ../../source/ref-changelog.md:751 msgid "" -"**New Hugging Face Transformers code example** " -"([#863](https://github.com/adap/flower/pull/863))" +"**New Hugging Face Transformers code example** ([#863](https://github.com/" +"adap/flower/pull/863))" msgstr "" -"**新的Hugging Face Transformers代码示例** " -"([#863](https://github.com/adap/flower/pull/863))" +"**新的Hugging Face Transformers代码示例** ([#863](https://github.com/adap/" +"flower/pull/863))" #: ../../source/ref-changelog.md:753 msgid "" -"A new code example (`quickstart_huggingface`) demonstrates usage of " -"Hugging Face Transformers with Flower." -msgstr "新的代码示例(`quickstart_huggingface`)证明了结合Flower和Hugging Face Transformers的实用性。" +"A new code example (`quickstart_huggingface`) demonstrates usage of Hugging " +"Face Transformers with Flower." 
+msgstr "" +"新的代码示例(`quickstart_huggingface`)证明了结合Flower和Hugging Face " +"Transformers的实用性。" #: ../../source/ref-changelog.md:755 msgid "" -"**New MLCube code example** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +"**New MLCube code example** ([#779](https://github.com/adap/flower/" +"pull/779), [#1034](https://github.com/adap/flower/pull/1034), [#1065]" +"(https://github.com/adap/flower/pull/1065), [#1090](https://github.com/adap/" +"flower/pull/1090))" msgstr "" "**新的 MLCube 代码示例** ([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +"[#1034](https://github.com/adap/flower/pull/1034), [#1065](https://github." +"com/adap/flower/pull/1065), [#1090](https://github.com/adap/flower/" +"pull/1090))" #: ../../source/ref-changelog.md:757 msgid "" -"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " -"with Flower." +"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube with " +"Flower." msgstr "新代码示例(\"quickstart_mlcube\")演示了 MLCube 与 Flower 的用法。" #: ../../source/ref-changelog.md:759 msgid "" -"**SSL-enabled server and client** " -"([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" -msgstr "" -"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +"**SSL-enabled server and client** ([#842](https://github.com/adap/flower/" +"pull/842), [#844](https://github.com/adap/flower/pull/844), [#845](https://" +"github.com/adap/flower/pull/845), [#847](https://github.com/adap/flower/" +"pull/847), [#993](https://github.com/adap/flower/pull/993), [#994](https://" +"github.com/adap/flower/pull/994))" +msgstr "" +"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/" +"pull/842), [#844](https://github.com/adap/flower/pull/844), [#845](https://" +"github.com/adap/flower/pull/845), [#847](https://github.com/adap/flower/" +"pull/847), [#993](https://github.com/adap/flower/pull/993), [#994](https://" +"github.com/adap/flower/pull/994))" #: ../../source/ref-changelog.md:761 msgid "" -"SSL enables secure encrypted connections between clients and servers. " -"This release open-sources the Flower secure gRPC implementation to make " -"encrypted communication channels accessible to all Flower users." -msgstr "SSL 可实现客户端与服务器之间的安全加密连接。该版本开源了 Flower 安全 gRPC 实现,使所有 Flower 用户都能访问加密通信通道。" +"SSL enables secure encrypted connections between clients and servers. This " +"release open-sources the Flower secure gRPC implementation to make encrypted " +"communication channels accessible to all Flower users." 
+msgstr "" +"SSL 可实现客户端与服务器之间的安全加密连接。该版本开源了 Flower 安全 gRPC 实" +"现,使所有 Flower 用户都能访问加密通信通道。" #: ../../source/ref-changelog.md:763 msgid "" -"**Updated** `FedAdam` **and** `FedYogi` **strategies** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +"**Updated** `FedAdam` **and** `FedYogi` **strategies** ([#885](https://" +"github.com/adap/flower/pull/885), [#895](https://github.com/adap/flower/" +"pull/895))" msgstr "" -"**更新**`FedAdam`**和**`FedYogi`**战略** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +"**更新**`FedAdam`**和**`FedYogi`**战略** ([#885](https://github.com/adap/" +"flower/pull/885), [#895](https://github.com/adap/flower/pull/895))" #: ../../source/ref-changelog.md:765 msgid "" -"`FedAdam` and `FedAdam` match the latest version of the Adaptive " -"Federated Optimization paper." +"`FedAdam` and `FedAdam` match the latest version of the Adaptive Federated " +"Optimization paper." msgstr "FedAdam \"和 \"FedAdam \"与最新版本的 \"自适应联邦优化 \"论文相匹配。" #: ../../source/ref-changelog.md:767 msgid "" -"**Initialize** `start_simulation` **with a list of client IDs** " -"([#860](https://github.com/adap/flower/pull/860))" +"**Initialize** `start_simulation` **with a list of client IDs** ([#860]" +"(https://github.com/adap/flower/pull/860))" msgstr "" -"**初始化** `start_simulation` **使用客户端 ID 列表** " -"([#860](https://github.com/adap/flower/pull/860))" +"**初始化** `start_simulation` **使用客户端 ID 列表** ([#860](https://github." +"com/adap/flower/pull/860))" #: ../../source/ref-changelog.md:769 msgid "" @@ -18494,52 +18580,56 @@ msgid "" "identifiers." msgstr "" "现在可以使用客户端 ID 列表(`clients_ids`,类型:`List[str]`)调用 " -"`start_simulation`。每当需要初始化客户端时,这些 ID 就会被传递到 `client_fn` 中,这样就能更轻松地加载无法通过 " -"`int` 标识符访问的数据分区。" +"`start_simulation`。每当需要初始化客户端时,这些 ID 就会被传递到 `client_fn` " +"中,这样就能更轻松地加载无法通过 `int` 标识符访问的数据分区。" #: ../../source/ref-changelog.md:773 msgid "" -"Update `num_examples` calculation in PyTorch code examples in " -"([#909](https://github.com/adap/flower/pull/909))" +"Update `num_examples` calculation in PyTorch code examples in ([#909]" +"(https://github.com/adap/flower/pull/909))" msgstr "" -"更新 PyTorch 代码示例中的 \"num_examples \"计算 " -"([#909](https://github.com/adap/flower/pull/909))" +"更新 PyTorch 代码示例中的 \"num_examples \"计算 ([#909](https://github.com/" +"adap/flower/pull/909))" #: ../../source/ref-changelog.md:774 msgid "" -"Expose Flower version through `flwr.__version__` " -"([#952](https://github.com/adap/flower/pull/952))" +"Expose Flower version through `flwr.__version__` ([#952](https://github.com/" +"adap/flower/pull/952))" msgstr "" -"通过 `flwr.__version__` 公开 Flower 版本 " -"([#952](https://github.com/adap/flower/pull/952))" +"通过 `flwr.__version__` 公开 Flower 版本 ([#952](https://github.com/adap/" +"flower/pull/952))" #: ../../source/ref-changelog.md:775 msgid "" -"`start_server` in `app.py` now returns a `History` object containing " -"metrics from training ([#974](https://github.com/adap/flower/pull/974))" +"`start_server` in `app.py` now returns a `History` object containing metrics " +"from training ([#974](https://github.com/adap/flower/pull/974))" msgstr "" -"`app.py`中的 `start_server`现在会返回一个 `History` " -"对象,其中包含训练中的指标([#974](https://github.com/adap/flower/pull/974))" +"`app.py`中的 `start_server`现在会返回一个 `History` 对象,其中包含训练中的指" +"标([#974](https://github.com/adap/flower/pull/974))" #: ../../source/ref-changelog.md:776 msgid "" -"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " 
-"([#978](https://github.com/adap/flower/pull/978))" +"Make `max_workers` (used by `ThreadPoolExecutor`) configurable ([#978]" +"(https://github.com/adap/flower/pull/978))" msgstr "" -"使 `max_workers`(由 " -"`ThreadPoolExecutor`使用)可配置([#978](https://github.com/adap/flower/pull/978))" +"使 `max_workers`(由 `ThreadPoolExecutor`使用)可配置([#978](https://github." +"com/adap/flower/pull/978))" #: ../../source/ref-changelog.md:777 msgid "" -"Increase sleep time after server start to three seconds in all code " -"examples ([#1086](https://github.com/adap/flower/pull/1086))" -msgstr "在所有代码示例中,将服务器启动后的休眠时间延长至三秒([#1086](https://github.com/adap/flower/pull/1086))" +"Increase sleep time after server start to three seconds in all code examples " +"([#1086](https://github.com/adap/flower/pull/1086))" +msgstr "" +"在所有代码示例中,将服务器启动后的休眠时间延长至三秒([#1086](https://github." +"com/adap/flower/pull/1086))" #: ../../source/ref-changelog.md:778 msgid "" -"Added a new FAQ section to the documentation " -"([#948](https://github.com/adap/flower/pull/948))" -msgstr "在文档中添加了新的常见问题部分 ([#948](https://github.com/adap/flower/pull/948))" +"Added a new FAQ section to the documentation ([#948](https://github.com/adap/" +"flower/pull/948))" +msgstr "" +"在文档中添加了新的常见问题部分 ([#948](https://github.com/adap/flower/" +"pull/948))" #: ../../source/ref-changelog.md:779 msgid "" @@ -18552,20 +18642,21 @@ msgid "" "**Removed** `flwr_example` **and** `flwr_experimental` **from release " "build** ([#869](https://github.com/adap/flower/pull/869))" msgstr "" -"**从发布版中删除**`flwr_example`**和**`flwr_experimental`** " -"([#869](https://github.com/adap/flower/pull/869))" +"**从发布版中删除**`flwr_example`**和**`flwr_experimental`** ([#869](https://" +"github.com/adap/flower/pull/869))" #: ../../source/ref-changelog.md:785 msgid "" "The packages `flwr_example` and `flwr_experimental` have been deprecated " "since Flower 0.12.0 and they are not longer included in Flower release " "builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " -"an upcoming release." +"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in an " +"upcoming release." 
msgstr "" -"自 Flower 0.12.0 起,软件包 `flwr_example` 和 `flwr_experimental` 已被弃用,它们不再包含在 " -"Flower 的发布版本中。相关的额外包(`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`)现在已不再使用,并将在即将发布的版本中移除。" +"自 Flower 0.12.0 起,软件包 `flwr_example` 和 `flwr_experimental` 已被弃用," +"它们不再包含在 Flower 的发布版本中。相关的额外包(`baseline`, `examples-" +"pytorch`, `examples-tensorflow`, `http-logger`, `ops`)现在已不再使用,并将在" +"即将发布的版本中移除。" #: ../../source/ref-changelog.md:787 msgid "v0.17.0 (2021-09-24)" @@ -18573,169 +18664,190 @@ msgstr "v0.17.0 (2021-09-24)" #: ../../source/ref-changelog.md:791 msgid "" -"**Experimental virtual client engine** " -"([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" +"**Experimental virtual client engine** ([#781](https://github.com/adap/" +"flower/pull/781) [#790](https://github.com/adap/flower/pull/790) [#791]" +"(https://github.com/adap/flower/pull/791))" msgstr "" "**实验性虚拟客户端引擎** ([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" +"[#790](https://github.com/adap/flower/pull/790) [#791](https://github.com/" +"adap/flower/pull/791))" #: ../../source/ref-changelog.md:793 msgid "" -"One of Flower's goals is to enable research at scale. This release " -"enables a first (experimental) peek at a major new feature, codenamed the" -" virtual client engine. Virtual clients enable simulations that scale to " -"a (very) large number of clients on a single machine or compute cluster. " -"The easiest way to test the new functionality is to look at the two new " -"code examples called `quickstart_simulation` and `simulation_pytorch`." +"One of Flower's goals is to enable research at scale. This release enables a " +"first (experimental) peek at a major new feature, codenamed the virtual " +"client engine. Virtual clients enable simulations that scale to a (very) " +"large number of clients on a single machine or compute cluster. The easiest " +"way to test the new functionality is to look at the two new code examples " +"called `quickstart_simulation` and `simulation_pytorch`." msgstr "" -"Flower 的目标之一是实现大规模研究。这一版本首次(试验性地)展示了代号为 \"虚拟客户端引擎 " -"\"的重要新功能。虚拟客户端可以在单台机器或计算集群上对大量客户端进行模拟。测试新功能的最简单方法是查看名为 " -"\"quickstart_simulation \"和 \"simulation_pytorch \"的两个新代码示例。" +"Flower 的目标之一是实现大规模研究。这一版本首次(试验性地)展示了代号为 \"虚" +"拟客户端引擎 \"的重要新功能。虚拟客户端可以在单台机器或计算集群上对大量客户端" +"进行模拟。测试新功能的最简单方法是查看名为 \"quickstart_simulation \"和 " +"\"simulation_pytorch \"的两个新代码示例。" #: ../../source/ref-changelog.md:795 msgid "" -"The feature is still experimental, so there's no stability guarantee for " -"the API. It's also not quite ready for prime time and comes with a few " -"known caveats. However, those who are curious are encouraged to try it " -"out and share their thoughts." +"The feature is still experimental, so there's no stability guarantee for the " +"API. It's also not quite ready for prime time and comes with a few known " +"caveats. However, those who are curious are encouraged to try it out and " +"share their thoughts." 
msgstr "" -"该功能仍处于试验阶段,因此无法保证 API " -"的稳定性。此外,它还没有完全准备好进入黄金时间,并有一些已知的注意事项。不过,我们鼓励好奇的用户尝试使用并分享他们的想法。" +"该功能仍处于试验阶段,因此无法保证 API 的稳定性。此外,它还没有完全准备好进入" +"黄金时间,并有一些已知的注意事项。不过,我们鼓励好奇的用户尝试使用并分享他们" +"的想法。" #: ../../source/ref-changelog.md:797 msgid "" -"**New built-in strategies** " -"([#828](https://github.com/adap/flower/pull/828) " +"**New built-in strategies** ([#828](https://github.com/adap/flower/pull/828) " "[#822](https://github.com/adap/flower/pull/822))" msgstr "" -"**新的内置策略**([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822)" +"**新的内置策略**([#828](https://github.com/adap/flower/pull/828) [#822]" +"(https://github.com/adap/flower/pull/822)" #: ../../source/ref-changelog.md:799 msgid "" "FedYogi - Federated learning strategy using Yogi on server-side. " "Implementation based on https://arxiv.org/abs/2003.00295" -msgstr "FedYogi - 在服务器端使用 Yogi 的联邦学习策略。基于 https://arxiv.org/abs/2003.00295 实现" +msgstr "" +"FedYogi - 在服务器端使用 Yogi 的联邦学习策略。基于 https://arxiv.org/" +"abs/2003.00295 实现" #: ../../source/ref-changelog.md:800 msgid "" "FedAdam - Federated learning strategy using Adam on server-side. " "Implementation based on https://arxiv.org/abs/2003.00295" -msgstr "FedAdam - 在服务器端使用 Adam 的联邦学习策略。基于 https://arxiv.org/abs/2003.00295 实现" +msgstr "" +"FedAdam - 在服务器端使用 Adam 的联邦学习策略。基于 https://arxiv.org/" +"abs/2003.00295 实现" #: ../../source/ref-changelog.md:802 msgid "" -"**New PyTorch Lightning code example** " -"([#617](https://github.com/adap/flower/pull/617))" +"**New PyTorch Lightning code example** ([#617](https://github.com/adap/" +"flower/pull/617))" msgstr "" -"**新的 PyTorch Lightning 代码示例** " -"([#617](https://github.com/adap/flower/pull/617))" +"**新的 PyTorch Lightning 代码示例** ([#617](https://github.com/adap/flower/" +"pull/617))" #: ../../source/ref-changelog.md:804 msgid "" -"**New Variational Auto-Encoder code example** " -"([#752](https://github.com/adap/flower/pull/752))" -msgstr "**新的变分自动编码器代码示例** ([#752](https://github.com/adap/flower/pull/752))" +"**New Variational Auto-Encoder code example** ([#752](https://github.com/" +"adap/flower/pull/752))" +msgstr "" +"**新的变分自动编码器代码示例** ([#752](https://github.com/adap/flower/" +"pull/752))" #: ../../source/ref-changelog.md:806 msgid "" -"**New scikit-learn code example** " -"([#748](https://github.com/adap/flower/pull/748))" -msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" +"**New scikit-learn code example** ([#748](https://github.com/adap/flower/" +"pull/748))" +msgstr "" +"**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/" +"pull/748))" #: ../../source/ref-changelog.md:808 msgid "" -"**New experimental TensorBoard strategy** " -"([#789](https://github.com/adap/flower/pull/789))" -msgstr "**新的实验性 TensorBoard 策略**([#789](https://github.com/adap/flower/pull/789))" +"**New experimental TensorBoard strategy** ([#789](https://github.com/adap/" +"flower/pull/789))" +msgstr "" +"**新的实验性 TensorBoard 策略**([#789](https://github.com/adap/flower/" +"pull/789))" #: ../../source/ref-changelog.md:812 msgid "" -"Improved advanced TensorFlow code example " -"([#769](https://github.com/adap/flower/pull/769))" -msgstr "改进的高级 TensorFlow 代码示例([#769](https://github.com/adap/flower/pull/769)" +"Improved advanced TensorFlow code example ([#769](https://github.com/adap/" +"flower/pull/769))" +msgstr "" +"改进的高级 TensorFlow 代码示例([#769](https://github.com/adap/flower/" +"pull/769)" #: ../../source/ref-changelog.md:813 msgid "" -"Warning when `min_available_clients` is 
misconfigured " -"([#830](https://github.com/adap/flower/pull/830))" +"Warning when `min_available_clients` is misconfigured ([#830](https://github." +"com/adap/flower/pull/830))" msgstr "" -"当 `min_available_clients` 配置错误时发出警告 " -"([#830](https://github.com/adap/flower/pull/830))" +"当 `min_available_clients` 配置错误时发出警告 ([#830](https://github.com/" +"adap/flower/pull/830))" #: ../../source/ref-changelog.md:814 msgid "" -"Improved gRPC server docs " -"([#841](https://github.com/adap/flower/pull/841))" -msgstr "改进了 gRPC 服务器文档([#841](https://github.com/adap/flower/pull/841))" +"Improved gRPC server docs ([#841](https://github.com/adap/flower/pull/841))" +msgstr "" +"改进了 gRPC 服务器文档([#841](https://github.com/adap/flower/pull/841))" #: ../../source/ref-changelog.md:815 msgid "" -"Improved error message in `NumPyClient` " -"([#851](https://github.com/adap/flower/pull/851))" -msgstr "改进了 `NumPyClient` 中的错误信息 ([#851](https://github.com/adap/flower/pull/851))" +"Improved error message in `NumPyClient` ([#851](https://github.com/adap/" +"flower/pull/851))" +msgstr "" +"改进了 `NumPyClient` 中的错误信息 ([#851](https://github.com/adap/flower/" +"pull/851))" #: ../../source/ref-changelog.md:816 msgid "" -"Improved PyTorch quickstart code example " -"([#852](https://github.com/adap/flower/pull/852))" -msgstr "改进的 PyTorch 快速启动代码示例 ([#852](https://github.com/adap/flower/pull/852))" +"Improved PyTorch quickstart code example ([#852](https://github.com/adap/" +"flower/pull/852))" +msgstr "" +"改进的 PyTorch 快速启动代码示例 ([#852](https://github.com/adap/flower/" +"pull/852))" #: ../../source/ref-changelog.md:820 msgid "" -"**Disabled final distributed evaluation** " -"([#800](https://github.com/adap/flower/pull/800))" -msgstr "**禁用最终分布式评价** ([#800](https://github.com/adap/flower/pull/800))" +"**Disabled final distributed evaluation** ([#800](https://github.com/adap/" +"flower/pull/800))" +msgstr "" +"**禁用最终分布式评价** ([#800](https://github.com/adap/flower/pull/800))" #: ../../source/ref-changelog.md:822 msgid "" -"Prior behaviour was to perform a final round of distributed evaluation on" -" all connected clients, which is often not required (e.g., when using " -"server-side evaluation). The prior behaviour can be enabled by passing " +"Prior behaviour was to perform a final round of distributed evaluation on " +"all connected clients, which is often not required (e.g., when using server-" +"side evaluation). The prior behaviour can be enabled by passing " "`force_final_distributed_eval=True` to `start_server`." msgstr "" -"之前的行为是在所有连接的客户端上执行最后一轮分布式评估,而这通常是不需要的(例如,在使用服务器端评估时)。可以通过向 `start_server`" -" 传递 `force_final_distributed_eval=True` 来启用之前的行为。" +"之前的行为是在所有连接的客户端上执行最后一轮分布式评估,而这通常是不需要的" +"(例如,在使用服务器端评估时)。可以通过向 `start_server` 传递 " +"`force_final_distributed_eval=True` 来启用之前的行为。" #: ../../source/ref-changelog.md:824 msgid "" -"**Renamed q-FedAvg strategy** " -"([#802](https://github.com/adap/flower/pull/802))" -msgstr "**更名为 q-FedAvg 策略** ([#802](https://github.com/adap/flower/pull/802))" +"**Renamed q-FedAvg strategy** ([#802](https://github.com/adap/flower/" +"pull/802))" +msgstr "" +"**更名为 q-FedAvg 策略** ([#802](https://github.com/adap/flower/pull/802))" #: ../../source/ref-changelog.md:826 msgid "" -"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " -"the notation given in the original paper (q-FFL is the optimization " -"objective, q-FedAvg is the proposed solver). 
Note the original (now " -"deprecated) `QffedAvg` class is still available for compatibility reasons" -" (it will be removed in a future release)." +"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect the " +"notation given in the original paper (q-FFL is the optimization objective, q-" +"FedAvg is the proposed solver). Note the original (now deprecated) " +"`QffedAvg` class is still available for compatibility reasons (it will be " +"removed in a future release)." msgstr "" -"名为 `QffedAvg` 的策略已更名为 `QFedAvg`,以更好地反映原始论文中给出的符号(q-FFL 是优化目标,q-FedAvg " -"是建议的求解器)。请注意,出于兼容性原因,原始(现已废弃)的 `QffedAvg` 类仍然可用(它将在未来的版本中移除)。" +"名为 `QffedAvg` 的策略已更名为 `QFedAvg`,以更好地反映原始论文中给出的符号" +"(q-FFL 是优化目标,q-FedAvg 是建议的求解器)。请注意,出于兼容性原因,原始" +"(现已废弃)的 `QffedAvg` 类仍然可用(它将在未来的版本中移除)。" #: ../../source/ref-changelog.md:828 msgid "" "**Deprecated and renamed code example** `simulation_pytorch` **to** " -"`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"`simulation_pytorch_legacy` ([#791](https://github.com/adap/flower/pull/791))" msgstr "" -"**删除并重命名代码示例**`simulation_pytorch`**为**`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"**删除并重命名代码示例**`simulation_pytorch`**为" +"**`simulation_pytorch_legacy` ([#791](https://github.com/adap/flower/" +"pull/791))" #: ../../source/ref-changelog.md:830 msgid "" -"This example has been replaced by a new example. The new example is based" -" on the experimental virtual client engine, which will become the new " -"default way of doing most types of large-scale simulations in Flower. The" -" existing example was kept for reference purposes, but it might be " -"removed in the future." +"This example has been replaced by a new example. The new example is based on " +"the experimental virtual client engine, which will become the new default " +"way of doing most types of large-scale simulations in Flower. The existing " +"example was kept for reference purposes, but it might be removed in the " +"future." msgstr "" -"该示例已被新示例取代。新示例基于试验性虚拟客户端引擎,它将成为在 Flower " -"中进行大多数类型大规模模拟的新的默认方式。现有示例将作为参考保留,但将来可能会删除。" +"该示例已被新示例取代。新示例基于试验性虚拟客户端引擎,它将成为在 Flower 中进" +"行大多数类型大规模模拟的新的默认方式。现有示例将作为参考保留,但将来可能会删" +"除。" #: ../../source/ref-changelog.md:832 msgid "v0.16.0 (2021-05-11)" @@ -18743,8 +18855,7 @@ msgstr "v0.16.0 (2021-05-11)" #: ../../source/ref-changelog.md:836 msgid "" -"**New built-in strategies** " -"([#549](https://github.com/adap/flower/pull/549))" +"**New built-in strategies** ([#549](https://github.com/adap/flower/pull/549))" msgstr "**新的内置策略** ([#549](https://github.com/adap/flower/pull/549))" #: ../../source/ref-changelog.md:838 @@ -18753,9 +18864,11 @@ msgstr "(摘要) FedOpt" #: ../../source/ref-changelog.md:841 msgid "" -"**Custom metrics for server and strategies** " -"([#717](https://github.com/adap/flower/pull/717))" -msgstr "**服务器和策略的自定义指标** ([#717](https://github.com/adap/flower/pull/717))" +"**Custom metrics for server and strategies** ([#717](https://github.com/adap/" +"flower/pull/717))" +msgstr "" +"**服务器和策略的自定义指标** ([#717](https://github.com/adap/flower/" +"pull/717))" #: ../../source/ref-changelog.md:843 msgid "" @@ -18765,66 +18878,70 @@ msgid "" "dictionary containing custom metrics from client to server. As of this " "release, custom metrics replace task-specific metrics on the server." 
msgstr "" -"Flower 服务器现在完全与任务无关,所有剩余的任务特定度量(如 \"准确度\")都已被自定义度量字典取代。Flower 0.15 " -"引入了从客户端向服务器传递包含自定义指标的字典的功能。从本版本开始,自定义指标将取代服务器上的特定任务指标。" +"Flower 服务器现在完全与任务无关,所有剩余的任务特定度量(如 \"准确度\")都已" +"被自定义度量字典取代。Flower 0.15 引入了从客户端向服务器传递包含自定义指标的" +"字典的功能。从本版本开始,自定义指标将取代服务器上的特定任务指标。" #: ../../source/ref-changelog.md:845 msgid "" -"Custom metric dictionaries are now used in two user-facing APIs: they are" -" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " -"they enable evaluation functions passed to built-in strategies (via " -"`eval_fn`) to return more than two evaluation metrics. Strategies can " -"even return *aggregated* metrics dictionaries for the server to keep " -"track of." +"Custom metric dictionaries are now used in two user-facing APIs: they are " +"returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and they " +"enable evaluation functions passed to built-in strategies (via `eval_fn`) to " +"return more than two evaluation metrics. Strategies can even return " +"*aggregated* metrics dictionaries for the server to keep track of." msgstr "" -"自定义度量字典现在可在两个面向用户的 API 中使用:它们可从策略方法 `aggregate_fit`/`aggregate_evaluate` " -"返回,还可使传递给内置策略(通过 `eval_fn`)的评估函数返回两个以上的评估度量。策略甚至可以返回 *aggregated* " -"指标字典,以便服务器跟踪。" +"自定义度量字典现在可在两个面向用户的 API 中使用:它们可从策略方法 " +"`aggregate_fit`/`aggregate_evaluate` 返回,还可使传递给内置策略(通过 " +"`eval_fn`)的评估函数返回两个以上的评估度量。策略甚至可以返回 *aggregated* 指" +"标字典,以便服务器跟踪。" #: ../../source/ref-changelog.md:847 msgid "" "Strategy implementations should migrate their `aggregate_fit` and " "`aggregate_evaluate` methods to the new return type (e.g., by simply " -"returning an empty `{}`), server-side evaluation functions should migrate" -" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +"returning an empty `{}`), server-side evaluation functions should migrate " +"from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." msgstr "" -"Strategy 实现应将其 `aggregate_fit` 和 `aggregate_evaluate` " -"方法迁移到新的返回类型(例如,只需返回空的 `{}`),服务器端评估函数应从 `return loss, accuracy` 迁移到 " -"`return loss, {\"accuracy\": accuracy}`。" +"Strategy 实现应将其 `aggregate_fit` 和 `aggregate_evaluate` 方法迁移到新的返" +"回类型(例如,只需返回空的 `{}`),服务器端评估函数应从 `return loss, " +"accuracy` 迁移到 `return loss, {\"accuracy\": accuracy}`。" #: ../../source/ref-changelog.md:849 msgid "" "Flower 0.15-style return types are deprecated (but still supported), " "compatibility will be removed in a future release." -msgstr "Flower 0.15 风格的返回类型已被弃用(但仍受支持),兼容性将在未来的版本中移除。" +msgstr "" +"Flower 0.15 风格的返回类型已被弃用(但仍受支持),兼容性将在未来的版本中移" +"除。" #: ../../source/ref-changelog.md:851 msgid "" -"**Migration warnings for deprecated functionality** " -"([#690](https://github.com/adap/flower/pull/690))" -msgstr "** 过时功能的迁移警告** ([#690](https://github.com/adap/flower/pull/690))" +"**Migration warnings for deprecated functionality** ([#690](https://github." +"com/adap/flower/pull/690))" +msgstr "" +"** 过时功能的迁移警告** ([#690](https://github.com/adap/flower/pull/690))" #: ../../source/ref-changelog.md:853 msgid "" "Earlier versions of Flower were often migrated to new APIs, while " -"maintaining compatibility with legacy APIs. This release introduces " -"detailed warning messages if usage of deprecated APIs is detected. The " -"new warning messages often provide details on how to migrate to more " -"recent APIs, thus easing the transition from one release to another." +"maintaining compatibility with legacy APIs. This release introduces detailed " +"warning messages if usage of deprecated APIs is detected. 
The new warning " +"messages often provide details on how to migrate to more recent APIs, thus " +"easing the transition from one release to another." msgstr "" -"Flower 早期版本通常会迁移到新的应用程序接口,同时保持与旧版应用程序接口的兼容。如果检测到使用了过时的 " -"API,本版本将引入详细的警告信息。新的警告信息通常会详细说明如何迁移到更新的 API,从而简化从一个版本到另一个版本的过渡。" +"Flower 早期版本通常会迁移到新的应用程序接口,同时保持与旧版应用程序接口的兼" +"容。如果检测到使用了过时的 API,本版本将引入详细的警告信息。新的警告信息通常" +"会详细说明如何迁移到更新的 API,从而简化从一个版本到另一个版本的过渡。" #: ../../source/ref-changelog.md:855 msgid "" -"Improved docs and docstrings " -"([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" +"Improved docs and docstrings ([#691](https://github.com/adap/flower/" +"pull/691) [#692](https://github.com/adap/flower/pull/692) [#713](https://" +"github.com/adap/flower/pull/713))" msgstr "" -"改进了文档和文档说明 ([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" +"改进了文档和文档说明 ([#691](https://github.com/adap/flower/pull/691) [#692]" +"(https://github.com/adap/flower/pull/692) [#713](https://github.com/adap/" +"flower/pull/713))" #: ../../source/ref-changelog.md:857 msgid "MXNet example and documentation" @@ -18833,55 +18950,52 @@ msgstr "MXNet 示例和文档" #: ../../source/ref-changelog.md:859 msgid "" "FedBN implementation in example PyTorch: From Centralized To Federated " -"([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +"([#696](https://github.com/adap/flower/pull/696) [#702](https://github.com/" +"adap/flower/pull/702) [#705](https://github.com/adap/flower/pull/705))" msgstr "" -"PyTorch 示例中的 FedBN 实现: 从集中到联邦 " -"([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +"PyTorch 示例中的 FedBN 实现: 从集中到联邦 ([#696](https://github.com/adap/" +"flower/pull/696) [#702](https://github.com/adap/flower/pull/702) [#705]" +"(https://github.com/adap/flower/pull/705))" #: ../../source/ref-changelog.md:863 msgid "" -"**Serialization-agnostic server** " -"([#721](https://github.com/adap/flower/pull/721))" +"**Serialization-agnostic server** ([#721](https://github.com/adap/flower/" +"pull/721))" msgstr "**序列化无关服务器** ([#721](https://github.com/adap/flower/pull/721))" #: ../../source/ref-changelog.md:865 msgid "" -"The Flower server is now fully serialization-agnostic. Prior usage of " -"class `Weights` (which represents parameters as deserialized NumPy " -"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " -"`Parameters` objects are fully serialization-agnostic and represents " -"parameters as byte arrays, the `tensor_type` attributes indicates how " -"these byte arrays should be interpreted (e.g., for " -"serialization/deserialization)." +"The Flower server is now fully serialization-agnostic. Prior usage of class " +"`Weights` (which represents parameters as deserialized NumPy ndarrays) was " +"replaced by class `Parameters` (e.g., in `Strategy`). `Parameters` objects " +"are fully serialization-agnostic and represents parameters as byte arrays, " +"the `tensor_type` attributes indicates how these byte arrays should be " +"interpreted (e.g., for serialization/deserialization)." 
msgstr "" -"Flower 服务器现在完全不依赖序列化。之前使用的 `Weights` 类(以反序列化的 NumPy ndarrays 表示参数)已被 " -"`Parameters` 类取代(例如在 `Strategy`中)。参数 " -"\"对象与序列化完全无关,它以字节数组的形式表示参数,\"tensor_type \"属性表示如何解释这些字节数组(例如,用于序列化/反序列化)。" +"Flower 服务器现在完全不依赖序列化。之前使用的 `Weights` 类(以反序列化的 " +"NumPy ndarrays 表示参数)已被 `Parameters` 类取代(例如在 `Strategy`中)。参" +"数 \"对象与序列化完全无关,它以字节数组的形式表示参数,\"tensor_type \"属性表" +"示如何解释这些字节数组(例如,用于序列化/反序列化)。" #: ../../source/ref-changelog.md:867 msgid "" -"Built-in strategies implement this approach by handling serialization and" -" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " +"Built-in strategies implement this approach by handling serialization and " +"deserialization to/from `Weights` internally. Custom/3rd-party Strategy " "implementations should update to the slightly changed Strategy method " -"definitions. Strategy authors can consult PR " -"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" -" easily migrate to the new format." +"definitions. Strategy authors can consult PR [#721](https://github.com/adap/" +"flower/pull/721) to see how strategies can easily migrate to the new format." msgstr "" -"内置策略通过在内部处理序列化和反序列化到/从`Weights`来实现这种方法。自定义/第三方策略实现应更新为稍有改动的策略方法定义。策略作者可查阅" -" PR [#721](https://github.com/adap/flower/pull/721) 以了解如何将策略轻松迁移到新格式。" +"内置策略通过在内部处理序列化和反序列化到/从`Weights`来实现这种方法。自定义/第" +"三方策略实现应更新为稍有改动的策略方法定义。策略作者可查阅 PR [#721](https://" +"github.com/adap/flower/pull/721) 以了解如何将策略轻松迁移到新格式。" #: ../../source/ref-changelog.md:869 msgid "" -"Deprecated `flwr.server.Server.evaluate`, use " -"`flwr.server.Server.evaluate_round` instead " -"([#717](https://github.com/adap/flower/pull/717))" +"Deprecated `flwr.server.Server.evaluate`, use `flwr.server.Server." +"evaluate_round` instead ([#717](https://github.com/adap/flower/pull/717))" msgstr "" -"已弃用 `flwr.server.Server.evaluate`,改用 " -"`flwr.server.Server.evaluate_round`([#717](https://github.com/adap/flower/pull/717)" +"已弃用 `flwr.server.Server.evaluate`,改用 `flwr.server.Server." +"evaluate_round`([#717](https://github.com/adap/flower/pull/717)" #: ../../source/ref-changelog.md:871 msgid "v0.15.0 (2021-03-12)" @@ -18889,9 +19003,10 @@ msgstr "v0.15.0 (2021-03-12)" #: ../../source/ref-changelog.md:875 msgid "" -"**Server-side parameter initialization** " -"([#658](https://github.com/adap/flower/pull/658))" -msgstr "**服务器端参数初始化** ([#658](https://github.com/adap/flower/pull/658))" +"**Server-side parameter initialization** ([#658](https://github.com/adap/" +"flower/pull/658))" +msgstr "" +"**服务器端参数初始化** ([#658](https://github.com/adap/flower/pull/658))" #: ../../source/ref-changelog.md:877 msgid "" @@ -18899,18 +19014,18 @@ msgid "" "parameter initialization works via a new `Strategy` method called " "`initialize_parameters`." msgstr "" -"现在可以在服务器端初始化模型参数。服务器端参数初始化通过名为 \"initialize_parameters \"的新 \"Strategy " -"\"方法进行。" +"现在可以在服务器端初始化模型参数。服务器端参数初始化通过名为 " +"\"initialize_parameters \"的新 \"Strategy \"方法进行。" #: ../../source/ref-changelog.md:879 msgid "" "Built-in strategies support a new constructor argument called " -"`initial_parameters` to set the initial parameters. Built-in strategies " -"will provide these initial parameters to the server on startup and then " -"delete them to free the memory afterwards." +"`initial_parameters` to set the initial parameters. Built-in strategies will " +"provide these initial parameters to the server on startup and then delete " +"them to free the memory afterwards." 
msgstr "" -"内置策略支持名为 \"initial_parameters " -"\"的新构造函数参数,用于设置初始参数。内置策略会在启动时向服务器提供这些初始参数,然后删除它们以释放内存。" +"内置策略支持名为 \"initial_parameters \"的新构造函数参数,用于设置初始参数。" +"内置策略会在启动时向服务器提供这些初始参数,然后删除它们以释放内存。" #: ../../source/ref-changelog.md:898 msgid "" @@ -18918,7 +19033,9 @@ msgid "" "continue to use the current behaviour (namely, it will ask one of the " "connected clients for its parameters and use these as the initial global " "parameters)." -msgstr "如果没有向策略提供初始参数,服务器将继续使用当前行为(即向其中一个已连接的客户端询问参数,并将这些参数用作初始全局参数)。" +msgstr "" +"如果没有向策略提供初始参数,服务器将继续使用当前行为(即向其中一个已连接的客" +"户端询问参数,并将这些参数用作初始全局参数)。" #: ../../source/ref-changelog.md:900 msgid "Deprecations" @@ -18926,11 +19043,11 @@ msgstr "停用" #: ../../source/ref-changelog.md:902 msgid "" -"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " -"`flwr.server.strategy.FedAvg`, which is equivalent)" +"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to `flwr.server." +"strategy.FedAvg`, which is equivalent)" msgstr "" -"停用 `flwr.server.strategy.DefaultStrategy`(迁移到等价的 " -"`flwr.server.strategy.FedAvg`)" +"停用 `flwr.server.strategy.DefaultStrategy`(迁移到等价的 `flwr.server." +"strategy.FedAvg`)" #: ../../source/ref-changelog.md:904 msgid "v0.14.0 (2021-02-18)" @@ -18939,87 +19056,90 @@ msgstr "v0.14.0 (2021-02-18)" #: ../../source/ref-changelog.md:908 msgid "" "**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " -"([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" +"([#610](https://github.com/adap/flower/pull/610) [#572](https://github.com/" +"adap/flower/pull/572) [#633](https://github.com/adap/flower/pull/633))" msgstr "" -"**通用** `Client.fit` **和** `Client.evaluate` **返回值** " -"([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" +"**通用** `Client.fit` **和** `Client.evaluate` **返回值** ([#610](https://" +"github.com/adap/flower/pull/610) [#572](https://github.com/adap/flower/" +"pull/572) [#633](https://github.com/adap/flower/pull/633))" #: ../../source/ref-changelog.md:910 msgid "" -"Clients can now return an additional dictionary mapping `str` keys to " -"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " -"This means one can return almost arbitrary values from `fit`/`evaluate` " -"and make use of them on the server side!" +"Clients can now return an additional dictionary mapping `str` keys to values " +"of the following types: `bool`, `bytes`, `float`, `int`, `str`. This means " +"one can return almost arbitrary values from `fit`/`evaluate` and make use of " +"them on the server side!" msgstr "" -"客户端现在可以返回一个额外的字典,将 `str` 键映射为以下类型的值: " -"bool`、`bytes`、`float`、`int`、`str`。这意味着我们可以从 `fit`/`evaluate` " -"返回几乎任意的值,并在服务器端使用它们!" +"客户端现在可以返回一个额外的字典,将 `str` 键映射为以下类型的值: bool`、" +"`bytes`、`float`、`int`、`str`。这意味着我们可以从 `fit`/`evaluate` 返回几乎" +"任意的值,并在服务器端使用它们!" #: ../../source/ref-changelog.md:912 msgid "" -"This improvement also allowed for more consistent return types between " -"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " -"dict)` representing the loss, number of examples, and a dictionary " -"holding arbitrary problem-specific values like accuracy." 
+"This improvement also allowed for more consistent return types between `fit` " +"and `evaluate`: `evaluate` should now return a tuple `(float, int, dict)` " +"representing the loss, number of examples, and a dictionary holding " +"arbitrary problem-specific values like accuracy." msgstr "" -"这一改进还使 `fit` 和 `evaluate` 之间的返回类型更加一致:`evaluate` 现在应返回一个元组`(float, int, " -"dict)`,代表损失、示例数和一个包含特定问题任意值(如准确度)的字典。" +"这一改进还使 `fit` 和 `evaluate` 之间的返回类型更加一致:`evaluate` 现在应返" +"回一个元组`(float, int, dict)`,代表损失、示例数和一个包含特定问题任意值(如" +"准确度)的字典。" #: ../../source/ref-changelog.md:914 msgid "" -"In case you wondered: this feature is compatible with existing projects, " -"the additional dictionary return value is optional. New code should " -"however migrate to the new return types to be compatible with upcoming " -"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " -"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " -"details." +"In case you wondered: this feature is compatible with existing projects, the " +"additional dictionary return value is optional. New code should however " +"migrate to the new return types to be compatible with upcoming Flower " +"releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, `evaluate`: " +"`float, int, Dict[str, Scalar]`). See the example below for details." msgstr "" -"如果你想知道:此功能与现有项目兼容,额外的字典返回值是可选的。不过,新代码应迁移到新的返回类型,以便与即将发布的 Flower " -"版本兼容(`fit`: `List[np.ndarray], int, Dict[str, Scalar]`,`evaluate`: " -"`float, int, Dict[str, Scalar]`)。详见下面的示例。" +"如果你想知道:此功能与现有项目兼容,额外的字典返回值是可选的。不过,新代码应" +"迁移到新的返回类型,以便与即将发布的 Flower 版本兼容(`fit`: `List[np." +"ndarray], int, Dict[str, Scalar]`,`evaluate`: `float, int, Dict[str, " +"Scalar]`)。详见下面的示例。" #: ../../source/ref-changelog.md:916 msgid "" "*Code example:* note the additional dictionary return values in both " "`FlwrClient.fit` and `FlwrClient.evaluate`:" -msgstr "*代码示例:* 注意 `FlwrClient.fit` 和 `FlwrClient.evaluate` 中的附加字典返回值:" +msgstr "" +"*代码示例:* 注意 `FlwrClient.fit` 和 `FlwrClient.evaluate` 中的附加字典返回" +"值:" #: ../../source/ref-changelog.md:931 msgid "" -"**Generalized** `config` **argument in** `Client.fit` **and** " -"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +"**Generalized** `config` **argument in** `Client.fit` **and** `Client." +"evaluate` ([#595](https://github.com/adap/flower/pull/595))" msgstr "" -"**在**`Client.fit` " -"**和**`Client.evaluate`中泛化**`config`参数([#595](https://github.com/adap/flower/pull/595))" +"**在**`Client.fit` **和**`Client.evaluate`中泛化**`config`参数([#595]" +"(https://github.com/adap/flower/pull/595))" #: ../../source/ref-changelog.md:933 msgid "" -"The `config` argument used to be of type `Dict[str, str]`, which means " -"that dictionary values were expected to be strings. The new release " -"generalizes this to enable values of the following types: `bool`, " -"`bytes`, `float`, `int`, `str`." +"The `config` argument used to be of type `Dict[str, str]`, which means that " +"dictionary values were expected to be strings. The new release generalizes " +"this to enable values of the following types: `bool`, `bytes`, `float`, " +"`int`, `str`." msgstr "" -"`config`参数曾是 \"字典[str, str]\"类型,这意味着字典值应是字符串。新版本将其扩展为以下类型的值: " -"bool`、`bytes`、`float`、`int`、`str`。" +"`config`参数曾是 \"字典[str, str]\"类型,这意味着字典值应是字符串。新版本将其" +"扩展为以下类型的值: bool`、`bytes`、`float`、`int`、`str`。" #: ../../source/ref-changelog.md:935 msgid "" "This means one can now pass almost arbitrary values to `fit`/`evaluate` " -"using the `config` dictionary. 
Yay, no more `str(epochs)` on the server-" -"side and `int(config[\"epochs\"])` on the client side!" +"using the `config` dictionary. Yay, no more `str(epochs)` on the server-side " +"and `int(config[\"epochs\"])` on the client side!" msgstr "" -"这意味着现在可以使用 `config` 字典向 `fit`/`evaluate` 传递几乎任意的值。耶,服务器端不再需要 " -"`str(epochs)`,客户端不再需要 `int(config[\"epochs\"])`!" +"这意味着现在可以使用 `config` 字典向 `fit`/`evaluate` 传递几乎任意的值。耶," +"服务器端不再需要 `str(epochs)`,客户端不再需要 `int(config[\"epochs\"])`!" #: ../../source/ref-changelog.md:937 msgid "" "*Code example:* note that the `config` dictionary now contains non-`str` " "values in both `Client.fit` and `Client.evaluate`:" -msgstr "*代码示例:* 注意 `config` 字典现在在 `Client.fit` 和 `Client.evaluate` 中都包含非 `str` 值:" +msgstr "" +"*代码示例:* 注意 `config` 字典现在在 `Client.fit` 和 `Client.evaluate` 中都" +"包含非 `str` 值:" #: ../../source/ref-changelog.md:954 msgid "v0.13.0 (2021-01-08)" @@ -19027,16 +19147,19 @@ msgstr "v0.13.0 (2021-01-08)" #: ../../source/ref-changelog.md:958 msgid "" -"New example: PyTorch From Centralized To Federated " -"([#549](https://github.com/adap/flower/pull/549))" -msgstr "新示例: PyTorch 从集中到联邦 ([#549](https://github.com/adap/flower/pull/549))" +"New example: PyTorch From Centralized To Federated ([#549](https://github." +"com/adap/flower/pull/549))" +msgstr "" +"新示例: PyTorch 从集中到联邦 ([#549](https://github.com/adap/flower/" +"pull/549))" #: ../../source/ref-changelog.md:959 msgid "Improved documentation" msgstr "改进文档" #: ../../source/ref-changelog.md:960 -msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" +msgid "" +"New documentation theme ([#551](https://github.com/adap/flower/pull/551))" msgstr "新文档主题 ([#551](https://github.com/adap/flower/pull/551))" #: ../../source/ref-changelog.md:961 @@ -19045,14 +19168,14 @@ msgstr "新的 API 参考 ([#554](https://github.com/adap/flower/pull/554))" #: ../../source/ref-changelog.md:962 msgid "" -"Updated examples documentation " -"([#549](https://github.com/adap/flower/pull/549))" +"Updated examples documentation ([#549](https://github.com/adap/flower/" +"pull/549))" msgstr "更新了示例文档 ([#549](https://github.com/adap/flower/pull/549))" #: ../../source/ref-changelog.md:963 msgid "" -"Removed obsolete documentation " -"([#548](https://github.com/adap/flower/pull/548))" +"Removed obsolete documentation ([#548](https://github.com/adap/flower/" +"pull/548))" msgstr "删除了过时的文档 ([#548](https://github.com/adap/flower/pull/548))" #: ../../source/ref-changelog.md:965 @@ -19061,13 +19184,12 @@ msgstr "错误修正:" #: ../../source/ref-changelog.md:967 msgid "" -"`Server.fit` does not disconnect clients when finished, disconnecting the" -" clients is now handled in `flwr.server.start_server` " -"([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))." +"`Server.fit` does not disconnect clients when finished, disconnecting the " +"clients is now handled in `flwr.server.start_server` ([#553](https://github." +"com/adap/flower/pull/553) [#540](https://github.com/adap/flower/issues/540))." msgstr "" -"Server.fit \"完成后不会断开客户端连接,现在断开客户端连接是在 \"flwr.server.start_server " -"\"中处理的([#553](https://github.com/adap/flower/pull/553) " +"Server.fit \"完成后不会断开客户端连接,现在断开客户端连接是在 \"flwr.server." 
+"start_server \"中处理的([#553](https://github.com/adap/flower/pull/553) " "[#540](https://github.com/adap/flower/issues/540))。" #: ../../source/ref-changelog.md:969 @@ -19080,30 +19202,29 @@ msgstr "重要变更:" #: ../../source/ref-changelog.md:973 msgid "" -"Added an example for embedded devices " -"([#507](https://github.com/adap/flower/pull/507))" +"Added an example for embedded devices ([#507](https://github.com/adap/flower/" +"pull/507))" msgstr "添加了嵌入式设备示例 ([#507](https://github.com/adap/flower/pull/507))" #: ../../source/ref-changelog.md:974 msgid "" -"Added a new NumPyClient (in addition to the existing KerasClient) " -"([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508))" +"Added a new NumPyClient (in addition to the existing KerasClient) ([#504]" +"(https://github.com/adap/flower/pull/504) [#508](https://github.com/adap/" +"flower/pull/508))" msgstr "" -"添加了一个新的 NumPyClient(除现有的 KerasClient " -"之外)([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508)" +"添加了一个新的 NumPyClient(除现有的 KerasClient 之外)([#504](https://" +"github.com/adap/flower/pull/504) [#508](https://github.com/adap/flower/" +"pull/508)" #: ../../source/ref-changelog.md:975 msgid "" -"Deprecated `flwr_example` package and started to migrate examples into " -"the top-level `examples` directory " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" +"Deprecated `flwr_example` package and started to migrate examples into the " +"top-level `examples` directory ([#494](https://github.com/adap/flower/" +"pull/494) [#512](https://github.com/adap/flower/pull/512))" msgstr "" -"弃用 `flwr_example` 软件包,并开始将示例迁移到顶层的 `examples` 目录 " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" +"弃用 `flwr_example` 软件包,并开始将示例迁移到顶层的 `examples` 目录 ([#494]" +"(https://github.com/adap/flower/pull/494) [#512](https://github.com/adap/" +"flower/pull/512))" #: ../../source/ref-changelog.md:977 msgid "v0.11.0 (2020-11-30)" @@ -19115,16 +19236,16 @@ msgstr "不兼容的更改:" #: ../../source/ref-changelog.md:981 msgid "" -"Renamed strategy methods " -"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " -"Flower's public APIs. Other public methods/functions (e.g., every method " -"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " -"which is why we're removing it from the four methods in Strategy. To " -"migrate rename the following `Strategy` methods accordingly:" +"Renamed strategy methods ([#486](https://github.com/adap/flower/pull/486)) " +"to unify the naming of Flower's public APIs. Other public methods/functions " +"(e.g., every method in `Client`, but also `Strategy.evaluate`) do not use " +"the `on_` prefix, which is why we're removing it from the four methods in " +"Strategy. 
To migrate rename the following `Strategy` methods accordingly:" msgstr "" -"重命名了策略方法([#486](https://github.com/adap/flower/pull/486)),以统一 Flower公共 " -"API 的命名。其他公共方法/函数(例如 `Client` 中的每个方法,以及 `Strategy.evaluate`)不使用 `on_` " -"前缀,这就是我们从 Strategy 中的四个方法中移除它的原因。迁移时,请相应地重命名以下 `Strategy` 方法:" +"重命名了策略方法([#486](https://github.com/adap/flower/pull/486)),以统一 " +"Flower公共 API 的命名。其他公共方法/函数(例如 `Client` 中的每个方法,以及 " +"`Strategy.evaluate`)不使用 `on_` 前缀,这就是我们从 Strategy 中的四个方法中" +"移除它的原因。迁移时,请相应地重命名以下 `Strategy` 方法:" #: ../../source/ref-changelog.md:982 msgid "`on_configure_evaluate` => `configure_evaluate`" @@ -19144,42 +19265,42 @@ msgstr "`on_aggregate_fit` => `aggregate_fit`" #: ../../source/ref-changelog.md:989 msgid "" -"Deprecated `DefaultStrategy` " -"([#479](https://github.com/adap/flower/pull/479)). To migrate use " -"`FedAvg` instead." +"Deprecated `DefaultStrategy` ([#479](https://github.com/adap/flower/" +"pull/479)). To migrate use `FedAvg` instead." msgstr "" -"已废弃的 `DefaultStrategy` ([#479](https://github.com/adap/flower/pull/479)) " -"。迁移时请使用 `FedAvg`。" +"已废弃的 `DefaultStrategy` ([#479](https://github.com/adap/flower/" +"pull/479)) 。迁移时请使用 `FedAvg`。" #: ../../source/ref-changelog.md:990 msgid "" -"Simplified examples and baselines " -"([#484](https://github.com/adap/flower/pull/484))." -msgstr "简化示例和baselines([#484](https://github.com/adap/flower/pull/484))。" +"Simplified examples and baselines ([#484](https://github.com/adap/flower/" +"pull/484))." +msgstr "" +"简化示例和baselines([#484](https://github.com/adap/flower/pull/484))。" #: ../../source/ref-changelog.md:991 msgid "" -"Removed presently unused `on_conclude_round` from strategy interface " -"([#483](https://github.com/adap/flower/pull/483))." +"Removed presently unused `on_conclude_round` from strategy interface ([#483]" +"(https://github.com/adap/flower/pull/483))." msgstr "" -"删除了策略界面中目前未使用的 " -"\"on_conclude_round\"([#483](https://github.com/adap/flower/pull/483))。" +"删除了策略界面中目前未使用的 \"on_conclude_round\"([#483](https://github." +"com/adap/flower/pull/483))。" #: ../../source/ref-changelog.md:992 msgid "" -"Set minimal Python version to 3.6.1 instead of 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." +"Set minimal Python version to 3.6.1 instead of 3.6.9 ([#471](https://github." +"com/adap/flower/pull/471))." msgstr "" -"将最小 Python 版本设为 3.6.1,而不是 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." +"将最小 Python 版本设为 3.6.1,而不是 3.6.9 ([#471](https://github.com/adap/" +"flower/pull/471))." #: ../../source/ref-changelog.md:993 msgid "" -"Improved `Strategy` docstrings " -"([#470](https://github.com/adap/flower/pull/470))." +"Improved `Strategy` docstrings ([#470](https://github.com/adap/flower/" +"pull/470))." msgstr "" -"改进了 `Strategy` " -"docstrings([#470](https://github.com/adap/flower/pull/470))。" +"改进了 `Strategy` docstrings([#470](https://github.com/adap/flower/" +"pull/470))。" #: ../../source/ref-example-projects.rst:2 msgid "Example projects" @@ -19187,15 +19308,15 @@ msgstr "项目实例" #: ../../source/ref-example-projects.rst:4 msgid "" -"Flower comes with a number of usage examples. The examples demonstrate " -"how Flower can be used to federate different kinds of existing machine " -"learning pipelines, usually leveraging popular machine learning " -"frameworks such as `PyTorch `_ or `TensorFlow " -"`_." +"Flower comes with a number of usage examples. 
The examples demonstrate how " +"Flower can be used to federate different kinds of existing machine learning " +"pipelines, usually leveraging popular machine learning frameworks such as " +"`PyTorch `_ or `TensorFlow `_." msgstr "" -"Flower 附带了许多使用示例。这些示例演示了如何使用 Flower 联邦不同类型的现有机器学习形式,通常是利用流行的机器学习框架,如 " -"`PyTorch `_ 或 `TensorFlow " -"`_。" +"Flower 附带了许多使用示例。这些示例演示了如何使用 Flower 联邦不同类型的现有机" +"器学习形式,通常是利用流行的机器学习框架,如 `PyTorch `_ 或 `TensorFlow `_。" #: ../../source/ref-example-projects.rst:10 #, fuzzy @@ -19206,30 +19327,32 @@ msgstr "以下示例可作为独立项目使用。" #: ../../source/ref-example-projects.rst:14 msgid "" -"The TensorFlow/Keras quickstart example shows CIFAR-10 image " -"classification with MobileNetV2:" -msgstr "TensorFlow/Keras 快速入门示例展示了使用 MobileNetV2 进行的 CIFAR-10 图像分类:" +"The TensorFlow/Keras quickstart example shows CIFAR-10 image classification " +"with MobileNetV2:" +msgstr "" +"TensorFlow/Keras 快速入门示例展示了使用 MobileNetV2 进行的 CIFAR-10 图像分" +"类:" #: ../../source/ref-example-projects.rst:17 msgid "" -"`Quickstart TensorFlow (Code) " -"`_" +"`Quickstart TensorFlow (Code) `_" msgstr "" -"`TensorFlow快速入门 (代码) `_" +"`TensorFlow快速入门 (代码) `_" #: ../../source/ref-example-projects.rst:18 #, fuzzy -msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +msgid "" +":doc:`Quickstart TensorFlow (Tutorial) `" msgstr "" "`TensorFlow快速入门 (教程) `_" #: ../../source/ref-example-projects.rst:19 msgid "" -"`Quickstart TensorFlow (Blog Post) `_" +"`Quickstart TensorFlow (Blog Post) `_" msgstr "" "`TensorFlow快速入门 (博客) `_" @@ -19241,24 +19364,25 @@ msgstr "PyTorch快速入门" #: ../../source/ref-example-projects.rst:25 msgid "" -"The PyTorch quickstart example shows CIFAR-10 image classification with a" -" simple Convolutional Neural Network:" -msgstr "PyTorch 快速入门范例展示了使用简单卷积神经网络进行 CIFAR-10 图像分类的情况:" +"The PyTorch quickstart example shows CIFAR-10 image classification with a " +"simple Convolutional Neural Network:" +msgstr "" +"PyTorch 快速入门范例展示了使用简单卷积神经网络进行 CIFAR-10 图像分类的情况:" #: ../../source/ref-example-projects.rst:28 msgid "" -"`Quickstart PyTorch (Code) " -"`_" +"`Quickstart PyTorch (Code) `_" msgstr "" -"`PyTorch快速入门 (代码) `_" +"`PyTorch快速入门 (代码) `_" #: ../../source/ref-example-projects.rst:29 #, fuzzy msgid ":doc:`Quickstart PyTorch (Tutorial) `" msgstr "" -"`PyTorch快速入门 (教程) `_" +"`PyTorch快速入门 (教程) `_" #: ../../source/ref-example-projects.rst:33 msgid "PyTorch: From Centralized To Federated" @@ -19272,12 +19396,11 @@ msgstr "本例展示了如何使用 Flower 联邦化一个普通的 PyTorch 项 #: ../../source/ref-example-projects.rst:37 msgid "" -"`PyTorch: From Centralized To Federated (Code) " -"`_" +"`PyTorch: From Centralized To Federated (Code) `_" msgstr "" -"PyTorch: 从集中式到联邦式(代码) `_" +"PyTorch: 从集中式到联邦式(代码) `_" #: ../../source/ref-example-projects.rst:38 #, fuzzy @@ -19285,8 +19408,8 @@ msgid "" ":doc:`PyTorch: From Centralized To Federated (Tutorial) `" msgstr "" -"PyTorch: 从集中式到联邦式(教程) `_" +"PyTorch: 从集中式到联邦式(教程) `_" #: ../../source/ref-example-projects.rst:42 msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" @@ -19296,23 +19419,27 @@ msgstr "树莓派和 Nvidia Jetson 上的联邦学习" msgid "" "This example shows how Flower can be used to build a federated learning " "system that run across Raspberry Pi and Nvidia Jetson:" -msgstr "本示例展示了如何利用 Flower 建立一个跨 Raspberry Pi 和 Nvidia Jetson 运行的联邦学习系统:" +msgstr "" +"本示例展示了如何利用 Flower 建立一个跨 Raspberry Pi 和 Nvidia Jetson 运行的联" +"邦学习系统:" #: ../../source/ref-example-projects.rst:46 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " -"`_" +"`Federated Learning on Raspberry Pi and Nvidia Jetson 
(Code) `_" msgstr "" -"Raspberry Pi 和 Nvidia Jetson 上的联邦学习(代码) " -"`_" +"Raspberry Pi 和 Nvidia Jetson 上的联邦学习(代码) `_" #: ../../source/ref-example-projects.rst:47 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " -"`_" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) `_" msgstr "" -"Raspberry Pi和 Nvidia Jetson 上的联邦学习(博客) " -"`_" +"Raspberry Pi和 Nvidia Jetson 上的联邦学习(博客) `_" #: ../../source/ref-faq.rst:4 msgid "" @@ -19323,33 +19450,32 @@ msgstr "本页收集了有关 \"Flower 联邦学习 \"常见问题的答案。" #: ../../source/ref-faq.rst #, fuzzy msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" -msgstr ":fa:`eye,mr-1` Flower 可以在 Juptyter Notebooks / Google Colab 上运行吗?" +msgstr "" +":fa:`eye,mr-1` Flower 可以在 Juptyter Notebooks / Google Colab 上运行吗?" #: ../../source/ref-faq.rst:8 msgid "" -"Yes, it can! Flower even comes with a few under-the-hood optimizations to" -" make it work even better on Colab. Here's a quickstart example:" -msgstr "是的,它可以!Flower 甚至还进行了一些底层优化,使其在 Colab 上运行得更好。下面是一个快速启动示例:" +"Yes, it can! Flower even comes with a few under-the-hood optimizations to " +"make it work even better on Colab. Here's a quickstart example:" +msgstr "" +"是的,它可以!Flower 甚至还进行了一些底层优化,使其在 Colab 上运行得更好。下" +"面是一个快速启动示例:" #: ../../source/ref-faq.rst:10 msgid "" -"`Flower simulation PyTorch " -"`_" +"`Flower simulation PyTorch `_" msgstr "" -"`Flower 模拟 PyTorch " -"`_" +"`Flower 模拟 PyTorch `_" #: ../../source/ref-faq.rst:11 msgid "" -"`Flower simulation TensorFlow/Keras " -"`_" +"`Flower simulation TensorFlow/Keras `_" msgstr "" -"`Flower模拟TensorFlow/Keras " -"`_" +"`Flower模拟TensorFlow/Keras `_" #: ../../source/ref-faq.rst msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" @@ -19358,33 +19484,37 @@ msgstr ":fa:`eye,mr-1` 如何在 Raspberry Pi 上运行联邦学习?" #: ../../source/ref-faq.rst:15 msgid "" "Find the `blog post about federated learning on embedded device here " -"`_" -" and the corresponding `GitHub code example " -"`_." +"`_ " +"and the corresponding `GitHub code example `_." msgstr "" -"请点击此处查看有关嵌入式设备联邦学习的 " -"\"博文\"`_和相应的" -" \"GitHub 代码示例\"`_。" +"请点击此处查看有关嵌入式设备联邦学习的 \"博文\"`_和" +"相应的 \"GitHub 代码示例\"`_。" #: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +msgid "" +":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" msgstr ":fa:`eye,mr-1` Flower 是否支持安卓设备上的联邦学习?" #: ../../source/ref-faq.rst:19 msgid "" -"Yes, it does. Please take a look at our `blog post " -"`_ or check out the code examples:" +"Yes, it does. Please take a look at our `blog post `_ or " +"check out the code examples:" msgstr "" -"是的,确实如此。请查看我们的 \"博客文章 `_\" 或查看代码示例:" +"是的,确实如此。请查看我们的 \"博客文章 `_\" 或查看代码示例:" #: ../../source/ref-faq.rst:21 msgid "" -"`Android Kotlin example `_" -msgstr "`Android Kotlin 示例 `_" +"`Android Kotlin example `_" +msgstr "" +"`Android Kotlin 示例 `_" #: ../../source/ref-faq.rst:22 msgid "`Android Java example `_" @@ -19402,46 +19532,46 @@ msgstr "当然可以。有关在区块链环境中使用 Flower 的可用示例 #: ../../source/ref-faq.rst:28 msgid "" -"`Flower meets Nevermined GitHub Repository `_." +"`Flower meets Nevermined GitHub Repository `_." msgstr "" -"`Flower meets Nevermined GitHub Repository `_." +"`Flower meets Nevermined GitHub Repository `_." #: ../../source/ref-faq.rst:29 msgid "" -"`Flower meets Nevermined YouTube video " -"`_." +"`Flower meets Nevermined YouTube video `_." msgstr "" -"`Flower meets Nevermined YouTube 视频 " -"`_." +"`Flower meets Nevermined YouTube 视频 `_." 
#: ../../source/ref-faq.rst:30 #, fuzzy msgid "" -"`Flower meets KOSMoS `_." +"`Flower meets KOSMoS `_." msgstr "" -"`Flower meets KOSMoS `_." +"`Flower meets KOSMoS `_." #: ../../source/ref-faq.rst:31 msgid "" "`Flower meets Talan blog post `_ ." +"learning-same-mask-different-faces-imen-ayari/?" +"trackingId=971oIlxLQ9%2BA9RB0IQ73XQ%3D%3D>`_ ." msgstr "" "`Flower meets Talan博文 `_ 。" +"same-mask-different-faces-imen-ayari/?" +"trackingId=971oIlxLQ9%2BA9RB0IQ73XQ%3D%3D>`_ 。" #: ../../source/ref-faq.rst:32 msgid "" -"`Flower meets Talan GitHub Repository " -"`_ ." +"`Flower meets Talan GitHub Repository `_ ." msgstr "" -"`Flower meets Talan GitHub Repository " -"`_ ." +"`Flower meets Talan GitHub Repository `_ ." #: ../../source/ref-telemetry.md:1 msgid "Telemetry" @@ -19449,60 +19579,64 @@ msgstr "遥测功能" #: ../../source/ref-telemetry.md:3 msgid "" -"The Flower open-source project collects **anonymous** usage metrics to " -"make well-informed decisions to improve Flower. Doing this enables the " -"Flower team to understand how Flower is used and what challenges users " -"might face." +"The Flower open-source project collects **anonymous** usage metrics to make " +"well-informed decisions to improve Flower. Doing this enables the Flower " +"team to understand how Flower is used and what challenges users might face." msgstr "" -"Flower 开源项目收集**匿名**使用指标,以便在充分知情的情况下做出改进 Flower 的决定。这样做能让 Flower 团队了解 " -"Flower 的使用情况以及用户可能面临的挑战。" +"Flower 开源项目收集**匿名**使用指标,以便在充分知情的情况下做出改进 Flower 的" +"决定。这样做能让 Flower 团队了解 Flower 的使用情况以及用户可能面临的挑战。" #: ../../source/ref-telemetry.md:5 msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users that do not want to share anonymous usage metrics." -msgstr "**Flower 是一个用于协作式人工智能和数据科学的友好框架。** Flower 遵循这一声明,让不想分享匿名使用指标的用户可以轻松禁用遥测技术。" +"**Flower is a friendly framework for collaborative AI and data science.** " +"Staying true to this statement, Flower makes it easy to disable telemetry " +"for users that do not want to share anonymous usage metrics." +msgstr "" +"**Flower 是一个用于协作式人工智能和数据科学的友好框架。** Flower 遵循这一声" +"明,让不想分享匿名使用指标的用户可以轻松禁用遥测技术。" #: ../../source/ref-telemetry.md:7 msgid "Principles" msgstr "原则" #: ../../source/ref-telemetry.md:9 -msgid "We follow strong principles guarding anonymous usage metrics collection:" +msgid "" +"We follow strong principles guarding anonymous usage metrics collection:" msgstr "我们遵循严格的匿名使用指标收集原则:" #: ../../source/ref-telemetry.md:11 msgid "" -"**Optional:** You will always be able to disable telemetry; read on to " -"learn “[How to opt-out](#how-to-opt-out)”." -msgstr "**可选:** 您始终可以禁用遥测功能;请继续阅读\"[如何退出](#how-to-opt-out)\"。" +"**Optional:** You will always be able to disable telemetry; read on to learn " +"“[How to opt-out](#how-to-opt-out)”." +msgstr "" +"**可选:** 您始终可以禁用遥测功能;请继续阅读\"[如何退出](#how-to-opt-" +"out)\"。" #: ../../source/ref-telemetry.md:12 msgid "" -"**Anonymous:** The reported usage metrics are anonymous and do not " -"contain any personally identifiable information (PII). See “[Collected " -"metrics](#collected-metrics)” to understand what metrics are being " -"reported." +"**Anonymous:** The reported usage metrics are anonymous and do not contain " +"any personally identifiable information (PII). See “[Collected metrics]" +"(#collected-metrics)” to understand what metrics are being reported." 
msgstr "" -"**匿名:** 报告的使用指标是匿名的,不包含任何个人身份信息 (PII)。请参阅\"[收集的指标](#collected-metrics) " -"\"了解报告的指标。" +"**匿名:** 报告的使用指标是匿名的,不包含任何个人身份信息 (PII)。请参阅\"[收" +"集的指标](#collected-metrics) \"了解报告的指标。" #: ../../source/ref-telemetry.md:13 msgid "" "**Transparent:** You can easily inspect what anonymous metrics are being " -"reported; see the section “[How to inspect what is being reported](#how-" -"to-inspect-what-is-being-reported)”" +"reported; see the section “[How to inspect what is being reported](#how-to-" +"inspect-what-is-being-reported)”" msgstr "" -"**透明:** 您可以轻松查看正在报告的匿名指标;请参阅\"[如何查看正在报告的指标](#how-to-inspect-what-is-" -"being-reported)\"部分" +"**透明:** 您可以轻松查看正在报告的匿名指标;请参阅\"[如何查看正在报告的指标]" +"(#how-to-inspect-what-is-being-reported)\"部分" #: ../../source/ref-telemetry.md:14 msgid "" -"**Open for feedback:** You can always reach out to us if you have " -"feedback; see the section “[How to contact us](#how-to-contact-us)” for " -"details." -msgstr "**欢迎反馈:** 如果您有反馈意见,可以随时联系我们;详情请参见\"[如何联系我们](#how-to-contact-us) \"部分。" +"**Open for feedback:** You can always reach out to us if you have feedback; " +"see the section “[How to contact us](#how-to-contact-us)” for details." +msgstr "" +"**欢迎反馈:** 如果您有反馈意见,可以随时联系我们;详情请参见\"[如何联系我们]" +"(#how-to-contact-us) \"部分。" #: ../../source/ref-telemetry.md:16 msgid "How to opt-out" @@ -19516,17 +19650,17 @@ msgid "" "client, simply do so by prepending your command as in:" msgstr "" "Flower 启动时,会检查环境变量 `FLWR_TELEMETRY_ENABLED` 是否存在。通过设置 " -"`FLWR_TELEMETRY_ENABLED=0` 可以轻松禁用遥测功能。假设你启动的是 Flower " -"服务器或客户端,只需在命令前添加以下内容即可:" +"`FLWR_TELEMETRY_ENABLED=0` 可以轻松禁用遥测功能。假设你启动的是 Flower 服务器" +"或客户端,只需在命令前添加以下内容即可:" #: ../../source/ref-telemetry.md:24 msgid "" -"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," -" `.bashrc` (or whatever configuration file applies to your environment) " -"to disable Flower telemetry permanently." +"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example, `." +"bashrc` (or whatever configuration file applies to your environment) to " +"disable Flower telemetry permanently." msgstr "" -"或者,你也可以在 `.bashrc`(或任何适用于你的环境的配置文件)中导出 `FLWR_TELEMETRY_ENABLED=0` 来永久禁用 " -"Flower telemetry。" +"或者,你也可以在 `.bashrc`(或任何适用于你的环境的配置文件)中导出 " +"`FLWR_TELEMETRY_ENABLED=0` 来永久禁用 Flower telemetry。" #: ../../source/ref-telemetry.md:26 msgid "Collected metrics" @@ -19538,17 +19672,21 @@ msgstr "Flower 遥测技术收集以下指标:" #: ../../source/ref-telemetry.md:30 msgid "" -"**Flower version.** Understand which versions of Flower are currently " -"being used. This helps us to decide whether we should invest effort into " -"releasing a patch version for an older version of Flower or instead use " -"the bandwidth to build new features." -msgstr "**了解目前使用的 Flower 版本。这有助于我们决定是否应该投入精力为旧版本的 Flower 发布补丁版本,还是利用带宽来构建新功能。" +"**Flower version.** Understand which versions of Flower are currently being " +"used. This helps us to decide whether we should invest effort into releasing " +"a patch version for an older version of Flower or instead use the bandwidth " +"to build new features." 
+msgstr "" +"**了解目前使用的 Flower 版本。这有助于我们决定是否应该投入精力为旧版本的 " +"Flower 发布补丁版本,还是利用带宽来构建新功能。" #: ../../source/ref-telemetry.md:32 msgid "" "**Operating system.** Enables us to answer questions such as: *Should we " "create more guides for Linux, macOS, or Windows?*" -msgstr "**操作系统**使我们能够回答以下问题: *我们应该为 Linux、macOS 还是 Windows 创建更多指南?*" +msgstr "" +"**操作系统**使我们能够回答以下问题: *我们应该为 Linux、macOS 还是 Windows 创" +"建更多指南?*" #: ../../source/ref-telemetry.md:34 msgid "" @@ -19556,73 +19694,82 @@ msgid "" "decide whether we should invest effort into supporting old versions of " "Python or stop supporting them and start taking advantage of new Python " "features." -msgstr "**例如,了解 Python 版本有助于我们决定是否应该投入精力支持旧版本的 Python,还是停止支持这些版本并开始利用新的 Python 功能。" +msgstr "" +"**例如,了解 Python 版本有助于我们决定是否应该投入精力支持旧版本的 Python,还" +"是停止支持这些版本并开始利用新的 Python 功能。" #: ../../source/ref-telemetry.md:36 msgid "" -"**Hardware properties.** Understanding the hardware environment that " -"Flower is being used in helps to decide whether we should, for example, " -"put more effort into supporting low-resource environments." -msgstr "**硬件属性** 了解 Flower 的硬件使用环境,有助于决定我们是否应在支持低资源环境等方面投入更多精力。" +"**Hardware properties.** Understanding the hardware environment that Flower " +"is being used in helps to decide whether we should, for example, put more " +"effort into supporting low-resource environments." +msgstr "" +"**硬件属性** 了解 Flower 的硬件使用环境,有助于决定我们是否应在支持低资源环境" +"等方面投入更多精力。" #: ../../source/ref-telemetry.md:38 msgid "" -"**Execution mode.** Knowing what execution mode Flower starts in enables " -"us to understand how heavily certain features are being used and better " +"**Execution mode.** Knowing what execution mode Flower starts in enables us " +"to understand how heavily certain features are being used and better " "prioritize based on that." -msgstr "** 执行模式** 了解 Flower 的启动执行模式,能让我们了解某些功能的使用率,并据此更好地确定优先级。" +msgstr "" +"** 执行模式** 了解 Flower 的启动执行模式,能让我们了解某些功能的使用率,并据" +"此更好地确定优先级。" #: ../../source/ref-telemetry.md:40 msgid "" "**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " "time a Flower workload starts. This allows us to understand which device " -"types not only start Flower workloads but also successfully complete " -"them." +"types not only start Flower workloads but also successfully complete them." msgstr "" -"**每次 Flower 工作负载启动时,Flower 遥测都会随机分配一个内存集群 ID。这样,我们就能了解哪些设备类型不仅启动了 Flower " -"工作负载,而且还成功完成了它们。" +"**每次 Flower 工作负载启动时,Flower 遥测都会随机分配一个内存集群 ID。这样," +"我们就能了解哪些设备类型不仅启动了 Flower 工作负载,而且还成功完成了它们。" #: ../../source/ref-telemetry.md:42 msgid "" -"**Source.** Flower telemetry tries to store a random source ID in " -"`~/.flwr/source` the first time a telemetry event is generated. The " -"source ID is important to identify whether an issue is recurring or " -"whether an issue is triggered by multiple clusters running concurrently " -"(which often happens in simulation). For example, if a device runs " -"multiple workloads at the same time, and this results in an issue, then, " -"in order to reproduce the issue, multiple workloads must be started at " -"the same time." +"**Source.** Flower telemetry tries to store a random source ID in `~/.flwr/" +"source` the first time a telemetry event is generated. The source ID is " +"important to identify whether an issue is recurring or whether an issue is " +"triggered by multiple clusters running concurrently (which often happens in " +"simulation). 
For example, if a device runs multiple workloads at the same " +"time, and this results in an issue, then, in order to reproduce the issue, " +"multiple workloads must be started at the same time." msgstr "" -"**Source.** Flower 遥测会在第一次生成遥测事件时,尝试在 `~/.flwr/source` 中存储一个随机源 ID。源 ID " -"对于识别问题是否反复出现或问题是否由多个集群同时运行触发(这在模拟中经常发生)非常重要。例如,如果设备同时运行多个工作负载并导致问题,那么为了重现问题,必须同时启动多个工作负载。" +"**Source.** Flower 遥测会在第一次生成遥测事件时,尝试在 `~/.flwr/source` 中存" +"储一个随机源 ID。源 ID 对于识别问题是否反复出现或问题是否由多个集群同时运行触" +"发(这在模拟中经常发生)非常重要。例如,如果设备同时运行多个工作负载并导致问" +"题,那么为了重现问题,必须同时启动多个工作负载。" #: ../../source/ref-telemetry.md:44 msgid "" -"You may delete the source ID at any time. If you wish for all events " -"logged under a specific source ID to be deleted, you can send a deletion " -"request mentioning the source ID to `telemetry@flower.ai`. All events " -"related to that source ID will then be permanently deleted." +"You may delete the source ID at any time. If you wish for all events logged " +"under a specific source ID to be deleted, you can send a deletion request " +"mentioning the source ID to `telemetry@flower.ai`. All events related to " +"that source ID will then be permanently deleted." msgstr "" -"您可以随时删除源 ID。如果您希望删除特定源 ID 下记录的所有事件,可以向 `telemetry@flower.ai` 发送删除请求,并提及该源" -" ID。届时,与该源 ID 相关的所有事件都将被永久删除。" +"您可以随时删除源 ID。如果您希望删除特定源 ID 下记录的所有事件,可以向 " +"`telemetry@flower.ai` 发送删除请求,并提及该源 ID。届时,与该源 ID 相关的所有" +"事件都将被永久删除。" #: ../../source/ref-telemetry.md:46 msgid "" -"We will not collect any personally identifiable information. If you think" -" any of the metrics collected could be misused in any way, please [get in" -" touch with us](#how-to-contact-us). We will update this page to reflect " -"any changes to the metrics collected and publish changes in the " -"changelog." +"We will not collect any personally identifiable information. If you think " +"any of the metrics collected could be misused in any way, please [get in " +"touch with us](#how-to-contact-us). We will update this page to reflect any " +"changes to the metrics collected and publish changes in the changelog." msgstr "" -"我们不会收集任何个人身份信息。如果您认为所收集的任何指标可能以任何方式被滥用,请[与我们联系](#how-to-contact-" -"us)。我们将更新本页面,以反映对所收集指标的任何更改,并在更新日志中公布更改内容。" +"我们不会收集任何个人身份信息。如果您认为所收集的任何指标可能以任何方式被滥" +"用,请[与我们联系](#how-to-contact-us)。我们将更新本页面,以反映对所收集指" +"标的任何更改,并在更新日志中公布更改内容。" #: ../../source/ref-telemetry.md:48 msgid "" "If you think other metrics would be helpful for us to better guide our " "decisions, please let us know! We will carefully review them; if we are " "confident that they do not compromise user privacy, we may add them." -msgstr "如果您认为其他指标有助于我们更好地指导决策,请告诉我们!我们将仔细审查这些指标;如果我们确信它们不会损害用户隐私,我们可能会添加这些指标。" +msgstr "" +"如果您认为其他指标有助于我们更好地指导决策,请告诉我们!我们将仔细审查这些指" +"标;如果我们确信它们不会损害用户隐私,我们可能会添加这些指标。" #: ../../source/ref-telemetry.md:50 msgid "How to inspect what is being reported" @@ -19631,21 +19778,23 @@ msgstr "如何检查报告中的内容" #: ../../source/ref-telemetry.md:52 msgid "" "We wanted to make it very easy for you to inspect what anonymous usage " -"metrics are reported. You can view all the reported telemetry information" -" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " -"is disabled by default. You may use logging independently from " +"metrics are reported. You can view all the reported telemetry information by " +"setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging is " +"disabled by default. You may use logging independently from " "`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " "without sending any metrics." 
msgstr "" -"我们希望能让您轻松查看所报告的匿名使用指标。通过设置环境变量 `FLWR_TELEMETRY_LOGGING=1` " -"可以查看所有报告的遥测信息。日志记录默认为禁用。您可以不使用 `FLWR_TELEMETRY_ENABLED` " -"而单独使用日志记录,这样就可以在不发送任何指标的情况下检查遥测功能。" +"我们希望能让您轻松查看所报告的匿名使用指标。通过设置环境变量 " +"`FLWR_TELEMETRY_LOGGING=1` 可以查看所有报告的遥测信息。日志记录默认为禁用。您" +"可以不使用 `FLWR_TELEMETRY_ENABLED` 而单独使用日志记录,这样就可以在不发送任" +"何指标的情况下检查遥测功能。" #: ../../source/ref-telemetry.md:58 msgid "" -"The inspect Flower telemetry without sending any anonymous usage metrics," -" use both environment variables:" -msgstr "在不发送任何匿名使用指标的情况下检查 Flower 遥测,可使用这两个环境变量:" +"The inspect Flower telemetry without sending any anonymous usage metrics, " +"use both environment variables:" +msgstr "" +"在不发送任何匿名使用指标的情况下检查 Flower 遥测,可使用这两个环境变量:" #: ../../source/ref-telemetry.md:64 msgid "How to contact us" @@ -19658,14 +19807,16 @@ msgid "" "[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " "(`telemetry@flower.ai`)." msgstr "" -"我们希望听到您的意见。如果您对如何改进我们处理匿名使用指标的方式有任何反馈或想法,请通过 [Slack](https://flower.ai" -"/join-slack/) (频道 `#telemetry`)或电子邮件 (`telemetry@flower.ai`)与我们联系。" +"我们希望听到您的意见。如果您对如何改进我们处理匿名使用指标的方式有任何反馈或" +"想法,请通过 [Slack](https://flower.ai/join-slack/) (频道 `#telemetry`)或电" +"子邮件 (`telemetry@flower.ai`)与我们联系。" #: ../../source/tutorial-quickstart-android.rst:-1 msgid "" -"Read this Federated Learning quickstart tutorial for creating an Android " -"app using Flower." -msgstr "阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 Android 应用程序。" +"Read this Federated Learning quickstart tutorial for creating an Android app " +"using Flower." +msgstr "" +"阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 Android 应用程序。" #: ../../source/tutorial-quickstart-android.rst:5 msgid "Quickstart Android" @@ -19673,24 +19824,24 @@ msgstr "快速入门 Android" #: ../../source/tutorial-quickstart-android.rst:10 msgid "" -"Let's build a federated learning system using TFLite and Flower on " -"Android!" +"Let's build a federated learning system using TFLite and Flower on Android!" msgstr "让我们在 Android 上使用 TFLite 和 Flower 构建一个联邦学习系统!" #: ../../source/tutorial-quickstart-android.rst:12 msgid "" -"Please refer to the `full code example " -"`_ to learn " -"more." +"Please refer to the `full code example `_ to learn more." msgstr "" -"请参阅`完整代码示例 " -"`_了解更多信息。" +"请参阅`完整代码示例 `_了解更多信息。" #: ../../source/tutorial-quickstart-fastai.rst:-1 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with FastAI to train a vision model on CIFAR-10." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 FastAI 在 CIFAR-10 上训练视觉模型。" +"Check out this Federated Learning quickstart tutorial for using Flower with " +"FastAI to train a vision model on CIFAR-10." +msgstr "" +"查看此联邦学习快速入门教程,了解如何使用 Flower 和 FastAI 在 CIFAR-10 上训练" +"视觉模型。" #: ../../source/tutorial-quickstart-fastai.rst:5 msgid "Quickstart fastai" @@ -19702,18 +19853,19 @@ msgstr "让我们用 fastai 和 Flower 建立一个联邦学习系统!" #: ../../source/tutorial-quickstart-fastai.rst:12 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"Please refer to the `full code example `_ to learn more." msgstr "" -"请参阅 `完整代码示例 `_了解更多信息。" +"请参阅 `完整代码示例 `_了解更多信息。" #: ../../source/tutorial-quickstart-huggingface.rst:-1 msgid "" -"Check out this Federating Learning quickstart tutorial for using Flower " -"with HuggingFace Transformers in order to fine-tune an LLM." -msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 HuggingFace Transformers 来微调 LLM。" +"Check out this Federating Learning quickstart tutorial for using Flower with " +"HuggingFace Transformers in order to fine-tune an LLM." 
+msgstr "" +"查看此联邦学习 快速入门教程,了解如何使用 Flower 和 HuggingFace Transformers " +"来微调 LLM。" #: ../../source/tutorial-quickstart-huggingface.rst:5 msgid "Quickstart 🤗 Transformers" @@ -19721,20 +19873,21 @@ msgstr "🤗 Transformers快速入门" #: ../../source/tutorial-quickstart-huggingface.rst:10 msgid "" -"Let's build a federated learning system using Hugging Face Transformers " -"and Flower!" +"Let's build a federated learning system using Hugging Face Transformers and " +"Flower!" msgstr "让我们用Hugging Face Transformers和Flower来构建一个联邦学习系统!" #: ../../source/tutorial-quickstart-huggingface.rst:12 msgid "" -"We will leverage Hugging Face to federate the training of language models" -" over multiple clients using Flower. More specifically, we will fine-tune" -" a pre-trained Transformer model (distilBERT) for sequence classification" -" over a dataset of IMDB ratings. The end goal is to detect if a movie " -"rating is positive or negative." +"We will leverage Hugging Face to federate the training of language models " +"over multiple clients using Flower. More specifically, we will fine-tune a " +"pre-trained Transformer model (distilBERT) for sequence classification over " +"a dataset of IMDB ratings. The end goal is to detect if a movie rating is " +"positive or negative." msgstr "" -"我们将利用Hugging Face技术,使用 Flower 在多个客户端上联邦训练语言模型。更具体地说,我们将对预先训练好的 " -"Transformer 模型(distilBERT)进行微调,以便在 IMDB 评分数据集上进行序列分类。最终目标是检测电影评分是正面还是负面。" +"我们将利用Hugging Face技术,使用 Flower 在多个客户端上联邦训练语言模型。更具" +"体地说,我们将对预先训练好的 Transformer 模型(distilBERT)进行微调,以便在 " +"IMDB 评分数据集上进行序列分类。最终目标是检测电影评分是正面还是负面。" #: ../../source/tutorial-quickstart-huggingface.rst:18 msgid "Dependencies" @@ -19743,12 +19896,12 @@ msgstr "依赖关系" #: ../../source/tutorial-quickstart-huggingface.rst:20 msgid "" "To follow along this tutorial you will need to install the following " -"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, " -":code:`torch`, and :code:`transformers`. This can be done using " -":code:`pip`:" +"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, :code:`torch`, " +"and :code:`transformers`. This can be done using :code:`pip`:" msgstr "" -"要学习本教程,您需要安装以下软件包: :code:`datasets`、 :code:`evaluate`、 :code:`flwr`、 " -":code:`torch`和 :code:`transformers`。这可以通过 :code:`pip` 来完成:" +"要学习本教程,您需要安装以下软件包: :code:`datasets`、 :code:`evaluate`、 :" +"code:`flwr`、 :code:`torch`和 :code:`transformers`。这可以通过 :code:`pip` 来" +"完成:" #: ../../source/tutorial-quickstart-huggingface.rst:30 msgid "Standard Hugging Face workflow" @@ -19764,8 +19917,9 @@ msgid "" "library. We then need to tokenize the data and create :code:`PyTorch` " "dataloaders, this is all done in the :code:`load_data` function:" msgstr "" -"为了获取 IMDB 数据集,我们将使用 Hugging Face 的 :code:`datasets` 库。然后,我们需要对数据进行标记化,并创建" -" :code:`PyTorch` 数据加载器,这些都将在 :code:`load_data` 函数中完成:" +"为了获取 IMDB 数据集,我们将使用 Hugging Face 的 :code:`datasets` 库。然后," +"我们需要对数据进行标记化,并创建 :code:`PyTorch` 数据加载器,这些都将在 :" +"code:`load_data` 函数中完成:" #: ../../source/tutorial-quickstart-huggingface.rst:81 msgid "Training and testing the model" @@ -19773,12 +19927,12 @@ msgstr "训练和测试模型" #: ../../source/tutorial-quickstart-huggingface.rst:83 msgid "" -"Once we have a way of creating our trainloader and testloader, we can " -"take care of the training and testing. This is very similar to any " -":code:`PyTorch` training or testing loop:" +"Once we have a way of creating our trainloader and testloader, we can take " +"care of the training and testing. 
This is very similar to any :code:" +"`PyTorch` training or testing loop:" msgstr "" -"有了创建 trainloader 和 testloader 的方法后,我们就可以进行训练和测试了。这与任何 :code:`PyTorch` " -"训练或测试循环都非常相似:" +"有了创建 trainloader 和 testloader 的方法后,我们就可以进行训练和测试了。这与" +"任何 :code:`PyTorch` 训练或测试循环都非常相似:" #: ../../source/tutorial-quickstart-huggingface.rst:121 msgid "Creating the model itself" @@ -19786,11 +19940,11 @@ msgstr "创建模型本身" #: ../../source/tutorial-quickstart-huggingface.rst:123 msgid "" -"To create the model itself, we will just load the pre-trained distillBERT" -" model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" +"To create the model itself, we will just load the pre-trained distillBERT " +"model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" msgstr "" -"要创建模型本身,我们只需使用 Hugging Face 的 :code:`AutoModelForSequenceClassification` " -"加载预训练的 distillBERT 模型:" +"要创建模型本身,我们只需使用 Hugging Face 的 :code:" +"`AutoModelForSequenceClassification` 加载预训练的 distillBERT 模型:" #: ../../source/tutorial-quickstart-huggingface.rst:136 msgid "Federating the example" @@ -19803,23 +19957,24 @@ msgstr "创建 IMDBClient" #: ../../source/tutorial-quickstart-huggingface.rst:141 msgid "" "To federate our example to multiple clients, we first need to write our " -"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). " -"This is very easy, as our model is a standard :code:`PyTorch` model:" +"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). This " +"is very easy, as our model is a standard :code:`PyTorch` model:" msgstr "" -"要将我们的示例联邦到多个客户端,我们首先需要编写 Flower 客户端类(继承自 " -":code:`flwr.client.NumPyClient`)。这很容易,因为我们的模型是一个标准的 :code:`PyTorch` 模型:" +"要将我们的示例联邦到多个客户端,我们首先需要编写 Flower 客户端类(继承自 :" +"code:`flwr.client.NumPyClient`)。这很容易,因为我们的模型是一个标准的 :code:" +"`PyTorch` 模型:" #: ../../source/tutorial-quickstart-huggingface.rst:169 msgid "" "The :code:`get_parameters` function lets the server get the client's " -"parameters. Inversely, the :code:`set_parameters` function allows the " -"server to send its parameters to the client. Finally, the :code:`fit` " -"function trains the model locally for the client, and the " -":code:`evaluate` function tests the model locally and returns the " -"relevant metrics." +"parameters. Inversely, the :code:`set_parameters` function allows the server " +"to send its parameters to the client. Finally, the :code:`fit` function " +"trains the model locally for the client, and the :code:`evaluate` function " +"tests the model locally and returns the relevant metrics." msgstr "" -":code:`get_parameters` " -"函数允许服务器获取客户端的参数。相反,:code:`set_parameters`函数允许服务器将其参数发送给客户端。最后,:code:`fit`函数在本地为客户端训练模型,:code:`evaluate`函数在本地测试模型并返回相关指标。" +":code:`get_parameters` 函数允许服务器获取客户端的参数。相反,:code:" +"`set_parameters`函数允许服务器将其参数发送给客户端。最后,:code:`fit`函数在本" +"地为客户端训练模型,:code:`evaluate`函数在本地测试模型并返回相关指标。" #: ../../source/tutorial-quickstart-huggingface.rst:175 msgid "Starting the server" @@ -19827,25 +19982,26 @@ msgstr "启动服务器" #: ../../source/tutorial-quickstart-huggingface.rst:177 msgid "" -"Now that we have a way to instantiate clients, we need to create our " -"server in order to aggregate the results. 
Using Flower, this can be done " -"very easily by first choosing a strategy (here, we are using " -":code:`FedAvg`, which will define the global weights as the average of " -"all the clients' weights at each round) and then using the " -":code:`flwr.server.start_server` function:" +"Now that we have a way to instantiate clients, we need to create our server " +"in order to aggregate the results. Using Flower, this can be done very " +"easily by first choosing a strategy (here, we are using :code:`FedAvg`, " +"which will define the global weights as the average of all the clients' " +"weights at each round) and then using the :code:`flwr.server.start_server` " +"function:" msgstr "" -"现在我们有了实例化客户端的方法,我们需要创建服务器,以便汇总结果。使用 Flower,首先选择一个策略(这里我们使用 " -":code:`FedAvg`,它将把全局模型参数定义为每轮所有客户端模型参数的平均值),然后使用 " -":code:`flwr.server.start_server`函数,就可以非常轻松地完成这项工作:" +"现在我们有了实例化客户端的方法,我们需要创建服务器,以便汇总结果。使用 " +"Flower,首先选择一个策略(这里我们使用 :code:`FedAvg`,它将把全局模型参数定义" +"为每轮所有客户端模型参数的平均值),然后使用 :code:`flwr.server.start_server`" +"函数,就可以非常轻松地完成这项工作:" #: ../../source/tutorial-quickstart-huggingface.rst:205 msgid "" -"The :code:`weighted_average` function is there to provide a way to " -"aggregate the metrics distributed amongst the clients (basically this " -"allows us to display a nice average accuracy and loss for every round)." +"The :code:`weighted_average` function is there to provide a way to aggregate " +"the metrics distributed amongst the clients (basically this allows us to " +"display a nice average accuracy and loss for every round)." msgstr "" -"使用 :code:`weighted_average` " -"函数是为了提供一种方法来汇总分布在客户端的指标(基本上,这可以让我们显示每一轮的平均精度和损失值)。" +"使用 :code:`weighted_average` 函数是为了提供一种方法来汇总分布在客户端的指标" +"(基本上,这可以让我们显示每一轮的平均精度和损失值)。" #: ../../source/tutorial-quickstart-huggingface.rst:209 msgid "Putting everything together" @@ -19864,33 +20020,38 @@ msgstr "他们就能连接到服务器,开始联邦训练。" #: ../../source/tutorial-quickstart-huggingface.rst:223 #, fuzzy msgid "" -"If you want to check out everything put together, you should check out " -"the `full code example `_ ." +"If you want to check out everything put together, you should check out the " +"`full code example `_ ." msgstr "" -"如果您想查看所有内容,请查看完整的代码示例: [https://github.com/adap/flower/tree/main/examples" -"/quickstart-" -"huggingface](https://github.com/adap/flower/tree/main/examples" -"/quickstart-huggingface)." +"如果您想查看所有内容,请查看完整的代码示例: [https://github.com/adap/flower/" +"tree/main/examples/quickstart-huggingface](https://github.com/adap/flower/" +"tree/main/examples/quickstart-huggingface)." #: ../../source/tutorial-quickstart-huggingface.rst:226 msgid "" -"Of course, this is a very basic example, and a lot can be added or " -"modified, it was just to showcase how simply we could federate a Hugging " -"Face workflow using Flower." -msgstr "当然,这只是一个非常基本的示例,还可以添加或修改很多内容,只是为了展示我们可以如何简单地使用 Flower 联合Hugging Face的工作流程。" +"Of course, this is a very basic example, and a lot can be added or modified, " +"it was just to showcase how simply we could federate a Hugging Face workflow " +"using Flower." +msgstr "" +"当然,这只是一个非常基本的示例,还可以添加或修改很多内容,只是为了展示我们可" +"以如何简单地使用 Flower 联合Hugging Face的工作流程。" #: ../../source/tutorial-quickstart-huggingface.rst:229 msgid "" -"Note that in this example we used :code:`PyTorch`, but we could have very" -" well used :code:`TensorFlow`." -msgstr "请注意,在本例中我们使用了 :code:`PyTorch`,但也完全可以使用 :code:`TensorFlow`。" +"Note that in this example we used :code:`PyTorch`, but we could have very " +"well used :code:`TensorFlow`." 
+msgstr "" +"请注意,在本例中我们使用了 :code:`PyTorch`,但也完全可以使用 :code:" +"`TensorFlow`。" #: ../../source/tutorial-quickstart-ios.rst:-1 msgid "" "Read this Federated Learning quickstart tutorial for creating an iOS app " "using Flower to train a neural network on MNIST." -msgstr "阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 iOS 应用程序,并在 MNIST 上训练神经网络。" +msgstr "" +"阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 iOS 应用程序,并在 " +"MNIST 上训练神经网络。" #: ../../source/tutorial-quickstart-ios.rst:5 msgid "Quickstart iOS" @@ -19898,43 +20059,52 @@ msgstr "快速入门 iOS" #: ../../source/tutorial-quickstart-ios.rst:10 msgid "" -"In this tutorial we will learn how to train a Neural Network on MNIST " -"using Flower and CoreML on iOS devices." -msgstr "在本教程中,我们将学习如何在 iOS 设备上使用 Flower 和 CoreML 在 MNIST 上训练神经网络。" +"In this tutorial we will learn how to train a Neural Network on MNIST using " +"Flower and CoreML on iOS devices." +msgstr "" +"在本教程中,我们将学习如何在 iOS 设备上使用 Flower 和 CoreML 在 MNIST 上训练" +"神经网络。" #: ../../source/tutorial-quickstart-ios.rst:12 #, fuzzy msgid "" "First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a :doc:`virtualenv" -" `. For the Flower client " +"create a virtual environment and run everything within a :doc:`virtualenv " +"`. For the Flower client " "implementation in iOS, it is recommended to use Xcode as our IDE." msgstr "" "首先,为了运行 Flower Python 服务器,建议创建一个虚拟环境,并在 `virtualenv " -"`_ 中运行一切。对于在 iOS 中实现 " -"Flower 客户端,建议使用 Xcode 作为我们的集成开发环境。" +"`_ 中运行一切。对于在 iOS " +"中实现 Flower 客户端,建议使用 Xcode 作为我们的集成开发环境。" #: ../../source/tutorial-quickstart-ios.rst:15 msgid "" -"Our example consists of one Python *server* and two iPhone *clients* that" -" all have the same model." -msgstr "我们的示例包括一个 Python *服务器*和两个 iPhone *客户端*,它们都具有相同的模型。" +"Our example consists of one Python *server* and two iPhone *clients* that " +"all have the same model." +msgstr "" +"我们的示例包括一个 Python *服务器*和两个 iPhone *客户端*,它们都具有相同的模" +"型。" #: ../../source/tutorial-quickstart-ios.rst:17 msgid "" -"*Clients* are responsible for generating individual weight updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." -msgstr "*客户端*负责根据其本地数据集为模型生成独立的模型参数。然后,这些参数更新会被发送到*服务器*,由*服务器*汇总后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的参数更新周期称为一*轮*。" +"*Clients* are responsible for generating individual weight updates for the " +"model based on their local datasets. These updates are then sent to the " +"*server* which will aggregate them to produce a better model. Finally, the " +"*server* sends this improved version of the model back to each *client*. A " +"complete cycle of weight updates is called a *round*." +msgstr "" +"*客户端*负责根据其本地数据集为模型生成独立的模型参数。然后,这些参数更新会被" +"发送到*服务器*,由*服务器*汇总后生成一个更好的模型。最后,*服务器*将改进后的" +"模型发送回每个*客户端*。一个完整的参数更新周期称为一*轮*。" #: ../../source/tutorial-quickstart-ios.rst:21 msgid "" "Now that we have a rough idea of what is going on, let's get started to " -"setup our Flower server environment. We first need to install Flower. You" -" can do this by using pip:" -msgstr "现在我们已经有了一个大致的概念,让我们开始设置 Flower 服务器环境吧。首先,我们需要安装 Flower。你可以使用 pip 来安装:" +"setup our Flower server environment. We first need to install Flower. 
You " +"can do this by using pip:" +msgstr "" +"现在我们已经有了一个大致的概念,让我们开始设置 Flower 服务器环境吧。首先,我" +"们需要安装 Flower。你可以使用 pip 来安装:" #: ../../source/tutorial-quickstart-ios.rst:27 msgid "Or Poetry:" @@ -19952,29 +20122,29 @@ msgstr "Flower 客户端" #: ../../source/tutorial-quickstart-ios.rst:36 msgid "" "Now that we have all our dependencies installed, let's run a simple " -"distributed training using CoreML as our local training pipeline and " -"MNIST as our dataset. For simplicity reasons we will use the complete " -"Flower client with CoreML, that has been implemented and stored inside " -"the Swift SDK. The client implementation can be seen below:" +"distributed training using CoreML as our local training pipeline and MNIST " +"as our dataset. For simplicity reasons we will use the complete Flower " +"client with CoreML, that has been implemented and stored inside the Swift " +"SDK. The client implementation can be seen below:" msgstr "" -"现在我们已经安装了所有依赖项,让我们使用 CoreML 作为本地训练框架和 MNIST " -"作为数据集,运行一个简单的分布式训练。为了简单起见,我们将使用 CoreML 的完整 Flower 客户端,该客户端已在 Swift SDK " -"中实现并存储。客户端实现如下:" +"现在我们已经安装了所有依赖项,让我们使用 CoreML 作为本地训练框架和 MNIST 作为" +"数据集,运行一个简单的分布式训练。为了简单起见,我们将使用 CoreML 的完整 " +"Flower 客户端,该客户端已在 Swift SDK 中实现并存储。客户端实现如下:" #: ../../source/tutorial-quickstart-ios.rst:72 msgid "" -"Let's create a new application project in Xcode and add :code:`flwr` as a" -" dependency in your project. For our application, we will store the logic" -" of our app in :code:`FLiOSModel.swift` and the UI elements in " -":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" -" in this quickstart. Please refer to the `full code example " -"`_ to learn more " -"about the app." +"Let's create a new application project in Xcode and add :code:`flwr` as a " +"dependency in your project. For our application, we will store the logic of " +"our app in :code:`FLiOSModel.swift` and the UI elements in :code:" +"`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift` in this " +"quickstart. Please refer to the `full code example `_ to learn more about the app." msgstr "" -"让我们在 Xcode 中创建一个新的应用程序项目,并在项目中添加 :code:`flwr` 作为依赖关系。对于我们的应用程序,我们将在 " -":code:`FLiOSModel.swift` 中存储应用程序的逻辑,在 :code:`ContentView.swift` 中存储 UI " -"元素。在本快速入门中,我们将更多地关注 :code:`FLiOSModel.swift`。请参阅 `完整代码示例 " -"`_ 以了解更多有关应用程序的信息。" +"让我们在 Xcode 中创建一个新的应用程序项目,并在项目中添加 :code:`flwr` 作为依" +"赖关系。对于我们的应用程序,我们将在 :code:`FLiOSModel.swift` 中存储应用程序" +"的逻辑,在 :code:`ContentView.swift` 中存储 UI 元素。在本快速入门中,我们将更" +"多地关注 :code:`FLiOSModel.swift`。请参阅 `完整代码示例 `_ 以了解更多有关应用程序的信息。" #: ../../source/tutorial-quickstart-ios.rst:75 msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" @@ -19983,30 +20153,30 @@ msgstr "在 :code:`FLiOSModel.swift` 中导入 Flower 和 CoreML 相关软件包 #: ../../source/tutorial-quickstart-ios.rst:83 msgid "" "Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " -"will be bundled inside the application during deployment to your iOS " -"device. We need to pass the url to access mlmodel and run CoreML machine " -"learning processes, it can be retrieved by calling the function " -":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " -"into :code:`MLBatchProvider` object. The preprocessing is done inside " -":code:`DataLoader.swift`." 
-msgstr "" -"然后通过拖放将 mlmodel 添加到项目中,在部署到 iOS 设备时,mlmodel 将被捆绑到应用程序中。我们需要传递 url 以访问 " -"mlmodel 并运行 CoreML 机器学习进程,可通过调用函数 :code:`Bundle.main.url` 获取。对于 MNIST " -"数据集,我们需要将其预处理为 :code:`MLBatchProvider` 对象。预处理在 :code:`DataLoader.swift` " -"中完成。" +"will be bundled inside the application during deployment to your iOS device. " +"We need to pass the url to access mlmodel and run CoreML machine learning " +"processes, it can be retrieved by calling the function :code:`Bundle.main." +"url`. For the MNIST dataset, we need to preprocess it into :code:" +"`MLBatchProvider` object. The preprocessing is done inside :code:`DataLoader." +"swift`." +msgstr "" +"然后通过拖放将 mlmodel 添加到项目中,在部署到 iOS 设备时,mlmodel 将被捆绑到" +"应用程序中。我们需要传递 url 以访问 mlmodel 并运行 CoreML 机器学习进程,可通" +"过调用函数 :code:`Bundle.main.url` 获取。对于 MNIST 数据集,我们需要将其预处" +"理为 :code:`MLBatchProvider` 对象。预处理在 :code:`DataLoader.swift` 中完成。" #: ../../source/tutorial-quickstart-ios.rst:99 #, fuzzy msgid "" -"Since CoreML does not allow the model parameters to be seen before " -"training, and accessing the model parameters during or after the training" -" can only be done by specifying the layer name, we need to know this " -"information beforehand, through looking at the model specification, which" -" are written as proto files. The implementation can be seen in " -":code:`MLModelInspect`." +"Since CoreML does not allow the model parameters to be seen before training, " +"and accessing the model parameters during or after the training can only be " +"done by specifying the layer name, we need to know this information " +"beforehand, through looking at the model specification, which are written as " +"proto files. The implementation can be seen in :code:`MLModelInspect`." msgstr "" -"由于 CoreML 不允许在训练前查看模型参数,而在训练过程中或训练后访问模型参数只能通过指定层名来完成,因此我们需要事先通过查看模型规范(写成 " -"proto 文件)来了解这些信息。具体实现可参见 :code:`MLModelInspect`。" +"由于 CoreML 不允许在训练前查看模型参数,而在训练过程中或训练后访问模型参数只" +"能通过指定层名来完成,因此我们需要事先通过查看模型规范(写成 proto 文件)来了" +"解这些信息。具体实现可参见 :code:`MLModelInspect`。" #: ../../source/tutorial-quickstart-ios.rst:102 #, fuzzy @@ -20017,22 +20187,25 @@ msgstr "获得所有必要信息后,让我们创建 Flower 客户端。" #: ../../source/tutorial-quickstart-ios.rst:117 msgid "" -"Then start the Flower gRPC client and start communicating to the server " -"by passing our Flower client to the function :code:`startFlwrGRPC`." -msgstr "然后启动 Flower gRPC 客户端,并通过将 Flower 客户端传递给函数 :code:`startFlwrGRPC` 来开始与服务器通信。" +"Then start the Flower gRPC client and start communicating to the server by " +"passing our Flower client to the function :code:`startFlwrGRPC`." +msgstr "" +"然后启动 Flower gRPC 客户端,并通过将 Flower 客户端传递给函数 :code:" +"`startFlwrGRPC` 来开始与服务器通信。" #: ../../source/tutorial-quickstart-ios.rst:124 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " -"The attribute :code:`hostname` and :code:`port` tells the client which " -"server to connect to. This can be done by entering the hostname and port " -"in the application before clicking the start button to start the " -"federated learning process." +"That's it for the client. We only have to implement :code:`Client` or call " +"the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. The " +"attribute :code:`hostname` and :code:`port` tells the client which server to " +"connect to. This can be done by entering the hostname and port in the " +"application before clicking the start button to start the federated learning " +"process." 
msgstr "" -"这就是客户端。我们只需实现 :code:`Client` 或调用提供的 :code:`MLFlwrClient` 并调用 " -":code:`startFlwrGRPC()`。属性 :code:`hostname` 和 :code:`port` " -"会告诉客户端要连接到哪个服务器。这可以通过在应用程序中输入主机名和端口来实现,然后再点击开始按钮启动联邦学习进程。" +"这就是客户端。我们只需实现 :code:`Client` 或调用提供的 :code:`MLFlwrClient` " +"并调用 :code:`startFlwrGRPC()`。属性 :code:`hostname` 和 :code:`port` 会告诉" +"客户端要连接到哪个服务器。这可以通过在应用程序中输入主机名和端口来实现,然后" +"再点击开始按钮启动联邦学习进程。" #: ../../source/tutorial-quickstart-ios.rst:129 #: ../../source/tutorial-quickstart-mxnet.rst:226 @@ -20049,11 +20222,11 @@ msgstr "Flower 服务器" #: ../../source/tutorial-quickstart-tensorflow.rst:100 msgid "" "For simple workloads we can start a Flower server and leave all the " -"configuration possibilities at their default values. In a file named " -":code:`server.py`, import Flower and start the server:" +"configuration possibilities at their default values. In a file named :code:" +"`server.py`, import Flower and start the server:" msgstr "" -"对于简单的工作负载,我们可以启动 Flower 服务器,并将所有配置选项保留为默认值。在名为 :code:`server.py` 的文件中,导入 " -"Flower 并启动服务器:" +"对于简单的工作负载,我们可以启动 Flower 服务器,并将所有配置选项保留为默认" +"值。在名为 :code:`server.py` 的文件中,导入 Flower 并启动服务器:" #: ../../source/tutorial-quickstart-ios.rst:142 #: ../../source/tutorial-quickstart-mxnet.rst:239 @@ -20069,40 +20242,43 @@ msgstr "联邦训练模型!" #: ../../source/tutorial-quickstart-xgboost.rst:525 msgid "" "With both client and server ready, we can now run everything and see " -"federated learning in action. FL systems usually have a server and " -"multiple clients. We therefore have to start the server first:" -msgstr "客户端和服务器都已准备就绪,我们现在可以运行一切,看看联邦学习的实际效果。FL 系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" +"federated learning in action. FL systems usually have a server and multiple " +"clients. We therefore have to start the server first:" +msgstr "" +"客户端和服务器都已准备就绪,我们现在可以运行一切,看看联邦学习的实际效果。FL " +"系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" #: ../../source/tutorial-quickstart-ios.rst:152 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Build and run the client through your Xcode, one through Xcode" -" Simulator and the other by deploying it to your iPhone. To see more " -"about how to deploy your app to iPhone or Simulator visit `here " -"`_." +"Once the server is running we can start the clients in different terminals. " +"Build and run the client through your Xcode, one through Xcode Simulator and " +"the other by deploying it to your iPhone. To see more about how to deploy " +"your app to iPhone or Simulator visit `here `_." msgstr "" -"服务器运行后,我们就可以在不同的终端启动客户端。通过 Xcode 构建并运行客户端,一个通过 Xcode 模拟器,另一个通过部署到 " -"iPhone。要了解更多有关如何将应用程序部署到 iPhone 或模拟器的信息,请访问 `此处 " -"`_。" +"服务器运行后,我们就可以在不同的终端启动客户端。通过 Xcode 构建并运行客户端," +"一个通过 Xcode 模拟器,另一个通过部署到 iPhone。要了解更多有关如何将应用程序" +"部署到 iPhone 或模拟器的信息,请访问 `此处 `_。" #: ../../source/tutorial-quickstart-ios.rst:156 msgid "" "Congratulations! You've successfully built and run your first federated " -"learning system in your ios device. The full `source code " -"`_ for this " -"example can be found in :code:`examples/ios`." +"learning system in your ios device. The full `source code `_ for this example can be found in :" +"code:`examples/ios`." msgstr "" -"恭喜您! 您已经成功地在 ios 设备中构建并运行了第一个联邦学习系统。本示例的`完整源代码 " -"`_ 可在 " -":code:`examples/ios` 中找到。" +"恭喜您! 您已经成功地在 ios 设备中构建并运行了第一个联邦学习系统。本示例的`完" +"整源代码 `_ 可在 :" +"code:`examples/ios` 中找到。" #: ../../source/tutorial-quickstart-jax.rst:-1 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Jax to train a linear regression model on a scikit-learn dataset." 
-msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 Jax 在 scikit-learn 数据集上训练线性回归模型。" +"Check out this Federated Learning quickstart tutorial for using Flower with " +"Jax to train a linear regression model on a scikit-learn dataset." +msgstr "" +"查看此联邦学习快速入门教程,了解如何使用 Flower 和 Jax 在 scikit-learn 数据集" +"上训练线性回归模型。" #: ../../source/tutorial-quickstart-jax.rst:5 msgid "Quickstart JAX" @@ -20110,9 +20286,11 @@ msgstr "快速入门 JAX" #: ../../source/tutorial-quickstart-mxnet.rst:-1 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with MXNet to train a Sequential model on MNIST." -msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 MXNet 在 MNIST 上训练序列模型。" +"Check out this Federated Learning quickstart tutorial for using Flower with " +"MXNet to train a Sequential model on MNIST." +msgstr "" +"查看此联邦学习 快速入门教程,了解如何使用 Flower 和 MXNet 在 MNIST 上训练序列" +"模型。" #: ../../source/tutorial-quickstart-mxnet.rst:5 msgid "Quickstart MXNet" @@ -20120,53 +20298,59 @@ msgstr "快速入门 MXNet" #: ../../source/tutorial-quickstart-mxnet.rst:7 msgid "" -"MXNet is no longer maintained and has been moved into `Attic " -"`_. As a result, we would " -"encourage you to use other ML frameworks alongside Flower, for example, " -"PyTorch. This tutorial might be removed in future versions of Flower." +"MXNet is no longer maintained and has been moved into `Attic `_. As a result, we would encourage you to " +"use other ML frameworks alongside Flower, for example, PyTorch. This " +"tutorial might be removed in future versions of Flower." msgstr "" #: ../../source/tutorial-quickstart-mxnet.rst:12 msgid "" -"In this tutorial, we will learn how to train a :code:`Sequential` model " -"on MNIST using Flower and MXNet." -msgstr "在本教程中,我们将学习如何使用 Flower 和 MXNet 在 MNIST 上训练 :code:`Sequential` 模型。" +"In this tutorial, we will learn how to train a :code:`Sequential` model on " +"MNIST using Flower and MXNet." +msgstr "" +"在本教程中,我们将学习如何使用 Flower 和 MXNet 在 MNIST 上训练 :code:" +"`Sequential` 模型。" #: ../../source/tutorial-quickstart-mxnet.rst:14 #: ../../source/tutorial-quickstart-scikitlearn.rst:12 #, fuzzy msgid "" -"It is recommended to create a virtual environment and run everything " -"within this :doc:`virtualenv `." +"It is recommended to create a virtual environment and run everything within " +"this :doc:`virtualenv `." msgstr "" -"建议创建一个虚拟环境,并在此 `virtualenv `_ 中运行所有内容。" +"建议创建一个虚拟环境,并在此 `virtualenv `_ 中运行所有内容。" #: ../../source/tutorial-quickstart-mxnet.rst:16 #: ../../source/tutorial-quickstart-pytorch.rst:17 #: ../../source/tutorial-quickstart-scikitlearn.rst:14 msgid "" -"Our example consists of one *server* and two *clients* all having the " -"same model." +"Our example consists of one *server* and two *clients* all having the same " +"model." msgstr "我们的例子包括一个*服务器*和两个*客户端*,它们都有相同的模型。" #: ../../source/tutorial-quickstart-mxnet.rst:18 #: ../../source/tutorial-quickstart-scikitlearn.rst:16 msgid "" -"*Clients* are responsible for generating individual model parameter " -"updates for the model based on their local datasets. These updates are " -"then sent to the *server* which will aggregate them to produce an updated" -" global model. Finally, the *server* sends this improved version of the " -"model back to each *client*. A complete cycle of parameters updates is " -"called a *round*." -msgstr "*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。然后,这些参数更新将被发送到*服务器*,由*服务器*汇总后生成一个更新的全局模型。最后,*服务器*将这一改进版模型发回给每个*客户端*。一个完整的参数更新周期称为一*轮*。" +"*Clients* are responsible for generating individual model parameter updates " +"for the model based on their local datasets. 
These updates are then sent to " +"the *server* which will aggregate them to produce an updated global model. " +"Finally, the *server* sends this improved version of the model back to each " +"*client*. A complete cycle of parameters updates is called a *round*." +msgstr "" +"*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。然后,这些参数更新" +"将被发送到*服务器*,由*服务器*汇总后生成一个更新的全局模型。最后,*服务器*将" +"这一改进版模型发回给每个*客户端*。一个完整的参数更新周期称为一*轮*。" #: ../../source/tutorial-quickstart-mxnet.rst:22 #: ../../source/tutorial-quickstart-scikitlearn.rst:20 msgid "" "Now that we have a rough idea of what is going on, let's get started. We " "first need to install Flower. You can do this by running:" -msgstr "现在,我们已经有了一个大致的概念,让我们开始吧。首先,我们需要安装 Flower。运行:" +msgstr "" +"现在,我们已经有了一个大致的概念,让我们开始吧。首先,我们需要安装 Flower。运" +"行:" #: ../../source/tutorial-quickstart-mxnet.rst:28 msgid "Since we want to use MXNet, let's go ahead and install it:" @@ -20175,19 +20359,19 @@ msgstr "既然我们要使用 MXNet,那就继续安装吧:" #: ../../source/tutorial-quickstart-mxnet.rst:38 msgid "" "Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. Our training " -"procedure and network architecture are based on MXNet´s `Hand-written " -"Digit Recognition tutorial " -"`_." +"distributed training with two clients and one server. Our training procedure " +"and network architecture are based on MXNet´s `Hand-written Digit " +"Recognition tutorial `_." msgstr "" -"现在,我们已经安装了所有依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练程序和网络架构基于 MXNet 的 " -"`手写数字识别教程 " -"`_\"。" +"现在,我们已经安装了所有依赖项,让我们用两个客户端和一个服务器来运行一个简单" +"的分布式训练。我们的训练程序和网络架构基于 MXNet 的 `手写数字识别教程 " +"`_\"。" #: ../../source/tutorial-quickstart-mxnet.rst:40 msgid "" -"In a file called :code:`client.py`, import Flower and MXNet related " -"packages:" +"In a file called :code:`client.py`, import Flower and MXNet related packages:" msgstr "在名为 :code:`client.py` 的文件中,导入 Flower 和 MXNet 相关软件包:" #: ../../source/tutorial-quickstart-mxnet.rst:55 @@ -20197,29 +20381,35 @@ msgstr "此外,还可以在 MXNet 中定义设备分配:" #: ../../source/tutorial-quickstart-mxnet.rst:61 msgid "" "We use MXNet to load MNIST, a popular image classification dataset of " -"handwritten digits for machine learning. The MXNet utility " -":code:`mx.test_utils.get_mnist()` downloads the training and test data." +"handwritten digits for machine learning. The MXNet utility :code:`mx." +"test_utils.get_mnist()` downloads the training and test data." msgstr "" -"我们使用 MXNet 加载 MNIST,这是一个用于机器学习的流行手写数字图像分类数据集。MXNet 工具 " -":code:`mx.test_utils.get_mnist()` 会下载训练和测试数据。" +"我们使用 MXNet 加载 MNIST,这是一个用于机器学习的流行手写数字图像分类数据集。" +"MXNet 工具 :code:`mx.test_utils.get_mnist()` 会下载训练和测试数据。" #: ../../source/tutorial-quickstart-mxnet.rst:75 msgid "" -"Define the training and loss with MXNet. We train the model by looping " -"over the dataset, measure the corresponding loss, and optimize it." -msgstr "用 MXNet 定义训练和损失值。我们在数据集上循环训练模型,测量相应的损失值,并对其进行优化。" +"Define the training and loss with MXNet. We train the model by looping over " +"the dataset, measure the corresponding loss, and optimize it." +msgstr "" +"用 MXNet 定义训练和损失值。我们在数据集上循环训练模型,测量相应的损失值,并对" +"其进行优化。" #: ../../source/tutorial-quickstart-mxnet.rst:113 msgid "" -"Next, we define the validation of our machine learning model. We loop " -"over the test set and measure both loss and accuracy on the test set." -msgstr "接下来,我们定义机器学习模型的验证。我们在测试集上循环,测量测试集上的损失值和准确率。" +"Next, we define the validation of our machine learning model. We loop over " +"the test set and measure both loss and accuracy on the test set." 
+msgstr "" +"接下来,我们定义机器学习模型的验证。我们在测试集上循环,测量测试集上的损失值" +"和准确率。" #: ../../source/tutorial-quickstart-mxnet.rst:137 msgid "" -"After defining the training and testing of a MXNet machine learning " -"model, we use these functions to implement a Flower client." -msgstr "在定义了 MXNet 机器学习模型的训练和测试后,我们使用这些函数实现了 Flower 客户端。" +"After defining the training and testing of a MXNet machine learning model, " +"we use these functions to implement a Flower client." +msgstr "" +"在定义了 MXNet 机器学习模型的训练和测试后,我们使用这些函数实现了 Flower 客户" +"端。" #: ../../source/tutorial-quickstart-mxnet.rst:139 msgid "Our Flower clients will use a simple :code:`Sequential` model:" @@ -20227,38 +20417,37 @@ msgstr "我们的 Flower 客户端将使用简单的 :code:`Sequential` 模型 #: ../../source/tutorial-quickstart-mxnet.rst:158 msgid "" -"After loading the dataset with :code:`load_data()` we perform one forward" -" propagation to initialize the model and model parameters with " -":code:`model(init)`. Next, we implement a Flower client." +"After loading the dataset with :code:`load_data()` we perform one forward " +"propagation to initialize the model and model parameters with :code:" +"`model(init)`. Next, we implement a Flower client." msgstr "" -"使用 :code:`load_data()` 加载数据集后,我们会执行一次前向传播,使用 :code:`model(init)` " -"初始化模型和模型参数。接下来,我们实现一个 Flower 客户端。" +"使用 :code:`load_data()` 加载数据集后,我们会执行一次前向传播,使用 :code:" +"`model(init)` 初始化模型和模型参数。接下来,我们实现一个 Flower 客户端。" #: ../../source/tutorial-quickstart-mxnet.rst:160 #: ../../source/tutorial-quickstart-pytorch.rst:144 #: ../../source/tutorial-quickstart-tensorflow.rst:54 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to train the neural network we defined earlier)." +"The Flower server interacts with clients through an interface called :code:" +"`Client`. When the server selects a particular client for training, it sends " +"training instructions over the network. The client receives those " +"instructions and calls one of the :code:`Client` methods to run your code (i." +"e., to train the neural network we defined earlier)." msgstr "" -"Flower 服务器通过一个名为 :code:`Client` " -"的接口与客户端交互。当服务器选择一个特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会调用 :code:`Client`" -" 方法之一来运行您的代码(即训练我们之前定义的神经网络)。" +"Flower 服务器通过一个名为 :code:`Client` 的接口与客户端交互。当服务器选择一个" +"特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会" +"调用 :code:`Client` 方法之一来运行您的代码(即训练我们之前定义的神经网络)。" #: ../../source/tutorial-quickstart-mxnet.rst:166 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses MXNet. Implementing :code:`NumPyClient` usually means " -"defining the following methods (:code:`set_parameters` is optional " -"though):" +"Flower provides a convenience class called :code:`NumPyClient` which makes " +"it easier to implement the :code:`Client` interface when your workload uses " +"MXNet. 
Implementing :code:`NumPyClient` usually means defining the following " +"methods (:code:`set_parameters` is optional though):" msgstr "" -"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用 MXNet 时,它可以让您更轻松地实现 " -":code:`Client` 接口。实现 :code:`NumPyClient` " -"通常意味着定义以下方法(:code:`set_parameters` 是可选的):" +"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用 MXNet " +"时,它可以让您更轻松地实现 :code:`Client` 接口。实现 :code:`NumPyClient` 通常" +"意味着定义以下方法(:code:`set_parameters` 是可选的):" #: ../../source/tutorial-quickstart-mxnet.rst:172 #: ../../source/tutorial-quickstart-pytorch.rst:156 @@ -20276,8 +20465,7 @@ msgstr ":code:`set_parameters` (可选)" #: ../../source/tutorial-quickstart-pytorch.rst:158 #: ../../source/tutorial-quickstart-scikitlearn.rst:111 msgid "" -"update the local model weights with the parameters received from the " -"server" +"update the local model weights with the parameters received from the server" msgstr "用从服务器接收到的参数更新本地模型参数" #: ../../source/tutorial-quickstart-mxnet.rst:176 @@ -20310,34 +20498,38 @@ msgstr "它们可以通过以下方式实现:" #: ../../source/tutorial-quickstart-mxnet.rst:212 msgid "" -"We can now create an instance of our class :code:`MNISTClient` and add " -"one line to actually run this client:" -msgstr "现在我们可以创建一个 :code:`MNISTClient` 类的实例,并添加一行来实际运行该客户端:" +"We can now create an instance of our class :code:`MNISTClient` and add one " +"line to actually run this client:" +msgstr "" +"现在我们可以创建一个 :code:`MNISTClient` 类的实例,并添加一行来实际运行该客户" +"端:" #: ../../source/tutorial-quickstart-mxnet.rst:219 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()` or " -":code:`fl.client.start_numpy_client()`. The string " -":code:`\"0.0.0.0:8080\"` tells the client which server to connect to. In " -"our case we can run the server and the client on the same machine, " -"therefore we use :code:`\"0.0.0.0:8080\"`. If we run a truly federated " -"workload with the server and clients running on different machines, all " -"that needs to change is the :code:`server_address` we pass to the client." -msgstr "" -"这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient` 并调用 " -":code:`fl.client.start_client()` 或 " -":code:`fl.client.start_numpy_client()`。字符串 " -":code:`\"0.0.0.0:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " -":code:`\"0.0.0.0:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是传递给客户端的" -" :code:`server_address`。" +"That's it for the client. We only have to implement :code:`Client` or :code:" +"`NumPyClient` and call :code:`fl.client.start_client()` or :code:`fl.client." +"start_numpy_client()`. The string :code:`\"0.0.0.0:8080\"` tells the client " +"which server to connect to. In our case we can run the server and the client " +"on the same machine, therefore we use :code:`\"0.0.0.0:8080\"`. If we run a " +"truly federated workload with the server and clients running on different " +"machines, all that needs to change is the :code:`server_address` we pass to " +"the client." +msgstr "" +"这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient` 并调用 :" +"code:`fl.client.start_client()` 或 :code:`fl.client.start_numpy_client()`。字" +"符串 :code:`\"0.0.0.0:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在" +"同一台机器上运行服务器和客户端,因此我们使用 :code:`\"0.0.0.0:8080\"`。如果我" +"们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变" +"的只是传递给客户端的 :code:`server_address`。" #: ../../source/tutorial-quickstart-mxnet.rst:241 msgid "" "With both client and server ready, we can now run everything and see " "federated learning in action. 
Federated learning systems usually have a " "server and multiple clients. We therefore have to start the server first:" -msgstr "客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" +msgstr "" +"客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。" +"联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" #: ../../source/tutorial-quickstart-mxnet.rst:249 #: ../../source/tutorial-quickstart-pytorch.rst:226 @@ -20345,9 +20537,11 @@ msgstr "客户端和服务器都准备就绪后,我们现在就可以运行一 #: ../../source/tutorial-quickstart-tensorflow.rst:122 #: ../../source/tutorial-quickstart-xgboost.rst:533 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Open a new terminal and start the first client:" -msgstr "服务器运行后,我们就可以在不同终端启动客户端了。打开一个新终端,启动第一个客户端:" +"Once the server is running we can start the clients in different terminals. " +"Open a new terminal and start the first client:" +msgstr "" +"服务器运行后,我们就可以在不同终端启动客户端了。打开一个新终端,启动第一个客" +"户端:" #: ../../source/tutorial-quickstart-mxnet.rst:256 #: ../../source/tutorial-quickstart-pytorch.rst:233 @@ -20362,28 +20556,29 @@ msgstr "打开另一台终端,启动第二个客户端:" #: ../../source/tutorial-quickstart-scikitlearn.rst:237 #: ../../source/tutorial-quickstart-xgboost.rst:546 msgid "" -"Each client will have its own dataset. You should now see how the " -"training does in the very first terminal (the one that started the " -"server):" -msgstr "每个客户端都有自己的数据集。现在你应该看到第一个终端(启动服务器的终端)的训练效果了:" +"Each client will have its own dataset. You should now see how the training " +"does in the very first terminal (the one that started the server):" +msgstr "" +"每个客户端都有自己的数据集。现在你应该看到第一个终端(启动服务器的终端)的训" +"练效果了:" #: ../../source/tutorial-quickstart-mxnet.rst:294 msgid "" "Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples" -"/quickstart-mxnet`." +"learning system. The full `source code `_ for this example can be found " +"in :code:`examples/quickstart-mxnet`." msgstr "" "恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " -"`_ 可在 :code:`examples/quickstart-mxnet` 中找到。" +"`_ 可在 :code:`examples/quickstart-mxnet` 中找到。" #: ../../source/tutorial-quickstart-pandas.rst:-1 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Pandas to perform Federated Analytics." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 Pandas 执行联邦分析。" +"Check out this Federated Learning quickstart tutorial for using Flower with " +"Pandas to perform Federated Analytics." +msgstr "" +"查看此联邦学习快速入门教程,了解如何使用 Flower 和 Pandas 执行联邦分析。" #: ../../source/tutorial-quickstart-pandas.rst:5 msgid "Quickstart Pandas" @@ -20395,68 +20590,78 @@ msgstr "让我们使用 Pandas 和 Flower 建立一个联邦分析系统!" #: ../../source/tutorial-quickstart-pandas.rst:12 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"Please refer to the `full code example `_ to learn more." msgstr "" -"请参阅 `完整代码示例 `_\" 了解更多信息。" +"请参阅 `完整代码示例 `_\" 了解更多信息。" #: ../../source/tutorial-quickstart-pytorch.rst:-1 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch to train a CNN model on MNIST." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch 在 MNIST 上训练 CNN 模型。" +"Check out this Federated Learning quickstart tutorial for using Flower with " +"PyTorch to train a CNN model on MNIST." 
+msgstr "" +"查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch 在 MNIST 上训练 " +"CNN 模型。" #: ../../source/tutorial-quickstart-pytorch.rst:13 msgid "" -"In this tutorial we will learn how to train a Convolutional Neural " -"Network on CIFAR10 using Flower and PyTorch." -msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 CIFAR10 上训练卷积神经网络。" +"In this tutorial we will learn how to train a Convolutional Neural Network " +"on CIFAR10 using Flower and PyTorch." +msgstr "" +"在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 CIFAR10 上训练卷积神经网" +"络。" #: ../../source/tutorial-quickstart-pytorch.rst:15 #: ../../source/tutorial-quickstart-xgboost.rst:39 #, fuzzy msgid "" "First of all, it is recommended to create a virtual environment and run " -"everything within a :doc:`virtualenv `." +"everything within a :doc:`virtualenv `." msgstr "" -"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" #: ../../source/tutorial-quickstart-pytorch.rst:19 msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." -msgstr "*客户端*负责在其本地数据集上更新模型参数。然后,这些参数会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的模型参数更新周期称为一*轮*。" +"*Clients* are responsible for generating individual weight-updates for the " +"model based on their local datasets. These updates are then sent to the " +"*server* which will aggregate them to produce a better model. Finally, the " +"*server* sends this improved version of the model back to each *client*. A " +"complete cycle of weight updates is called a *round*." +msgstr "" +"*客户端*负责在其本地数据集上更新模型参数。然后,这些参数会被发送到*服务器*," +"由*服务器*聚合后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客" +"户端*。一个完整的模型参数更新周期称为一*轮*。" #: ../../source/tutorial-quickstart-pytorch.rst:23 msgid "" "Now that we have a rough idea of what is going on, let's get started. We " "first need to install Flower. You can do this by running :" -msgstr "现在,我们已经有了一个大致的概念了,那就让我们开始吧。首先,我们需要安装 Flower。可以通过运行 :" +msgstr "" +"现在,我们已经有了一个大致的概念了,那就让我们开始吧。首先,我们需要安装 " +"Flower。可以通过运行 :" #: ../../source/tutorial-quickstart-pytorch.rst:29 msgid "" -"Since we want to use PyTorch to solve a computer vision task, let's go " -"ahead and install PyTorch and the **torchvision** library:" -msgstr "既然我们想用 PyTorch 解决计算机视觉任务,那就继续安装 PyTorch 和 **torchvision** 库吧:" +"Since we want to use PyTorch to solve a computer vision task, let's go ahead " +"and install PyTorch and the **torchvision** library:" +msgstr "" +"既然我们想用 PyTorch 解决计算机视觉任务,那就继续安装 PyTorch 和 " +"**torchvision** 库吧:" #: ../../source/tutorial-quickstart-pytorch.rst:39 msgid "" "Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. Our training " -"procedure and network architecture are based on PyTorch's `Deep Learning " -"with PyTorch " +"distributed training with two clients and one server. Our training procedure " +"and network architecture are based on PyTorch's `Deep Learning with PyTorch " "`_." 
msgstr "" -"现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 PyTorch " -"的《Deep Learning with PyTorch " -"`_》。" +"现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单" +"的分布式训练。我们的训练过程和网络架构基于 PyTorch 的《Deep Learning with " +"PyTorch `_》。" #: ../../source/tutorial-quickstart-pytorch.rst:41 msgid "" @@ -20471,35 +20676,40 @@ msgstr "此外,我们还在 PyTorch 中定义了设备分配:" #: ../../source/tutorial-quickstart-pytorch.rst:62 msgid "" "We use PyTorch to load CIFAR10, a popular colored image classification " -"dataset for machine learning. The PyTorch :code:`DataLoader()` downloads " -"the training and test data that are then normalized." +"dataset for machine learning. The PyTorch :code:`DataLoader()` downloads the " +"training and test data that are then normalized." msgstr "" -"我们使用 PyTorch 来加载 CIFAR10,这是一个用于机器学习的流行彩色图像分类数据集。PyTorch " -":code:`DataLoader()`下载训练数据和测试数据,然后进行归一化处理。" +"我们使用 PyTorch 来加载 CIFAR10,这是一个用于机器学习的流行彩色图像分类数据" +"集。PyTorch :code:`DataLoader()`下载训练数据和测试数据,然后进行归一化处理。" #: ../../source/tutorial-quickstart-pytorch.rst:78 msgid "" -"Define the loss and optimizer with PyTorch. The training of the dataset " -"is done by looping over the dataset, measure the corresponding loss and " +"Define the loss and optimizer with PyTorch. The training of the dataset is " +"done by looping over the dataset, measure the corresponding loss and " "optimize it." -msgstr "使用 PyTorch 定义损失和优化器。数据集的训练是通过循环数据集、测量相应的损失值并对其进行优化来完成的。" +msgstr "" +"使用 PyTorch 定义损失和优化器。数据集的训练是通过循环数据集、测量相应的损失值" +"并对其进行优化来完成的。" #: ../../source/tutorial-quickstart-pytorch.rst:94 msgid "" -"Define then the validation of the machine learning network. We loop over" -" the test set and measure the loss and accuracy of the test set." -msgstr "然后定义机器学习网络的验证。我们在测试集上循环,计算测试集的损失值和准确率。" +"Define then the validation of the machine learning network. We loop over " +"the test set and measure the loss and accuracy of the test set." +msgstr "" +"然后定义机器学习网络的验证。我们在测试集上循环,计算测试集的损失值和准确率。" #: ../../source/tutorial-quickstart-pytorch.rst:113 msgid "" -"After defining the training and testing of a PyTorch machine learning " -"model, we use the functions for the Flower clients." -msgstr "在定义了 PyTorch 机器学习模型的训练和测试之后,我们将这些功能用于 Flower 客户端。" +"After defining the training and testing of a PyTorch machine learning model, " +"we use the functions for the Flower clients." +msgstr "" +"在定义了 PyTorch 机器学习模型的训练和测试之后,我们将这些功能用于 Flower 客户" +"端。" #: ../../source/tutorial-quickstart-pytorch.rst:115 msgid "" -"The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 " -"Minute Blitz':" +"The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 Minute " +"Blitz':" msgstr "Flower 客户端将使用一个简单的从“PyTorch: 60 分钟突击\"改编的CNN:" #: ../../source/tutorial-quickstart-pytorch.rst:142 @@ -20510,15 +20720,14 @@ msgstr "使用 :code:`load_data()` 加载数据集后,我们定义了 Flower #: ../../source/tutorial-quickstart-pytorch.rst:150 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses PyTorch. Implementing :code:`NumPyClient` usually means " -"defining the following methods (:code:`set_parameters` is optional " -"though):" +"Flower provides a convenience class called :code:`NumPyClient` which makes " +"it easier to implement the :code:`Client` interface when your workload uses " +"PyTorch. 
Implementing :code:`NumPyClient` usually means defining the " +"following methods (:code:`set_parameters` is optional though):" msgstr "" -"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用 PyTorch 时,它使 " -":code:`Client` 接口的实现变得更容易。实现 :code:`NumPyClient` " -"通常意味着定义以下方法(:code:`set_parameters` 是可选的):" +"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用 " +"PyTorch 时,它使 :code:`Client` 接口的实现变得更容易。实现 :code:" +"`NumPyClient` 通常意味着定义以下方法(:code:`set_parameters` 是可选的):" #: ../../source/tutorial-quickstart-pytorch.rst:166 msgid "which can be implemented in the following way:" @@ -20527,48 +20736,51 @@ msgstr "可以通过以下方式实现:" #: ../../source/tutorial-quickstart-pytorch.rst:189 #: ../../source/tutorial-quickstart-tensorflow.rst:82 msgid "" -"We can now create an instance of our class :code:`CifarClient` and add " -"one line to actually run this client:" -msgstr "现在我们可以创建一个 :code:`CifarClient` 类的实例,并添加一行来实际运行该客户端:" +"We can now create an instance of our class :code:`CifarClient` and add one " +"line to actually run this client:" +msgstr "" +"现在我们可以创建一个 :code:`CifarClient` 类的实例,并添加一行来实际运行该客户" +"端:" #: ../../source/tutorial-quickstart-pytorch.rst:196 #: ../../source/tutorial-quickstart-tensorflow.rst:90 #, fuzzy msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"[::]:8080\"` tells " -"the client which server to connect to. In our case we can run the server " -"and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"That's it for the client. We only have to implement :code:`Client` or :code:" +"`NumPyClient` and call :code:`fl.client.start_client()`. If you implement a " +"client of type :code:`NumPyClient` you'll need to first call its :code:" +"`to_client()` method. The string :code:`\"[::]:8080\"` tells the client " +"which server to connect to. In our case we can run the server and the client " +"on the same machine, therefore we use :code:`\"[::]:8080\"`. If we run a " +"truly federated workload with the server and clients running on different " +"machines, all that needs to change is the :code:`server_address` we point " +"the client at." msgstr "" -"这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient` 并调用 " -":code:`fl.client.start_client()` 或 " -":code:`fl.client.start_numpy_client()`。字符串 " -":code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此使用 " -":code:`\"[::]:8080\"。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户端指向的 " -":code:`server_address`。" +"这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient` 并调用 :" +"code:`fl.client.start_client()` 或 :code:`fl.client.start_numpy_client()`。字" +"符串 :code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同" +"一台机器上运行服务器和客户端,因此使用 :code:`\"[::]:8080\"。如果我们运行的是" +"真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户" +"端指向的 :code:`server_address`。" #: ../../source/tutorial-quickstart-pytorch.rst:271 msgid "" "Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples" -"/quickstart-pytorch`." +"learning system. The full `source code `_ for this example can be found " +"in :code:`examples/quickstart-pytorch`." 
msgstr "" "恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " -"`_ 可以在 :code:`examples/quickstart-pytorch` 中找到。" +"`_ 可以在 :code:`examples/quickstart-pytorch` 中找到。" #: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch Lightning to train an Auto Encoder model on MNIST." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch Lightning 在 MNIST 上训练自动编码器模型。" +"Check out this Federated Learning quickstart tutorial for using Flower with " +"PyTorch Lightning to train an Auto Encoder model on MNIST." +msgstr "" +"查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch Lightning 在 " +"MNIST 上训练自动编码器模型。" #: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 msgid "Quickstart PyTorch Lightning" @@ -20576,24 +20788,25 @@ msgstr "快速入门 PyTorch Lightning" #: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 msgid "" -"Let's build a horizontal federated learning system using PyTorch " -"Lightning and Flower!" +"Let's build a horizontal federated learning system using PyTorch Lightning " +"and Flower!" msgstr "让我们使用 PyTorch Lightning 和 Flower 构建一个水平联邦学习系统!" #: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 msgid "" -"Please refer to the `full code example " -"`_ to learn more." +"Please refer to the `full code example `_ to learn more." msgstr "" -"请参阅 `完整代码示例 `_ 了解更多信息。" +"请参阅 `完整代码示例 `_ 了解更多信息。" #: ../../source/tutorial-quickstart-scikitlearn.rst:-1 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with scikit-learn to train a linear regression model." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 scikit-learn 训练线性回归模型。" +"Check out this Federated Learning quickstart tutorial for using Flower with " +"scikit-learn to train a linear regression model." +msgstr "" +"查看此联邦学习快速入门教程,了解如何使用 Flower 和 scikit-learn 训练线性回归" +"模型。" #: ../../source/tutorial-quickstart-scikitlearn.rst:5 msgid "Quickstart scikit-learn" @@ -20601,11 +20814,11 @@ msgstr "scikit-learn快速入门" #: ../../source/tutorial-quickstart-scikitlearn.rst:10 msgid "" -"In this tutorial, we will learn how to train a :code:`Logistic " -"Regression` model on MNIST using Flower and scikit-learn." +"In this tutorial, we will learn how to train a :code:`Logistic Regression` " +"model on MNIST using Flower and scikit-learn." msgstr "" -"在本教程中,我们将学习如何使用 Flower 和 scikit-learn 在 MNIST 上训练一个 :code:`Logistic " -"Regression` 模型。" +"在本教程中,我们将学习如何使用 Flower 和 scikit-learn 在 MNIST 上训练一个 :" +"code:`Logistic Regression` 模型。" #: ../../source/tutorial-quickstart-scikitlearn.rst:26 #, fuzzy @@ -20620,13 +20833,15 @@ msgstr "或者直接使用 Poetry 安装所有依赖项:" msgid "" "Now that we have all our dependencies installed, let's run a simple " "distributed training with two clients and one server. However, before " -"setting up the client and server, we will define all functionalities that" -" we need for our federated learning setup within :code:`utils.py`. The " -":code:`utils.py` contains different functions defining all the machine " -"learning basics:" +"setting up the client and server, we will define all functionalities that we " +"need for our federated learning setup within :code:`utils.py`. 
The :code:" +"`utils.py` contains different functions defining all the machine learning " +"basics:" msgstr "" -"现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。不过,在设置客户端和服务器之前,我们将在 " -":code:`utils.py` 中定义联邦学习设置所需的所有功能。:code:`utils.py`包含定义所有机器学习基础知识的不同函数:" +"现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单" +"的分布式训练。不过,在设置客户端和服务器之前,我们将在 :code:`utils.py` 中定" +"义联邦学习设置所需的所有功能。:code:`utils.py`包含定义所有机器学习基础知识的" +"不同函数:" #: ../../source/tutorial-quickstart-scikitlearn.rst:45 msgid ":code:`get_model_parameters()`" @@ -20678,60 +20893,61 @@ msgstr "将数据集分割成多个分区" #: ../../source/tutorial-quickstart-scikitlearn.rst:58 msgid "" -"Please check out :code:`utils.py` `here " -"`_ for more details. The pre-defined functions are used in" -" the :code:`client.py` and imported. The :code:`client.py` also requires " -"to import several packages such as Flower and scikit-learn:" +"Please check out :code:`utils.py` `here `_ for more details. The pre-" +"defined functions are used in the :code:`client.py` and imported. The :code:" +"`client.py` also requires to import several packages such as Flower and " +"scikit-learn:" msgstr "" -"更多详情请查看 :code:`utils.py`` 这里 " -"`_。在 :code:`client.py` 中使用并导入了预定义函数。:code:`client.py` " -"还需要导入几个软件包,如 Flower 和 scikit-learn:" +"更多详情请查看 :code:`utils.py`` 这里 `_。在 :code:`client.py` 中使用并" +"导入了预定义函数。:code:`client.py` 还需要导入几个软件包,如 Flower 和 " +"scikit-learn:" #: ../../source/tutorial-quickstart-scikitlearn.rst:73 #, fuzzy msgid "" -"We load the MNIST dataset from `OpenML " -"`_, a popular " -"image classification dataset of handwritten digits for machine learning. " -"The utility :code:`utils.load_mnist()` downloads the training and test " -"data. The training set is split afterwards into 10 partitions with " -":code:`utils.partition()`." +"We load the MNIST dataset from `OpenML `_, a popular image classification dataset of " +"handwritten digits for machine learning. The utility :code:`utils." +"load_mnist()` downloads the training and test data. The training set is " +"split afterwards into 10 partitions with :code:`utils.partition()`." msgstr "" -"我们从 `OpenML `_ 中加载 MNIST " -"数据集,这是一个用于机器学习的流行手写数字图像分类数据集。实用程序 :code:`utils.load_mnist()` " -"下载训练和测试数据。然后使用 :code:`utils.partition()`将训练集分割成 10 个分区。" +"我们从 `OpenML `_ 中加载 MNIST 数据集,这是一个" +"用于机器学习的流行手写数字图像分类数据集。实用程序 :code:`utils." +"load_mnist()` 下载训练和测试数据。然后使用 :code:`utils.partition()`将训练集" +"分割成 10 个分区。" #: ../../source/tutorial-quickstart-scikitlearn.rst:85 msgid "" -"Next, the logistic regression model is defined and initialized with " -":code:`utils.set_initial_params()`." -msgstr "接下来,使用 :code:`utils.set_initial_params()` 对逻辑回归模型进行定义和初始化。" +"Next, the logistic regression model is defined and initialized with :code:" +"`utils.set_initial_params()`." +msgstr "" +"接下来,使用 :code:`utils.set_initial_params()` 对逻辑回归模型进行定义和初始" +"化。" #: ../../source/tutorial-quickstart-scikitlearn.rst:97 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to fit the logistic regression we defined earlier)." +"The Flower server interacts with clients through an interface called :code:" +"`Client`. When the server selects a particular client for training, it sends " +"training instructions over the network. 
The client receives those " +"instructions and calls one of the :code:`Client` methods to run your code (i." +"e., to fit the logistic regression we defined earlier)." msgstr "" -"Flower 服务器通过一个名为 :code:`Client` " -"的接口与客户端交互。当服务器选择一个特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会调用 :code:`Client`" -" 方法之一来运行您的代码(即拟合我们之前定义的逻辑回归)。" +"Flower 服务器通过一个名为 :code:`Client` 的接口与客户端交互。当服务器选择一个" +"特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会" +"调用 :code:`Client` 方法之一来运行您的代码(即拟合我们之前定义的逻辑回归)。" #: ../../source/tutorial-quickstart-scikitlearn.rst:103 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses scikit-learn. Implementing :code:`NumPyClient` usually " -"means defining the following methods (:code:`set_parameters` is optional " -"though):" +"Flower provides a convenience class called :code:`NumPyClient` which makes " +"it easier to implement the :code:`Client` interface when your workload uses " +"scikit-learn. Implementing :code:`NumPyClient` usually means defining the " +"following methods (:code:`set_parameters` is optional though):" msgstr "" -"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当你的工作负载使用 scikit-learn " -"时,它可以让你更容易地实现 :code:`Client` 接口。实现 :code:`NumPyClient` " -"通常意味着定义以下方法(:code:`set_parameters` 是可选的):" +"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当你的工作负载使用 " +"scikit-learn 时,它可以让你更容易地实现 :code:`Client` 接口。实现 :code:" +"`NumPyClient` 通常意味着定义以下方法(:code:`set_parameters` 是可选的):" #: ../../source/tutorial-quickstart-scikitlearn.rst:112 msgid "is directly imported with :code:`utils.set_model_params()`" @@ -20743,38 +20959,40 @@ msgstr "这些方法可以通过以下方式实现:" #: ../../source/tutorial-quickstart-scikitlearn.rst:143 msgid "" -"We can now create an instance of our class :code:`MnistClient` and add " -"one line to actually run this client:" -msgstr "现在我们可以创建一个 :code:`MnistClient` 类的实例,并添加一行来实际运行该客户端:" +"We can now create an instance of our class :code:`MnistClient` and add one " +"line to actually run this client:" +msgstr "" +"现在我们可以创建一个 :code:`MnistClient` 类的实例,并添加一行来实际运行该客户" +"端:" #: ../../source/tutorial-quickstart-scikitlearn.rst:150 #, fuzzy msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" -" the client which server to connect to. In our case we can run the server" -" and the client on the same machine, therefore we use " -":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we pass to the client." +"That's it for the client. We only have to implement :code:`Client` or :code:" +"`NumPyClient` and call :code:`fl.client.start_client()`. If you implement a " +"client of type :code:`NumPyClient` you'll need to first call its :code:" +"`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells the client " +"which server to connect to. In our case we can run the server and the client " +"on the same machine, therefore we use :code:`\"0.0.0.0:8080\"`. If we run a " +"truly federated workload with the server and clients running on different " +"machines, all that needs to change is the :code:`server_address` we pass to " +"the client." 
msgstr "" -"这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient` 并调用 " -":code:`fl.client.start_client()` 或 " -":code:`fl.client.start_numpy_client()`。字符串 " -":code:`\"0.0.0.0:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " -":code:`\"0.0.0.0:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是传递给客户端的" -" :code:`server_address`。" +"这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient` 并调用 :" +"code:`fl.client.start_client()` 或 :code:`fl.client.start_numpy_client()`。字" +"符串 :code:`\"0.0.0.0:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在" +"同一台机器上运行服务器和客户端,因此我们使用 :code:`\"0.0.0.0:8080\"`。如果我" +"们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变" +"的只是传递给客户端的 :code:`server_address`。" #: ../../source/tutorial-quickstart-scikitlearn.rst:159 msgid "" "The following Flower server is a little bit more advanced and returns an " -"evaluation function for the server-side evaluation. First, we import " -"again all required libraries such as Flower and scikit-learn." +"evaluation function for the server-side evaluation. First, we import again " +"all required libraries such as Flower and scikit-learn." msgstr "" -"下面的 Flower 服务器更先进一些,会返回一个用于服务器端评估的评估函数。首先,我们再次导入所有需要的库,如 Flower 和 scikit-" -"learn。" +"下面的 Flower 服务器更先进一些,会返回一个用于服务器端评估的评估函数。首先," +"我们再次导入所有需要的库,如 Flower 和 scikit-learn。" #: ../../source/tutorial-quickstart-scikitlearn.rst:162 msgid ":code:`server.py`, import Flower and start the server:" @@ -20782,8 +21000,8 @@ msgstr ":code:`server.py`, 导入 Flower 并启动服务器:" #: ../../source/tutorial-quickstart-scikitlearn.rst:173 msgid "" -"The number of federated learning rounds is set in :code:`fit_round()` and" -" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " +"The number of federated learning rounds is set in :code:`fit_round()` and " +"the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " "function is called after each federated learning round and gives you " "information about loss and accuracy." msgstr "" @@ -20792,45 +21010,47 @@ msgstr "" #: ../../source/tutorial-quickstart-scikitlearn.rst:198 msgid "" -"The :code:`main` contains the server-side parameter initialization " -":code:`utils.set_initial_params()` as well as the aggregation strategy " -":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " -"federated averaging (or FedAvg), with two clients and evaluation after " -"each federated learning round. The server can be started with the command" -" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." +"The :code:`main` contains the server-side parameter initialization :code:" +"`utils.set_initial_params()` as well as the aggregation strategy :code:`fl." +"server.strategy:FedAvg()`. The strategy is the default one, federated " +"averaging (or FedAvg), with two clients and evaluation after each federated " +"learning round. The server can be started with the command :code:`fl.server." +"start_server(server_address=\"0.0.0.0:8080\", strategy=strategy, config=fl." +"server.ServerConfig(num_rounds=3))`." 
msgstr "" -":code:`main`包含服务器端参数初始化:code:`utils.set_initial_params()`以及聚合策略 " -":code:`fl.server.strategy:FedAvg()`。该策略是默认的联邦平均(或 " -"FedAvg)策略,有两个客户端,在每轮联邦学习后进行评估。可以使用 " -":code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))` 命令启动服务器。" +":code:`main`包含服务器端参数初始化:code:`utils.set_initial_params()`以及聚合" +"策略 :code:`fl.server.strategy:FedAvg()`。该策略是默认的联邦平均(或 FedAvg)" +"策略,有两个客户端,在每轮联邦学习后进行评估。可以使用 :code:`fl.server." +"start_server(server_address=\"0.0.0.0:8080\", strategy=strategy, config=fl." +"server.ServerConfig(num_rounds=3))` 命令启动服务器。" #: ../../source/tutorial-quickstart-scikitlearn.rst:217 msgid "" "With both client and server ready, we can now run everything and see " "federated learning in action. Federated learning systems usually have a " -"server and multiple clients. We, therefore, have to start the server " -"first:" -msgstr "客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" +"server and multiple clients. We, therefore, have to start the server first:" +msgstr "" +"客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。" +"联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" #: ../../source/tutorial-quickstart-scikitlearn.rst:271 msgid "" "Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples/sklearn-logreg-" -"mnist`." +"learning system. The full `source code `_ for this example can be found in :code:" +"`examples/sklearn-logreg-mnist`." msgstr "" "恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " -"`_ 可以在 :code:`examples/sklearn-logreg-mnist` 中找到。" +"`_ 可" +"以在 :code:`examples/sklearn-logreg-mnist` 中找到。" #: ../../source/tutorial-quickstart-tensorflow.rst:-1 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with TensorFlow to train a MobilNetV2 model on CIFAR-10." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 TensorFlow 在 CIFAR-10 上训练 MobilNetV2 模型。" +"Check out this Federated Learning quickstart tutorial for using Flower with " +"TensorFlow to train a MobilNetV2 model on CIFAR-10." +msgstr "" +"查看此联邦学习快速入门教程,了解如何使用 Flower 和 TensorFlow 在 CIFAR-10 上" +"训练 MobilNetV2 模型。" #: ../../source/tutorial-quickstart-tensorflow.rst:5 msgid "Quickstart TensorFlow" @@ -20846,8 +21066,8 @@ msgstr "在导入 Flower 之前,我们必须先安装它:" #: ../../source/tutorial-quickstart-tensorflow.rst:21 msgid "" -"Since we want to use the Keras API of TensorFlow (TF), we have to install" -" TF as well:" +"Since we want to use the Keras API of TensorFlow (TF), we have to install TF " +"as well:" msgstr "由于我们要使用 TensorFlow (TF) 的 Keras API,因此还必须安装 TF:" #: ../../source/tutorial-quickstart-tensorflow.rst:31 @@ -20856,31 +21076,32 @@ msgstr "接下来,在名为 :code:`client.py` 的文件中导入 Flower 和 Te #: ../../source/tutorial-quickstart-tensorflow.rst:38 msgid "" -"We use the Keras utilities of TF to load CIFAR10, a popular colored image" -" classification dataset for machine learning. The call to " -":code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches " -"it locally, and then returns the entire training and test set as NumPy " -"ndarrays." +"We use the Keras utilities of TF to load CIFAR10, a popular colored image " +"classification dataset for machine learning. The call to :code:`tf.keras." +"datasets.cifar10.load_data()` downloads CIFAR10, caches it locally, and then " +"returns the entire training and test set as NumPy ndarrays." 
msgstr "" -"我们使用 TF 的 Keras 实用程序加载 CIFAR10,这是一个用于机器学习的流行彩色图像分类数据集。调用 " -":code:`tf.keras.datasets.cifar10.load_data()` 会下载 CIFAR10,将其缓存到本地,然后以 " -"NumPy ndarrays 的形式返回整个训练集和测试集。" +"我们使用 TF 的 Keras 实用程序加载 CIFAR10,这是一个用于机器学习的流行彩色图像" +"分类数据集。调用 :code:`tf.keras.datasets.cifar10.load_data()` 会下载 " +"CIFAR10,将其缓存到本地,然后以 NumPy ndarrays 的形式返回整个训练集和测试集。" #: ../../source/tutorial-quickstart-tensorflow.rst:47 msgid "" -"Next, we need a model. For the purpose of this tutorial, we use " -"MobilNetV2 with 10 output classes:" -msgstr "接下来,我们需要一个模型。在本教程中,我们使用带有 10 个输出类的 MobilNetV2:" +"Next, we need a model. For the purpose of this tutorial, we use MobilNetV2 " +"with 10 output classes:" +msgstr "" +"接下来,我们需要一个模型。在本教程中,我们使用带有 10 个输出类的 MobilNetV2:" #: ../../source/tutorial-quickstart-tensorflow.rst:60 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses Keras. The :code:`NumPyClient` interface defines three " -"methods which can be implemented in the following way:" +"Flower provides a convenience class called :code:`NumPyClient` which makes " +"it easier to implement the :code:`Client` interface when your workload uses " +"Keras. The :code:`NumPyClient` interface defines three methods which can be " +"implemented in the following way:" msgstr "" -"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用 Keras 时,该类可以更轻松地实现 " -":code:`Client` 接口。:code:`NumPyClient` 接口定义了三个方法,可以通过以下方式实现:" +"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用 Keras " +"时,该类可以更轻松地实现 :code:`Client` 接口。:code:`NumPyClient` 接口定义了" +"三个方法,可以通过以下方式实现:" #: ../../source/tutorial-quickstart-tensorflow.rst:135 msgid "Each client will have its own dataset." @@ -20888,28 +21109,27 @@ msgstr "每个客户都有自己的数据集。" #: ../../source/tutorial-quickstart-tensorflow.rst:137 msgid "" -"You should now see how the training does in the very first terminal (the " -"one that started the server):" +"You should now see how the training does in the very first terminal (the one " +"that started the server):" msgstr "现在你应该能在第一个终端(启动服务器的终端)看到训练的效果了:" #: ../../source/tutorial-quickstart-tensorflow.rst:169 msgid "" "Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this can be found in :code:`examples" -"/quickstart-tensorflow/client.py`." +"learning system. The full `source code `_ for this can be found in :" +"code:`examples/quickstart-tensorflow/client.py`." msgstr "" -"恭喜您!您已经成功构建并运行了第一个联邦学习系统。`完整的源代码 " -"`_ 可以在 :code:`examples/quickstart-" -"tensorflow/client.py` 中找到。" +"恭喜您!您已经成功构建并运行了第一个联邦学习系统。`完整的源代码 `_ " +"可以在 :code:`examples/quickstart-tensorflow/client.py` 中找到。" #: ../../source/tutorial-quickstart-xgboost.rst:-1 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with XGBoost to train classification models on trees." -msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 XGBoost 上训练分类模型。" +"Check out this Federated Learning quickstart tutorial for using Flower with " +"XGBoost to train classification models on trees." 
+msgstr "" +"查看此联邦学习 快速入门教程,了解如何使用 Flower 和 XGBoost 上训练分类模型。" #: ../../source/tutorial-quickstart-xgboost.rst:5 msgid "Quickstart XGBoost" @@ -20922,22 +21142,24 @@ msgstr "联邦化 XGBoost" #: ../../source/tutorial-quickstart-xgboost.rst:16 msgid "" "EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " -"implementation of gradient-boosted decision tree (**GBDT**), that " -"maximises the computational boundaries for boosted tree methods. It's " -"primarily designed to enhance both the performance and computational " -"speed of machine learning models. In XGBoost, trees are constructed " -"concurrently, unlike the sequential approach taken by GBDT." +"implementation of gradient-boosted decision tree (**GBDT**), that maximises " +"the computational boundaries for boosted tree methods. It's primarily " +"designed to enhance both the performance and computational speed of machine " +"learning models. In XGBoost, trees are constructed concurrently, unlike the " +"sequential approach taken by GBDT." msgstr "" -"EXtreme Gradient " -"Boosting(**XGBoost**)是梯度提升决策树(**GBDT**)的一种稳健而高效的实现方法,能最大限度地提高提升树方法的计算边界。它主要用于提高机器学习模型的性能和计算速度。在" -" XGBoost 中,决策树是并发构建的,与 GBDT 采用的顺序方法不同。" +"EXtreme Gradient Boosting(**XGBoost**)是梯度提升决策树(**GBDT**)的一种稳" +"健而高效的实现方法,能最大限度地提高提升树方法的计算边界。它主要用于提高机器" +"学习模型的性能和计算速度。在 XGBoost 中,决策树是并发构建的,与 GBDT 采用的顺" +"序方法不同。" #: ../../source/tutorial-quickstart-xgboost.rst:20 msgid "" "Often, for tabular data on medium-sized datasets with fewer than 10k " -"training examples, XGBoost surpasses the results of deep learning " -"techniques." -msgstr "对于训练示例少于 10k 的中型数据集上的表格数据,XGBoost 的结果往往超过深度学习技术。" +"training examples, XGBoost surpasses the results of deep learning techniques." +msgstr "" +"对于训练示例少于 10k 的中型数据集上的表格数据,XGBoost 的结果往往超过深度学习" +"技术。" #: ../../source/tutorial-quickstart-xgboost.rst:23 msgid "Why federated XGBoost?" @@ -20946,39 +21168,43 @@ msgstr "为什么选择联邦 XGBoost?" #: ../../source/tutorial-quickstart-xgboost.rst:25 msgid "" "Indeed, as the demand for data privacy and decentralized learning grows, " -"there's an increasing requirement to implement federated XGBoost systems " -"for specialised applications, like survival analysis and financial fraud " +"there's an increasing requirement to implement federated XGBoost systems for " +"specialised applications, like survival analysis and financial fraud " "detection." -msgstr "事实上,随着对数据隐私和分散学习的需求不断增长,越来越多的专业应用(如生存分析和金融欺诈检测)需要实施联邦 XGBoost 系统。" +msgstr "" +"事实上,随着对数据隐私和分散学习的需求不断增长,越来越多的专业应用(如生存分" +"析和金融欺诈检测)需要实施联邦 XGBoost 系统。" #: ../../source/tutorial-quickstart-xgboost.rst:27 msgid "" -"Federated learning ensures that raw data remains on the local device, " -"making it an attractive approach for sensitive domains where data " -"security and privacy are paramount. Given the robustness and efficiency " -"of XGBoost, combining it with federated learning offers a promising " -"solution for these specific challenges." +"Federated learning ensures that raw data remains on the local device, making " +"it an attractive approach for sensitive domains where data security and " +"privacy are paramount. Given the robustness and efficiency of XGBoost, " +"combining it with federated learning offers a promising solution for these " +"specific challenges." 
msgstr "" -"联邦学习可确保原始数据保留在本地设备上,因此对于数据安全和隐私至关重要的敏感领域来说,这是一种极具吸引力的方法。鉴于 XGBoost " -"的稳健性和高效性,将其与联邦学习相结合为应对这些特定挑战提供了一种前景广阔的解决方案。" +"联邦学习可确保原始数据保留在本地设备上,因此对于数据安全和隐私至关重要的敏感" +"领域来说,这是一种极具吸引力的方法。鉴于 XGBoost 的稳健性和高效性,将其与联邦" +"学习相结合为应对这些特定挑战提供了一种前景广阔的解决方案。" #: ../../source/tutorial-quickstart-xgboost.rst:30 msgid "" "In this tutorial we will learn how to train a federated XGBoost model on " "HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " -"example (`full code xgboost-quickstart " -"`_)" -" with two *clients* and one *server* to demonstrate how federated XGBoost" -" works, and then we dive into a more complex example (`full code xgboost-" -"comprehensive `_) to run various experiments." -msgstr "" -"在本教程中,我们将学习如何使用 Flower 和 :code:`xgboost` 软件包在 HIGGS 数据集上训练联邦 XGBoost " -"模型。我们将使用一个包含两个 * 客户端* 和一个 * 服务器* 的简单示例 (`完整代码 xgboost-quickstart " -"`_)来演示联邦 XGBoost 如何工作,然后我们将深入到一个更复杂的示例 (`完整代码 xgboost-" -"comprehensive `_),以运行各种实验。" +"example (`full code xgboost-quickstart `_) with two *clients* and one *server* to " +"demonstrate how federated XGBoost works, and then we dive into a more " +"complex example (`full code xgboost-comprehensive `_) to run various " +"experiments." +msgstr "" +"在本教程中,我们将学习如何使用 Flower 和 :code:`xgboost` 软件包在 HIGGS 数据" +"集上训练联邦 XGBoost 模型。我们将使用一个包含两个 * 客户端* 和一个 * 服务器* " +"的简单示例 (`完整代码 xgboost-quickstart `_)来演示联邦 XGBoost 如何工作,然后我" +"们将深入到一个更复杂的示例 (`完整代码 xgboost-comprehensive `_),以运行各种实" +"验。" #: ../../source/tutorial-quickstart-xgboost.rst:37 msgid "Environment Setup" @@ -20994,21 +21220,27 @@ msgstr "我们首先需要安装 Flower 和 Flower Datasets。您可以通过运 msgid "" "Since we want to use :code:`xgboost` package to build up XGBoost trees, " "let's go ahead and install :code:`xgboost`:" -msgstr "既然我们要使用 :code:`xgboost` 软件包来构建 XGBoost 树,那就继续安装 :code:`xgboost`:" +msgstr "" +"既然我们要使用 :code:`xgboost` 软件包来构建 XGBoost 树,那就继续安装 :code:" +"`xgboost`:" #: ../../source/tutorial-quickstart-xgboost.rst:57 msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. Now that we have all our " -"dependencies installed, let's run a simple distributed training with two " -"clients and one server." -msgstr "*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。" +"*Clients* are responsible for generating individual weight-updates for the " +"model based on their local datasets. Now that we have all our dependencies " +"installed, let's run a simple distributed training with two clients and one " +"server." 
+msgstr "" +"*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。现在我们已经安装了" +"所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。" #: ../../source/tutorial-quickstart-xgboost.rst:60 msgid "" -"In a file called :code:`client.py`, import xgboost, Flower, Flower " -"Datasets and other related functions:" -msgstr "在名为 :code:`client.py` 的文件中,导入 xgboost、Flower、Flower Datasets 和其他相关函数:" +"In a file called :code:`client.py`, import xgboost, Flower, Flower Datasets " +"and other related functions:" +msgstr "" +"在名为 :code:`client.py` 的文件中,导入 xgboost、Flower、Flower Datasets 和其" +"他相关函数:" #: ../../source/tutorial-quickstart-xgboost.rst:87 msgid "Dataset partition and hyper-parameter selection" @@ -21016,30 +21248,37 @@ msgstr "数据集划分和超参数选择" #: ../../source/tutorial-quickstart-xgboost.rst:89 msgid "" -"Prior to local training, we require loading the HIGGS dataset from Flower" -" Datasets and conduct data partitioning for FL:" -msgstr "在本地训练之前,我们需要从 Flower Datasets 加载 HIGGS 数据集,并对 FL 进行数据分区:" +"Prior to local training, we require loading the HIGGS dataset from Flower " +"Datasets and conduct data partitioning for FL:" +msgstr "" +"在本地训练之前,我们需要从 Flower Datasets 加载 HIGGS 数据集,并对 FL 进行数" +"据分区:" #: ../../source/tutorial-quickstart-xgboost.rst:102 msgid "" "In this example, we split the dataset into two partitions with uniform " -"distribution (:code:`IidPartitioner(num_partitions=2)`). Then, we load " -"the partition for the given client based on :code:`node_id`:" +"distribution (:code:`IidPartitioner(num_partitions=2)`). Then, we load the " +"partition for the given client based on :code:`node_id`:" msgstr "" -"在此示例中,我们将数据集分割成两个均匀分布的分区(:code:`IidPartitioner(num_partitions=2)`)。然后,我们根据" -" :code:`node_id` 为给定客户端加载分区:" +"在此示例中,我们将数据集分割成两个均匀分布的分区(:code:" +"`IidPartitioner(num_partitions=2)`)。然后,我们根据 :code:`node_id` 为给定客" +"户端加载分区:" #: ../../source/tutorial-quickstart-xgboost.rst:121 msgid "" "After that, we do train/test splitting on the given partition (client's " "local data), and transform data format for :code:`xgboost` package." -msgstr "然后,我们在给定的分区(客户端的本地数据)上进行训练/测试分割,并为 :code:`xgboost` 软件包转换数据格式。" +msgstr "" +"然后,我们在给定的分区(客户端的本地数据)上进行训练/测试分割,并为 :code:" +"`xgboost` 软件包转换数据格式。" #: ../../source/tutorial-quickstart-xgboost.rst:134 msgid "" -"The functions of :code:`train_test_split` and " -":code:`transform_dataset_to_dmatrix` are defined as below:" -msgstr ":code:`train_test_split` 和 :code:`transform_dataset_too_dmatrix` 的函数定义如下:" +"The functions of :code:`train_test_split` and :code:" +"`transform_dataset_to_dmatrix` are defined as below:" +msgstr "" +":code:`train_test_split` 和 :code:`transform_dataset_too_dmatrix` 的函数定义" +"如下:" #: ../../source/tutorial-quickstart-xgboost.rst:158 msgid "Finally, we define the hyper-parameters used for XGBoost training." @@ -21047,13 +21286,14 @@ msgstr "最后,我们定义了用于 XGBoost 训练的超参数。" #: ../../source/tutorial-quickstart-xgboost.rst:174 msgid "" -"The :code:`num_local_round` represents the number of iterations for local" -" tree boost. We use CPU for the training in default. One can shift it to " -"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " -"evaluation metric." +"The :code:`num_local_round` represents the number of iterations for local " +"tree boost. We use CPU for the training in default. One can shift it to GPU " +"by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as evaluation " +"metric." 
msgstr "" -"代码:`num_local_round`表示本地树的迭代次数。我们默认使用 CPU 进行训练。可以通过将 :code:`tree_method` " -"设置为 :code:`gpu_hist`,将其转换为 GPU。我们使用 AUC 作为评估指标。" +"代码:`num_local_round`表示本地树的迭代次数。我们默认使用 CPU 进行训练。可以通" +"过将 :code:`tree_method` 设置为 :code:`gpu_hist`,将其转换为 GPU。我们使用 " +"AUC 作为评估指标。" #: ../../source/tutorial-quickstart-xgboost.rst:181 msgid "Flower client definition for XGBoost" @@ -21061,110 +21301,116 @@ msgstr "用于 XGBoost 的 Flower 客户端定义" #: ../../source/tutorial-quickstart-xgboost.rst:183 msgid "" -"After loading the dataset we define the Flower client. We follow the " -"general rule to define :code:`XgbClient` class inherited from " -":code:`fl.client.Client`." +"After loading the dataset we define the Flower client. We follow the general " +"rule to define :code:`XgbClient` class inherited from :code:`fl.client." +"Client`." msgstr "" -"加载数据集后,我们定义 Flower 客户端。我们按照一般规则定义从 :code:`fl.client.Client` 继承而来的 " -":code:`XgbClient` 类。" +"加载数据集后,我们定义 Flower 客户端。我们按照一般规则定义从 :code:`fl." +"client.Client` 继承而来的 :code:`XgbClient` 类。" #: ../../source/tutorial-quickstart-xgboost.rst:193 msgid "" "The :code:`self.bst` is used to keep the Booster objects that remain " "consistent across rounds, allowing them to store predictions from trees " -"integrated in earlier rounds and maintain other essential data structures" -" for training." +"integrated in earlier rounds and maintain other essential data structures " +"for training." msgstr "" -"代码:`self.bst`用于保存在各轮中保持一致的 Booster " -"对象,使其能够存储在前几轮中集成的树的预测结果,并维护其他用于训练的重要数据结构。" +"代码:`self.bst`用于保存在各轮中保持一致的 Booster 对象,使其能够存储在前几轮" +"中集成的树的预测结果,并维护其他用于训练的重要数据结构。" #: ../../source/tutorial-quickstart-xgboost.rst:196 msgid "" -"Then, we override :code:`get_parameters`, :code:`fit` and " -":code:`evaluate` methods insides :code:`XgbClient` class as follows." +"Then, we override :code:`get_parameters`, :code:`fit` and :code:`evaluate` " +"methods insides :code:`XgbClient` class as follows." msgstr "" -"然后,我们在 :code:`XgbClient` 类中重写 :code:`get_parameters`、:code:`fit` 和 " -":code:`evaluate` 方法如下。" +"然后,我们在 :code:`XgbClient` 类中重写 :code:`get_parameters`、:code:`fit` " +"和 :code:`evaluate` 方法如下。" #: ../../source/tutorial-quickstart-xgboost.rst:210 msgid "" "Unlike neural network training, XGBoost trees are not started from a " -"specified random weights. In this case, we do not use " -":code:`get_parameters` and :code:`set_parameters` to initialise model " -"parameters for XGBoost. As a result, let's return an empty tensor in " -":code:`get_parameters` when it is called by the server at the first " -"round." +"specified random weights. In this case, we do not use :code:`get_parameters` " +"and :code:`set_parameters` to initialise model parameters for XGBoost. As a " +"result, let's return an empty tensor in :code:`get_parameters` when it is " +"called by the server at the first round." msgstr "" -"与神经网络训练不同,XGBoost 树不是从指定的随机参数开始的。在这种情况下,我们不使用 :code:`get_parameters` 和 " -":code:`set_parameters` 来初始化 XGBoost 的模型参数。因此,当服务器在第一轮调用 " -":code:`get_parameters` 时,让我们在 :code:`get_parameters` 中返回一个空张量。" +"与神经网络训练不同,XGBoost 树不是从指定的随机参数开始的。在这种情况下,我们" +"不使用 :code:`get_parameters` 和 :code:`set_parameters` 来初始化 XGBoost 的模" +"型参数。因此,当服务器在第一轮调用 :code:`get_parameters` 时,让我们在 :code:" +"`get_parameters` 中返回一个空张量。" #: ../../source/tutorial-quickstart-xgboost.rst:251 msgid "" -"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " -"up the first set of trees. the returned Booster object and config are " -"stored in :code:`self.bst` and :code:`self.config`, respectively. 
From " -"the second round, we load the global model sent from server to " -":code:`self.bst`, and then update model weights on local training data " -"with function :code:`local_boost` as follows:" +"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build up " +"the first set of trees. the returned Booster object and config are stored " +"in :code:`self.bst` and :code:`self.config`, respectively. From the second " +"round, we load the global model sent from server to :code:`self.bst`, and " +"then update model weights on local training data with function :code:" +"`local_boost` as follows:" msgstr "" -"在 :code:`fit`中,第一轮我们调用 :code:`xgb.train()`来建立第一组树,返回的 Booster 对象和 config " -"分别存储在 :code:`self.bst` 和 :code:`self.config` 中。从第二轮开始,我们将服务器发送的全局模型加载到 " -":code:`self.bst`,然后使用函数 :code:`local_boost`更新本地训练数据的模型权重,如下所示:" +"在 :code:`fit`中,第一轮我们调用 :code:`xgb.train()`来建立第一组树,返回的 " +"Booster 对象和 config 分别存储在 :code:`self.bst` 和 :code:`self.config` 中。" +"从第二轮开始,我们将服务器发送的全局模型加载到 :code:`self.bst`,然后使用函" +"数 :code:`local_boost`更新本地训练数据的模型权重,如下所示:" #: ../../source/tutorial-quickstart-xgboost.rst:269 msgid "" -"Given :code:`num_local_round`, we update trees by calling " -":code:`self.bst.update` method. After training, the last " -":code:`N=num_local_round` trees will be extracted to send to the server." +"Given :code:`num_local_round`, we update trees by calling :code:`self.bst." +"update` method. After training, the last :code:`N=num_local_round` trees " +"will be extracted to send to the server." msgstr "" -"给定 :code:`num_local_round`,我们通过调用 " -":code:`self.bst.update`方法更新树。训练结束后,我们将提取最后一个 :code:`N=num_local_round` " -"树并发送给服务器。" +"给定 :code:`num_local_round`,我们通过调用 :code:`self.bst.update`方法更新" +"树。训练结束后,我们将提取最后一个 :code:`N=num_local_round` 树并发送给服务" +"器。" #: ../../source/tutorial-quickstart-xgboost.rst:291 msgid "" -"In :code:`evaluate`, we call :code:`self.bst.eval_set` function to " -"conduct evaluation on valid set. The AUC value will be returned." -msgstr "在 :code:`evaluate`中,我们调用 :code:`self.bst.eval_set`函数对有效集合进行评估。将返回 AUC 值。" +"In :code:`evaluate`, we call :code:`self.bst.eval_set` function to conduct " +"evaluation on valid set. The AUC value will be returned." +msgstr "" +"在 :code:`evaluate`中,我们调用 :code:`self.bst.eval_set`函数对有效集合进行评" +"估。将返回 AUC 值。" #: ../../source/tutorial-quickstart-xgboost.rst:294 msgid "" -"Now, we can create an instance of our class :code:`XgbClient` and add one" -" line to actually run this client:" -msgstr "现在,我们可以创建一个 :code:`XgbClient` 类的实例,并添加一行来实际运行该客户端:" +"Now, we can create an instance of our class :code:`XgbClient` and add one " +"line to actually run this client:" +msgstr "" +"现在,我们可以创建一个 :code:`XgbClient` 类的实例,并添加一行来实际运行该客户" +"端:" #: ../../source/tutorial-quickstart-xgboost.rst:300 msgid "" -"That's it for the client. We only have to implement :code:`Client`and " -"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " -"tells the client which server to connect to. In our case we can run the " -"server and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"That's it for the client. We only have to implement :code:`Client`and call :" +"code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` tells the " +"client which server to connect to. 
In our case we can run the server and the " +"client on the same machine, therefore we use :code:`\"[::]:8080\"`. If we " +"run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the :code:`server_address` " +"we point the client at." msgstr "" -"这就是客户端。我们只需实现 :code:`客户端`并调用 :code:`fl.client.start_client()`。字符串 " -":code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " -":code:`\"[::]:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户端指向的 " -":code:`server_address`。" +"这就是客户端。我们只需实现 :code:`客户端`并调用 :code:`fl.client." +"start_client()`。字符串 :code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本" +"例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 :code:" +"`\"[::]:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同" +"的机器上,那么需要改变的只是客户端指向的 :code:`server_address`。" #: ../../source/tutorial-quickstart-xgboost.rst:311 msgid "" "These updates are then sent to the *server* which will aggregate them to " -"produce a better model. Finally, the *server* sends this improved version" -" of the model back to each *client* to finish a complete FL round." +"produce a better model. Finally, the *server* sends this improved version of " +"the model back to each *client* to finish a complete FL round." msgstr "" -"然后,这些更新会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将这个改进版的模型发回给每个*客户端*,以完成一轮完整的" -" FL。" +"然后,这些更新会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*" +"服务器*将这个改进版的模型发回给每个*客户端*,以完成一轮完整的 FL。" #: ../../source/tutorial-quickstart-xgboost.rst:314 msgid "" -"In a file named :code:`server.py`, import Flower and FedXgbBagging from " -":code:`flwr.server.strategy`." +"In a file named :code:`server.py`, import Flower and FedXgbBagging from :" +"code:`flwr.server.strategy`." msgstr "" -"在名为 :code:`server.py` 的文件中,从 :code:`flwr.server.strategy` 导入 Flower 和 " -"FedXgbBagging。" +"在名为 :code:`server.py` 的文件中,从 :code:`flwr.server.strategy` 导入 " +"Flower 和 FedXgbBagging。" #: ../../source/tutorial-quickstart-xgboost.rst:316 msgid "We first define a strategy for XGBoost bagging aggregation." @@ -21172,12 +21418,12 @@ msgstr "我们首先定义了 XGBoost bagging聚合策略。" #: ../../source/tutorial-quickstart-xgboost.rst:339 msgid "" -"We use two clients for this example. An " -":code:`evaluate_metrics_aggregation` function is defined to collect and " -"wighted average the AUC values from clients." +"We use two clients for this example. An :code:`evaluate_metrics_aggregation` " +"function is defined to collect and wighted average the AUC values from " +"clients." msgstr "" -"本示例使用两个客户端。我们定义了一个 :code:`evaluate_metrics_aggregation` 函数,用于收集客户机的 AUC " -"值并求取平均值。" +"本示例使用两个客户端。我们定义了一个 :code:`evaluate_metrics_aggregation` 函" +"数,用于收集客户机的 AUC 值并求取平均值。" #: ../../source/tutorial-quickstart-xgboost.rst:342 msgid "Then, we start the server:" @@ -21189,45 +21435,49 @@ msgstr "基于树的bagging聚合" #: ../../source/tutorial-quickstart-xgboost.rst:356 msgid "" -"You must be curious about how bagging aggregation works. Let's look into " -"the details." +"You must be curious about how bagging aggregation works. Let's look into the " +"details." msgstr "您一定很好奇bagging聚合是如何工作的。让我们来详细了解一下。" #: ../../source/tutorial-quickstart-xgboost.rst:358 msgid "" -"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " -":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." 
-" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " -"and :code:`evaluate` methods as follows:" +"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define :code:" +"`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`. Then, we " +"override the :code:`aggregate_fit`, :code:`aggregate_evaluate` and :code:" +"`evaluate` methods as follows:" msgstr "" -"在文件 :code:`flwr.server.strategy.fedxgb_bagging.py`中,我们定义了从 " -":code:`flwr.server.strategy.FedAvg`继承的 :code:`FedXgbBagging`。然后,我们覆盖 " -":code:`aggregate_fit`、:code:`aggregate_evaluate` 和 :code:`evaluate` 方法如下:" +"在文件 :code:`flwr.server.strategy.fedxgb_bagging.py`中,我们定义了从 :code:" +"`flwr.server.strategy.FedAvg`继承的 :code:`FedXgbBagging`。然后,我们覆盖 :" +"code:`aggregate_fit`、:code:`aggregate_evaluate` 和 :code:`evaluate` 方法如" +"下:" #: ../../source/tutorial-quickstart-xgboost.rst:454 msgid "" "In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " "trees by calling :code:`aggregate()` function:" msgstr "" -"在 :code:`aggregate_fit` 中,我们通过调用 :code:`aggregate()` 函数,按顺序聚合客户端的 XGBoost" -" 树:" +"在 :code:`aggregate_fit` 中,我们通过调用 :code:`aggregate()` 函数,按顺序聚" +"合客户端的 XGBoost 树:" #: ../../source/tutorial-quickstart-xgboost.rst:513 msgid "" "In this function, we first fetch the number of trees and the number of " -"parallel trees for the current and previous model by calling " -":code:`_get_tree_nums`. Then, the fetched information will be aggregated." -" After that, the trees (containing model weights) are aggregated to " -"generate a new tree model." +"parallel trees for the current and previous model by calling :code:" +"`_get_tree_nums`. Then, the fetched information will be aggregated. After " +"that, the trees (containing model weights) are aggregated to generate a new " +"tree model." msgstr "" -"在该函数中,我们首先通过调用 :code:`_get_tree_nums` " -"获取当前模型和上一个模型的树数和并行树数。然后,对获取的信息进行聚合。然后,聚合树(包含模型参数)生成新的树模型。" +"在该函数中,我们首先通过调用 :code:`_get_tree_nums` 获取当前模型和上一个模型" +"的树数和并行树数。然后,对获取的信息进行聚合。然后,聚合树(包含模型参数)生" +"成新的树模型。" #: ../../source/tutorial-quickstart-xgboost.rst:518 msgid "" "After traversal of all clients' models, a new global model is generated, " "followed by the serialisation, and sending back to each client." -msgstr "在遍历所有客户端的模型后,会生成一个新的全局模型,然后进行序列化,并发回给每个客户端。" +msgstr "" +"在遍历所有客户端的模型后,会生成一个新的全局模型,然后进行序列化,并发回给每" +"个客户端。" #: ../../source/tutorial-quickstart-xgboost.rst:523 msgid "Launch Federated XGBoost!" @@ -21236,21 +21486,22 @@ msgstr "启动联邦 XGBoost!" #: ../../source/tutorial-quickstart-xgboost.rst:585 msgid "" "Congratulations! You've successfully built and run your first federated " -"XGBoost system. The AUC values can be checked in " -":code:`metrics_distributed`. One can see that the average AUC increases " -"over FL rounds." +"XGBoost system. The AUC values can be checked in :code:" +"`metrics_distributed`. One can see that the average AUC increases over FL " +"rounds." msgstr "" -"恭喜您!您已成功构建并运行了第一个联邦 XGBoost 系统。可以在 :code:`metrics_distributed` 中查看 AUC " -"值。我们可以看到,平均 AUC 随 FL 轮数的增加而增加。" +"恭喜您!您已成功构建并运行了第一个联邦 XGBoost 系统。可以在 :code:" +"`metrics_distributed` 中查看 AUC 值。我们可以看到,平均 AUC 随 FL 轮数的增加" +"而增加。" #: ../../source/tutorial-quickstart-xgboost.rst:590 msgid "" -"The full `source code `_ for this example can be found in :code:`examples" -"/xgboost-quickstart`." +"The full `source code `_ for this example can be found in :code:`examples/" +"xgboost-quickstart`." 
msgstr "" -"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" #: ../../source/tutorial-quickstart-xgboost.rst:594 msgid "Comprehensive Federated XGBoost" @@ -21259,20 +21510,21 @@ msgstr "综合的联邦 XGBoost" #: ../../source/tutorial-quickstart-xgboost.rst:596 #, fuzzy msgid "" -"Now that you have known how federated XGBoost work with Flower, it's time" -" to run some more comprehensive experiments by customising the " -"experimental settings. In the xgboost-comprehensive example (`full code " -"`_), we provide more options to define various experimental" -" setups, including aggregation strategies, data partitioning and " -"centralised/distributed evaluation. We also support :doc:`Flower " -"simulation ` making it easy to simulate large " -"client cohorts in a resource-aware manner. Let's take a look!" +"Now that you have known how federated XGBoost work with Flower, it's time to " +"run some more comprehensive experiments by customising the experimental " +"settings. In the xgboost-comprehensive example (`full code `_), we provide " +"more options to define various experimental setups, including aggregation " +"strategies, data partitioning and centralised/distributed evaluation. We " +"also support :doc:`Flower simulation ` making it " +"easy to simulate large client cohorts in a resource-aware manner. Let's take " +"a look!" msgstr "" -"既然您已经知道联合 XGBoost 如何与 Flower 协同工作,那么现在就该通过自定义实验设置来运行一些更综合的实验了。在 xgboost-" -"comprehensive 示例 (`完整代码 " -"`_)中,我们提供了更多选项来定义各种实验设置,包括数据分区和集中/分布式评估。让我们一起来看看!" +"既然您已经知道联合 XGBoost 如何与 Flower 协同工作,那么现在就该通过自定义实验" +"设置来运行一些更综合的实验了。在 xgboost-comprehensive 示例 (`完整代码 " +"`_)" +"中,我们提供了更多选项来定义各种实验设置,包括数据分区和集中/分布式评估。让我" +"们一起来看看!" #: ../../source/tutorial-quickstart-xgboost.rst:603 #, fuzzy @@ -21281,41 +21533,40 @@ msgstr "集中式训练" #: ../../source/tutorial-quickstart-xgboost.rst:605 msgid "" -"In addition to bagging aggregation, we offer a cyclic training scheme, " -"which performs FL in a client-by-client fashion. Instead of aggregating " -"multiple clients, there is only one single client participating in the " -"training per round in the cyclic training scenario. The trained local " -"XGBoost trees will be passed to the next client as an initialised model " -"for next round's boosting." +"In addition to bagging aggregation, we offer a cyclic training scheme, which " +"performs FL in a client-by-client fashion. Instead of aggregating multiple " +"clients, there is only one single client participating in the training per " +"round in the cyclic training scenario. The trained local XGBoost trees will " +"be passed to the next client as an initialised model for next round's " +"boosting." msgstr "" #: ../../source/tutorial-quickstart-xgboost.rst:609 msgid "" -"To do this, we first customise a :code:`ClientManager` in " -":code:`server_utils.py`:" +"To do this, we first customise a :code:`ClientManager` in :code:" +"`server_utils.py`:" msgstr "" #: ../../source/tutorial-quickstart-xgboost.rst:649 msgid "" -"The customised :code:`ClientManager` samples all available clients in " -"each FL round based on the order of connection to the server. Then, we " -"define a new strategy :code:`FedXgbCyclic` in " -":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " -"select only one client in given round and pass the received model to next" -" client." +"The customised :code:`ClientManager` samples all available clients in each " +"FL round based on the order of connection to the server. 
Then, we define a " +"new strategy :code:`FedXgbCyclic` in :code:`flwr.server.strategy." +"fedxgb_cyclic.py`, in order to sequentially select only one client in given " +"round and pass the received model to next client." msgstr "" #: ../../source/tutorial-quickstart-xgboost.rst:690 msgid "" "Unlike the original :code:`FedAvg`, we don't perform aggregation here. " -"Instead, we just make a copy of the received client model as global model" -" by overriding :code:`aggregate_fit`." +"Instead, we just make a copy of the received client model as global model by " +"overriding :code:`aggregate_fit`." msgstr "" #: ../../source/tutorial-quickstart-xgboost.rst:693 msgid "" -"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" -" methods ensure the clients to be sequentially selected given FL round:" +"Also, the customised :code:`configure_fit` and :code:`configure_evaluate` " +"methods ensure the clients to be sequentially selected given FL round:" msgstr "" #: ../../source/tutorial-quickstart-xgboost.rst:757 @@ -21324,15 +21575,16 @@ msgstr "定制数据分区" #: ../../source/tutorial-quickstart-xgboost.rst:759 msgid "" -"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" -" to instantiate the data partitioner based on the given " -":code:`num_partitions` and :code:`partitioner_type`. Currently, we " -"provide four supported partitioner type to simulate the uniformity/non-" -"uniformity in data quantity (uniform, linear, square, exponential)." +"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner` to " +"instantiate the data partitioner based on the given :code:`num_partitions` " +"and :code:`partitioner_type`. Currently, we provide four supported " +"partitioner type to simulate the uniformity/non-uniformity in data quantity " +"(uniform, linear, square, exponential)." msgstr "" -"在 :code:`dataset.py` 中,我们有一个函数 :code:`instantiate_partitioner` 来根据给定的 " -":code:`num_partitions` 和 :code:`partitioner_type` " -"来实例化数据分区器。目前,我们提供四种支持的分区器类型(均匀、线性、正方形、指数)来模拟数据量的均匀性/非均匀性。" +"在 :code:`dataset.py` 中,我们有一个函数 :code:`instantiate_partitioner` 来根" +"据给定的 :code:`num_partitions` 和 :code:`partitioner_type` 来实例化数据分区" +"器。目前,我们提供四种支持的分区器类型(均匀、线性、正方形、指数)来模拟数据" +"量的均匀性/非均匀性。" #: ../../source/tutorial-quickstart-xgboost.rst:790 msgid "Customised centralised/distributed evaluation" @@ -21341,29 +21593,29 @@ msgstr "定制的集中/分布式评估" #: ../../source/tutorial-quickstart-xgboost.rst:792 #, fuzzy msgid "" -"To facilitate centralised evaluation, we define a function in " -":code:`server_utils.py`:" +"To facilitate centralised evaluation, we define a function in :code:" +"`server_utils.py`:" msgstr "为便于集中评估,我们在 :code:`server.py` 中定义了一个函数:" #: ../../source/tutorial-quickstart-xgboost.rst:824 msgid "" -"This function returns a evaluation function which instantiates a " -":code:`Booster` object and loads the global model weights to it. The " -"evaluation is conducted by calling :code:`eval_set()` method, and the " -"tested AUC value is reported." +"This function returns a evaluation function which instantiates a :code:" +"`Booster` object and loads the global model weights to it. The evaluation is " +"conducted by calling :code:`eval_set()` method, and the tested AUC value is " +"reported." 
msgstr "" -"此函数返回一个评估函数,该函数实例化一个 :code:`Booster` 对象,并向其加载全局模型参数。评估通过调用 " -":code:`eval_set()` 方法进行,并报告测试的 AUC 值。" +"此函数返回一个评估函数,该函数实例化一个 :code:`Booster` 对象,并向其加载全局" +"模型参数。评估通过调用 :code:`eval_set()` 方法进行,并报告测试的 AUC 值。" #: ../../source/tutorial-quickstart-xgboost.rst:827 #, fuzzy msgid "" -"As for distributed evaluation on the clients, it's same as the quick-" -"start example by overriding the :code:`evaluate()` method insides the " -":code:`XgbClient` class in :code:`client_utils.py`." +"As for distributed evaluation on the clients, it's same as the quick-start " +"example by overriding the :code:`evaluate()` method insides the :code:" +"`XgbClient` class in :code:`client_utils.py`." msgstr "" -"至于客户端上的分布式评估,与快速启动示例相同,通过覆盖 :code:`client.py` 中 :code:`XgbClient` 类内部的 " -":code:`evaluate()` 方法。" +"至于客户端上的分布式评估,与快速启动示例相同,通过覆盖 :code:`client.py` 中 :" +"code:`XgbClient` 类内部的 :code:`evaluate()` 方法。" #: ../../source/tutorial-quickstart-xgboost.rst:831 #, fuzzy @@ -21373,21 +21625,21 @@ msgstr "运行模拟" #: ../../source/tutorial-quickstart-xgboost.rst:832 msgid "" "We also provide an example code (:code:`sim.py`) to use the simulation " -"capabilities of Flower to simulate federated XGBoost training on either a" -" single machine or a cluster of machines." +"capabilities of Flower to simulate federated XGBoost training on either a " +"single machine or a cluster of machines." msgstr "" #: ../../source/tutorial-quickstart-xgboost.rst:866 msgid "" -"After importing all required packages, we define a :code:`main()` " -"function to perform the simulation process:" +"After importing all required packages, we define a :code:`main()` function " +"to perform the simulation process:" msgstr "" #: ../../source/tutorial-quickstart-xgboost.rst:921 msgid "" "We first load the dataset and perform data partitioning, and the pre-" -"processed data is stored in a :code:`list`. After the simulation begins, " -"the clients won't need to pre-process their partitions again." +"processed data is stored in a :code:`list`. After the simulation begins, the " +"clients won't need to pre-process their partitions again." msgstr "" #: ../../source/tutorial-quickstart-xgboost.rst:924 @@ -21396,8 +21648,8 @@ msgstr "" #: ../../source/tutorial-quickstart-xgboost.rst:975 msgid "" -"After that, we start the simulation by calling " -":code:`fl.simulation.start_simulation`:" +"After that, we start the simulation by calling :code:`fl.simulation." +"start_simulation`:" msgstr "" #: ../../source/tutorial-quickstart-xgboost.rst:995 @@ -21413,22 +21665,25 @@ msgstr "参数解析器" #: ../../source/tutorial-quickstart-xgboost.rst:1040 #, fuzzy msgid "" -"In :code:`utils.py`, we define the arguments parsers for clients, server " -"and simulation, allowing users to specify different experimental " -"settings. Let's first see the sever side:" -msgstr "在 :code:`utils.py` 中,我们定义了客户端和服务器端的参数解析器,允许用户指定不同的实验设置。让我们先看看服务器端:" +"In :code:`utils.py`, we define the arguments parsers for clients, server and " +"simulation, allowing users to specify different experimental settings. Let's " +"first see the sever side:" +msgstr "" +"在 :code:`utils.py` 中,我们定义了客户端和服务器端的参数解析器,允许用户指定" +"不同的实验设置。让我们先看看服务器端:" #: ../../source/tutorial-quickstart-xgboost.rst:1086 #, fuzzy msgid "" "This allows user to specify training strategies / the number of total " -"clients / FL rounds / participating clients / clients for evaluation, and" -" evaluation fashion. 
Note that with :code:`--centralised-eval`, the sever" -" will do centralised evaluation and all functionalities for client " -"evaluation will be disabled." +"clients / FL rounds / participating clients / clients for evaluation, and " +"evaluation fashion. Note that with :code:`--centralised-eval`, the sever " +"will do centralised evaluation and all functionalities for client evaluation " +"will be disabled." msgstr "" -"这允许用户指定总客户数/FL 轮数/参与客户数/评估客户数以及评估方式。请注意,如果使用 :code:`--centralised-" -"eval`,服务器将进行集中评估,客户端评估的所有功能将被禁用。" +"这允许用户指定总客户数/FL 轮数/参与客户数/评估客户数以及评估方式。请注意,如" +"果使用 :code:`--centralised-eval`,服务器将进行集中评估,客户端评估的所有功能" +"将被禁用。" #: ../../source/tutorial-quickstart-xgboost.rst:1090 msgid "Then, the argument parser on client side:" @@ -21437,12 +21692,13 @@ msgstr "然后是客户端的参数解析器:" #: ../../source/tutorial-quickstart-xgboost.rst:1144 #, fuzzy msgid "" -"This defines various options for client data partitioning. Besides, " -"clients also have an option to conduct evaluation on centralised test set" -" by setting :code:`--centralised-eval`, as well as an option to perform " -"scaled learning rate based on the number of clients by setting :code" -":`--scaled-lr`." -msgstr "这定义了客户端数据分区的各种选项。此外,通过设置 :code:`-centralised-eval`,客户端还可以选择在集中测试集上进行评估。" +"This defines various options for client data partitioning. Besides, clients " +"also have an option to conduct evaluation on centralised test set by " +"setting :code:`--centralised-eval`, as well as an option to perform scaled " +"learning rate based on the number of clients by setting :code:`--scaled-lr`." +msgstr "" +"这定义了客户端数据分区的各种选项。此外,通过设置 :code:`-centralised-eval`," +"客户端还可以选择在集中测试集上进行评估。" #: ../../source/tutorial-quickstart-xgboost.rst:1148 msgid "We also have an argument parser for simulation:" @@ -21459,10 +21715,12 @@ msgstr "命令示例" #: ../../source/tutorial-quickstart-xgboost.rst:1231 #, fuzzy msgid "" -"To run a centralised evaluated experiment with bagging strategy on 5 " -"clients with exponential distribution for 50 rounds, we first start the " -"server as below:" -msgstr "为了在 5 个客户端上进行 50 轮指数分布的集中评估实验,我们首先启动服务器,如下所示:" +"To run a centralised evaluated experiment with bagging strategy on 5 clients " +"with exponential distribution for 50 rounds, we first start the server as " +"below:" +msgstr "" +"为了在 5 个客户端上进行 50 轮指数分布的集中评估实验,我们首先启动服务器,如下" +"所示:" #: ../../source/tutorial-quickstart-xgboost.rst:1238 msgid "Then, on each client terminal, we start the clients:" @@ -21475,12 +21733,13 @@ msgstr "" #: ../../source/tutorial-quickstart-xgboost.rst:1250 #, fuzzy msgid "" -"The full `code `_ for this comprehensive example can be found in" -" :code:`examples/xgboost-comprehensive`." +"The full `code `_ for this comprehensive example can be found in :code:" +"`examples/xgboost-comprehensive`." msgstr "" -"此综合示例的全部`源代码 `_ 可在 :code:`examples/xgboost-comprehensive` 中找到。" +"此综合示例的全部`源代码 `_ 可在 :code:`examples/xgboost-comprehensive` 中找" +"到。" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 msgid "Build a strategy from scratch" @@ -21490,42 +21749,43 @@ msgstr "从零开始制定策略" msgid "" "Welcome to the third part of the Flower federated learning tutorial. In " "previous parts of this tutorial, we introduced federated learning with " -"PyTorch and Flower (`part 1 `__) and we learned how strategies " -"can be used to customize the execution on both the server and the clients" -" (`part 2 `__)." 
-msgstr "" -"欢迎来到 Flower 联邦学习教程的第三部分。在本教程的前几部分,我们介绍了 PyTorch 和 Flower 的联邦学习(`part 1 " -"`__),并学习了如何使用策略来定制服务器和客户端的执行(`part 2 " -"`__)。" +"PyTorch and Flower (`part 1 `__) and we learned how strategies can be " +"used to customize the execution on both the server and the clients (`part 2 " +"`__)." +msgstr "" +"欢迎来到 Flower 联邦学习教程的第三部分。在本教程的前几部分,我们介绍了 " +"PyTorch 和 Flower 的联邦学习(`part 1 `__),并学习了如何使用策略来定" +"制服务器和客户端的执行(`part 2 `__)。" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 msgid "" -"In this notebook, we'll continue to customize the federated learning " -"system we built previously by creating a custom version of FedAvg (again," -" using `Flower `__ and `PyTorch " -"`__)." +"In this notebook, we'll continue to customize the federated learning system " +"we built previously by creating a custom version of FedAvg (again, using " +"`Flower `__ and `PyTorch `__)." msgstr "" -"在本笔记中,我们将通过创建 FedAvg 的自定义版本(再次使用 `Flower `__ 和 " -"`PyTorch `__),继续定制我们之前构建的联邦学习系统。" +"在本笔记中,我们将通过创建 FedAvg 的自定义版本(再次使用 `Flower `__ 和 `PyTorch `__),继续定制我们之前构建" +"的联邦学习系统。" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the Flower community on Slack to connect, ask questions, and get help: " -"`Join Slack `__ 🌼 We'd love to hear from " -"you in the ``#introductions`` channel! And if anything is unclear, head " -"over to the ``#questions`` channel." +"`Star Flower on GitHub `__ ⭐️ and join the " +"Flower community on Slack to connect, ask questions, and get help: `Join " +"Slack `__ 🌼 We'd love to hear from you in the " +"``#introductions`` channel! And if anything is unclear, head over to the " +"``#questions`` channel." msgstr "" -"`Star Flower on GitHub `__ ⭐️ 并加入 Slack " -"上的 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ 🌼 " -"我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" +"`Star Flower on GitHub `__ ⭐️ 并加入 Slack 上" +"的 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ 🌼 我们希望在 ``#introductions`` 频道听到您的声音!如果有任何" +"不清楚的地方,请访问 ``#questions`` 频道。" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:17 msgid "Let's build a new ``Strategy`` from scratch!" @@ -21570,19 +21830,20 @@ msgstr "现在我们已经安装了所有依赖项,可以导入本教程所需 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:102 #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:101 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " -"GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." 
-msgstr "" -"可以切换到已启用 GPU 加速的运行时(在 Google Colab 上: 运行时 > 更改运行时类型 > 硬件加速: GPU > " -"保存``)。但请注意,Google Colab 并非总能提供 GPU 加速。如果在以下部分中看到与 GPU 可用性相关的错误,请考虑通过设置 " -"``DEVICE = torch.device(\"cpu\")`` 切回基于 CPU 的执行。如果运行时已启用 GPU " -"加速,你应该会看到输出``Training on cuda``,否则会显示``Training on cpu``。" +"It is possible to switch to a runtime that has GPU acceleration enabled (on " +"Google Colab: ``Runtime > Change runtime type > Hardware acclerator: GPU > " +"Save``). Note, however, that Google Colab is not always able to offer GPU " +"acceleration. If you see an error related to GPU availability in one of the " +"following sections, consider switching back to CPU-based execution by " +"setting ``DEVICE = torch.device(\"cpu\")``. If the runtime has GPU " +"acceleration enabled, you should see the output ``Training on cuda``, " +"otherwise it'll say ``Training on cpu``." +msgstr "" +"可以切换到已启用 GPU 加速的运行时(在 Google Colab 上: 运行时 > 更改运行时类" +"型 > 硬件加速: GPU > 保存``)。但请注意,Google Colab 并非总能提供 GPU 加" +"速。如果在以下部分中看到与 GPU 可用性相关的错误,请考虑通过设置 ``DEVICE = " +"torch.device(\"cpu\")`` 切回基于 CPU 的执行。如果运行时已启用 GPU 加速,你应" +"该会看到输出``Training on cuda``,否则会显示``Training on cpu``。" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:114 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:115 @@ -21593,15 +21854,16 @@ msgstr "数据加载" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:116 #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:116 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``. We introduce a new parameter" -" ``num_clients`` which allows us to call ``load_datasets`` with different" -" numbers of clients." +"Let's now load the CIFAR-10 training and test set, partition them into ten " +"smaller datasets (each split into training and validation set), and wrap " +"everything in their own ``DataLoader``. We introduce a new parameter " +"``num_clients`` which allows us to call ``load_datasets`` with different " +"numbers of clients." 
msgstr "" -"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成 10 " -"个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 ``DataLoader`` 中。我们引入了一个新参数 " -"``num_clients``,它允许我们使用不同数量的客户端调用 ``load_datasets``。" +"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成 10 个较小的数据集(每" +"个数据集又分为训练集和验证集),并将所有数据都封装在各自的 ``DataLoader`` " +"中。我们引入了一个新参数 ``num_clients``,它允许我们使用不同数量的客户端调用 " +"``load_datasets``。" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:167 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:168 @@ -21613,9 +21875,11 @@ msgstr "模型培训/评估" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:170 #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 msgid "" -"Let's continue with the usual model definition (including " -"``set_parameters`` and ``get_parameters``), training and test functions:" -msgstr "让我们继续使用常见的模型定义(包括 `set_parameters` 和 `get_parameters`)、训练和测试函数:" +"Let's continue with the usual model definition (including ``set_parameters`` " +"and ``get_parameters``), training and test functions:" +msgstr "" +"让我们继续使用常见的模型定义(包括 `set_parameters` 和 `get_parameters`)、训" +"练和测试函数:" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:258 @@ -21625,14 +21889,14 @@ msgstr "Flower 客户端" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:260 #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:260 msgid "" -"To implement the Flower client, we (again) create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " -"``cid`` to the client and use it log additional details:" +"To implement the Flower client, we (again) create a subclass of ``flwr." +"client.NumPyClient`` and implement the three methods ``get_parameters``, " +"``fit``, and ``evaluate``. Here, we also pass the ``cid`` to the client and " +"use it log additional details:" msgstr "" -"为了实现 Flower 客户端,我们(再次)创建了 ``flwr.client.NumPyClient`` 的子类,并实现了 " -"``get_parameters``、``fit`` 和 ``evaluate``三个方法。在这里,我们还将 ``cid`` " -"传递给客户端,并使用它记录其他详细信息:" +"为了实现 Flower 客户端,我们(再次)创建了 ``flwr.client.NumPyClient`` 的子" +"类,并实现了 ``get_parameters``、``fit`` 和 ``evaluate``三个方法。在这里,我" +"们还将 ``cid`` 传递给客户端,并使用它记录其他详细信息:" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:308 msgid "Let's test what we have so far before we continue:" @@ -21644,14 +21908,15 @@ msgstr "从零开始构建策略" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:341 msgid "" -"Let’s overwrite the ``configure_fit`` method such that it passes a higher" -" learning rate (potentially also other hyperparameters) to the optimizer " -"of a fraction of the clients. We will keep the sampling of the clients as" -" it is in ``FedAvg`` and then change the configuration dictionary (one of" -" the ``FitIns`` attributes)." +"Let’s overwrite the ``configure_fit`` method such that it passes a higher " +"learning rate (potentially also other hyperparameters) to the optimizer of a " +"fraction of the clients. We will keep the sampling of the clients as it is " +"in ``FedAvg`` and then change the configuration dictionary (one of the " +"``FitIns`` attributes)." 
msgstr "" -"让我们重写 ``configure_fit`` 方法,使其向一部分客户的优化器传递更高的学习率(可能还有其他超参数)。我们将保持 " -"``FedAvg`` 中的客户端采样,然后更改配置字典(``FitIns`` 属性之一)。" +"让我们重写 ``configure_fit`` 方法,使其向一部分客户的优化器传递更高的学习率" +"(可能还有其他超参数)。我们将保持 ``FedAvg`` 中的客户端采样,然后更改配置字" +"典(``FitIns`` 属性之一)。" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:507 msgid "" @@ -21667,17 +21932,18 @@ msgstr "回顾" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:536 msgid "" -"In this notebook, we’ve seen how to implement a custom strategy. A custom" -" strategy enables granular control over client node configuration, result" -" aggregation, and more. To define a custom strategy, you only have to " -"overwrite the abstract methods of the (abstract) base class ``Strategy``." -" To make custom strategies even more powerful, you can pass custom " -"functions to the constructor of your new class (``__init__``) and then " -"call these functions whenever needed." +"In this notebook, we’ve seen how to implement a custom strategy. A custom " +"strategy enables granular control over client node configuration, result " +"aggregation, and more. To define a custom strategy, you only have to " +"overwrite the abstract methods of the (abstract) base class ``Strategy``. To " +"make custom strategies even more powerful, you can pass custom functions to " +"the constructor of your new class (``__init__``) and then call these " +"functions whenever needed." msgstr "" -"在本笔记中,我们了解了如何实施自定义策略。自定义策略可以对客户端节点配置、结果聚合等进行细粒度控制。要定义自定义策略,只需覆盖(抽象)基类 " -"``Strategy`` " -"的抽象方法即可。为使自定义策略更加强大,您可以将自定义函数传递给新类的构造函数(`__init__``),然后在需要时调用这些函数。" +"在本笔记中,我们了解了如何实施自定义策略。自定义策略可以对客户端节点配置、结" +"果聚合等进行细粒度控制。要定义自定义策略,只需覆盖(抽象)基类 ``Strategy`` " +"的抽象方法即可。为使自定义策略更加强大,您可以将自定义函数传递给新类的构造函" +"数(`__init__``),然后在需要时调用这些函数。" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:550 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:948 @@ -21685,11 +21951,11 @@ msgstr "" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:715 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 msgid "" -"Before you continue, make sure to join the Flower community on Slack: " -"`Join Slack `__" +"Before you continue, make sure to join the Flower community on Slack: `Join " +"Slack `__" msgstr "" -"在继续之前,请务必加入 Slack 上的 Flower 社区:`Join Slack `__" +"在继续之前,请务必加入 Slack 上的 Flower 社区:`Join Slack `__" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:552 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:950 @@ -21697,19 +21963,21 @@ msgstr "" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:717 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 msgid "" -"There's a dedicated ``#questions`` channel if you need help, but we'd " -"also love to hear who you are in ``#introductions``!" -msgstr "如果您需要帮助,我们有专门的 ``#questions`` 频道,但我们也很乐意在 ``#introductions`` 中了解您是谁!" +"There's a dedicated ``#questions`` channel if you need help, but we'd also " +"love to hear who you are in ``#introductions``!" +msgstr "" +"如果您需要帮助,我们有专门的 ``#questions`` 频道,但我们也很乐意在 " +"``#introductions`` 中了解您是谁!" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:554 msgid "" -"The `Flower Federated Learning Tutorial - Part 4 " -"`__ introduces ``Client``, the flexible API underlying " -"``NumPyClient``." +"The `Flower Federated Learning Tutorial - Part 4 `__ introduces " +"``Client``, the flexible API underlying ``NumPyClient``." 
msgstr "" -"Flower联邦学习教程 - 第4部分 `__ 介绍了``Client``,它是``NumPyClient``底层的灵活应用程序接口。" +"Flower联邦学习教程 - 第4部分 `__ 介绍了``Client``,它是``NumPyClient``底" +"层的灵活应用程序接口。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 msgid "Customize the client" @@ -21717,37 +21985,39 @@ msgstr "自定义客户端" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 msgid "" -"Welcome to the fourth part of the Flower federated learning tutorial. In " -"the previous parts of this tutorial, we introduced federated learning " -"with PyTorch and Flower (`part 1 `__), we learned how " -"strategies can be used to customize the execution on both the server and " -"the clients (`part 2 `__), and we built our own " -"custom strategy from scratch (`part 3 `__)." -msgstr "" -"欢迎来到 Flower 联邦学习教程的第四部分。在本教程的前几部分中,我们介绍了 PyTorch 和 Flower 的联邦学习(`part 1 " -"`__),了解了如何使用策略来定制服务器和客户端的执行(`part 2 " -"`__),并从头开始构建了我们自己的定制策略(`part 3 " -"`__)。" +"Welcome to the fourth part of the Flower federated learning tutorial. In the " +"previous parts of this tutorial, we introduced federated learning with " +"PyTorch and Flower (`part 1 `__), we learned how strategies can be used " +"to customize the execution on both the server and the clients (`part 2 " +"`__), and we built our own custom strategy from scratch (`part " +"3 `__)." +msgstr "" +"欢迎来到 Flower 联邦学习教程的第四部分。在本教程的前几部分中,我们介绍了 " +"PyTorch 和 Flower 的联邦学习(`part 1 `__),了解了如何使用策略来定制" +"服务器和客户端的执行(`part 2 `__),并从头开始构建了我们自己的" +"定制策略(`part 3 `__)。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 msgid "" -"In this notebook, we revisit ``NumPyClient`` and introduce a new " -"baseclass for building clients, simply named ``Client``. In previous " -"parts of this tutorial, we've based our client on ``NumPyClient``, a " -"convenience class which makes it easy to work with machine learning " -"libraries that have good NumPy interoperability. With ``Client``, we gain" -" a lot of flexibility that we didn't have before, but we'll also have to " -"do a few things the we didn't have to do before." +"In this notebook, we revisit ``NumPyClient`` and introduce a new baseclass " +"for building clients, simply named ``Client``. In previous parts of this " +"tutorial, we've based our client on ``NumPyClient``, a convenience class " +"which makes it easy to work with machine learning libraries that have good " +"NumPy interoperability. With ``Client``, we gain a lot of flexibility that " +"we didn't have before, but we'll also have to do a few things the we didn't " +"have to do before." msgstr "" -"在本笔记中,我们将重温 ``NumPyClient`` 并引入一个用于构建客户端的新基类,简单命名为 " -"``Client``。在本教程的前几部分中,我们的客户端基于``NumPyClient``,这是一个便捷类,可以让我们轻松地与具有良好 NumPy" -" 互操作性的机器学习库协同工作。有了 ``Client``,我们获得了很多以前没有的灵活性,但我们也必须做一些以前不需要做的事情。" +"在本笔记中,我们将重温 ``NumPyClient`` 并引入一个用于构建客户端的新基类,简单" +"命名为 ``Client``。在本教程的前几部分中,我们的客户端基于``NumPyClient``,这" +"是一个便捷类,可以让我们轻松地与具有良好 NumPy 互操作性的机器学习库协同工作。" +"有了 ``Client``,我们获得了很多以前没有的灵活性,但我们也必须做一些以前不需要" +"做的事情。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:18 msgid "" @@ -21762,12 +22032,12 @@ msgstr "步骤 0:准备工作" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:117 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``." 
+"Let's now load the CIFAR-10 training and test set, partition them into ten " +"smaller datasets (each split into training and validation set), and wrap " +"everything in their own ``DataLoader``." msgstr "" -"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成十个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 " -"``DataLoader`` 中。" +"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成十个较小的数据集(每个" +"数据集又分为训练集和验证集),并将所有数据都封装在各自的 ``DataLoader`` 中。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:259 msgid "Step 1: Revisiting NumPyClient" @@ -21775,14 +22045,14 @@ msgstr "步骤 1:重温 NumPyClient" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:261 msgid "" -"So far, we've implemented our client by subclassing " -"``flwr.client.NumPyClient``. The three methods we implemented are " -"``get_parameters``, ``fit``, and ``evaluate``. Finally, we wrap the " -"creation of instances of this class in a function called ``client_fn``:" +"So far, we've implemented our client by subclassing ``flwr.client." +"NumPyClient``. The three methods we implemented are ``get_parameters``, " +"``fit``, and ``evaluate``. Finally, we wrap the creation of instances of " +"this class in a function called ``client_fn``:" msgstr "" -"到目前为止,我们通过子类化 ``flwr.client.NumPyClient`` " -"实现了我们的客户端。我们实现了三个方法:``get_parameters``, ``fit`, 和``evaluate``。最后,我们用一个名为 " -"``client_fn`` 的函数来创建该类的实例:" +"到目前为止,我们通过子类化 ``flwr.client.NumPyClient`` 实现了我们的客户端。我" +"们实现了三个方法:``get_parameters``, ``fit`, 和``evaluate``。最后,我们用一" +"个名为 ``client_fn`` 的函数来创建该类的实例:" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:309 msgid "" @@ -21791,8 +22061,9 @@ msgid "" "``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " "``numpyclient_fn``. Let's run it to see the output we get:" msgstr "" -"我们以前见过这种情况,目前没有什么新东西。与之前的笔记相比,唯一*小*的不同是命名,我们把 ``FlowerClient`` 改成了 " -"``FlowerNumPyClient``,把 `client_fn` 改成了 ``numpyclient_fn``。让我们运行它看看输出结果:" +"我们以前见过这种情况,目前没有什么新东西。与之前的笔记相比,唯一*小*的不同是" +"命名,我们把 ``FlowerClient`` 改成了 ``FlowerNumPyClient``,把 `client_fn` 改" +"成了 ``numpyclient_fn``。让我们运行它看看输出结果:" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:339 msgid "" @@ -21805,32 +22076,32 @@ msgid "" "Let's dive a little bit deeper and discuss how Flower executes this " "simulation. Whenever a client is selected to do some work, " "``start_simulation`` calls the function ``numpyclient_fn`` to create an " -"instance of our ``FlowerNumPyClient`` (along with loading the model and " -"the data)." +"instance of our ``FlowerNumPyClient`` (along with loading the model and the " +"data)." msgstr "" -"让我们再深入一点,讨论一下 Flower 是如何执行模拟的。每当一个客户端被选中进行工作时,`start_simulation`` 就会调用函数 " -"`numpyclient_fn` 来创建我们的 ``FlowerNumPyClient`` 实例(同时加载模型和数据)。" +"让我们再深入一点,讨论一下 Flower 是如何执行模拟的。每当一个客户端被选中进行" +"工作时,`start_simulation`` 就会调用函数 `numpyclient_fn` 来创建我们的 " +"``FlowerNumPyClient`` 实例(同时加载模型和数据)。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:343 msgid "" "But here's the perhaps surprising part: Flower doesn't actually use the " -"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " -"makes it look like a subclass of ``flwr.client.Client``, not " -"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " -"know how to handle ``NumPyClient``'s, it only knows how to handle " -"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " -"top of ``Client``." 
-msgstr "" -"但令人惊讶的部分也许就在这里: Flower 实际上并不直接使用 ``FlowerNumPyClient`` " -"对象。相反,它封装了该对象,使其看起来像 ``flwr.client.Client`` 的子类,而不是 " -"``flwr.client.NumPyClient``。事实上,Flower 核心框架不知道如何处理 " -"``NumPyClient``,它只知道如何处理 ``Client``。``NumPyClient`` " +"``FlowerNumPyClient`` object directly. Instead, it wraps the object to makes " +"it look like a subclass of ``flwr.client.Client``, not ``flwr.client." +"NumPyClient``. In fact, the Flower core framework doesn't know how to handle " +"``NumPyClient``'s, it only knows how to handle ``Client``'s. ``NumPyClient`` " +"is just a convenience abstraction built on top of ``Client``." +msgstr "" +"但令人惊讶的部分也许就在这里: Flower 实际上并不直接使用 " +"``FlowerNumPyClient`` 对象。相反,它封装了该对象,使其看起来像 ``flwr.client." +"Client`` 的子类,而不是 ``flwr.client.NumPyClient``。事实上,Flower 核心框架" +"不知道如何处理 ``NumPyClient``,它只知道如何处理 ``Client``。``NumPyClient`` " "只是建立在``Client``之上的便捷抽象类。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:345 msgid "" -"Instead of building on top of ``NumPyClient``, we can directly build on " -"top of ``Client``." +"Instead of building on top of ``NumPyClient``, we can directly build on top " +"of ``Client``." msgstr "与其在 ``NumPyClient`` 上构建,我们可以直接在 ``Client`` 上构建。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:357 @@ -21839,70 +22110,80 @@ msgstr "步骤 2:从 ``NumPyClient`` 移至 ``Client``" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:359 msgid "" -"Let's try to do the same thing using ``Client`` instead of " -"``NumPyClient``." +"Let's try to do the same thing using ``Client`` instead of ``NumPyClient``." msgstr "让我们尝试使用 ``Client`` 代替 ``NumPyClient`` 做同样的事情。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:465 msgid "" -"Before we discuss the code in more detail, let's try to run it! Gotta " -"make sure our new ``Client``-based client works, right?" -msgstr "在详细讨论代码之前,让我们试着运行它!必须确保我们基于 ``Client`` 的新客户端能正常运行,对吗?" +"Before we discuss the code in more detail, let's try to run it! Gotta make " +"sure our new ``Client``-based client works, right?" +msgstr "" +"在详细讨论代码之前,让我们试着运行它!必须确保我们基于 ``Client`` 的新客户端" +"能正常运行,对吗?" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:490 msgid "" "That's it, we're now using ``Client``. It probably looks similar to what " "we've done with ``NumPyClient``. So what's the difference?" -msgstr "就是这样,我们现在开始使用 ``Client``。它看起来可能与我们使用 ``NumPyClient`` 所做的类似。那么有什么不同呢?" +msgstr "" +"就是这样,我们现在开始使用 ``Client``。它看起来可能与我们使用 " +"``NumPyClient`` 所做的类似。那么有什么不同呢?" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:492 msgid "" -"First of all, it's more code. But why? The difference comes from the fact" -" that ``Client`` expects us to take care of parameter serialization and " -"deserialization. For Flower to be able to send parameters over the " -"network, it eventually needs to turn these parameters into ``bytes``. " -"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " +"First of all, it's more code. But why? The difference comes from the fact " +"that ``Client`` expects us to take care of parameter serialization and " +"deserialization. For Flower to be able to send parameters over the network, " +"it eventually needs to turn these parameters into ``bytes``. Turning " +"parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " "serialization. Turning raw bytes into something more useful (like NumPy " -"``ndarray``'s) is called deserialization. 
Flower needs to do both: it " -"needs to serialize parameters on the server-side and send them to the " -"client, the client needs to deserialize them to use them for local " -"training, and then serialize the updated parameters again to send them " -"back to the server, which (finally!) deserializes them again in order to " -"aggregate them with the updates received from other clients." -msgstr "" -"首先,它的代码更多。但为什么呢?区别在于 ``Client`` 希望我们处理参数的序列化和反序列化。Flower " -"要想通过网络发送参数,最终需要将这些参数转化为 ``字节``。把参数(例如 NumPy 的 ``ndarray`` " -"参数)变成原始字节叫做序列化。将原始字节转换成更有用的东西(如 NumPy ``ndarray`)称为反序列化。Flower " -"需要同时做这两件事:它需要在服务器端序列化参数并将其发送到客户端,客户端需要反序列化参数以便将其用于本地训练,然后再次序列化更新后的参数并将其发送回服务器,服务器(最后)再次反序列化参数以便将其与从其他客户端接收到的更新汇总在一起。" +"``ndarray``'s) is called deserialization. Flower needs to do both: it needs " +"to serialize parameters on the server-side and send them to the client, the " +"client needs to deserialize them to use them for local training, and then " +"serialize the updated parameters again to send them back to the server, " +"which (finally!) deserializes them again in order to aggregate them with the " +"updates received from other clients." +msgstr "" +"首先,它的代码更多。但为什么呢?区别在于 ``Client`` 希望我们处理参数的序列化" +"和反序列化。Flower 要想通过网络发送参数,最终需要将这些参数转化为 ``字节``。" +"把参数(例如 NumPy 的 ``ndarray`` 参数)变成原始字节叫做序列化。将原始字节转" +"换成更有用的东西(如 NumPy ``ndarray`)称为反序列化。Flower 需要同时做这两件" +"事:它需要在服务器端序列化参数并将其发送到客户端,客户端需要反序列化参数以便" +"将其用于本地训练,然后再次序列化更新后的参数并将其发送回服务器,服务器(最" +"后)再次反序列化参数以便将其与从其他客户端接收到的更新汇总在一起。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:495 msgid "" "The only *real* difference between Client and NumPyClient is that " -"NumPyClient takes care of serialization and deserialization for you. It " -"can do so because it expects you to return parameters as NumPy ndarray's," -" and it knows how to handle these. This makes working with machine " -"learning libraries that have good NumPy support (most of them) a breeze." +"NumPyClient takes care of serialization and deserialization for you. It can " +"do so because it expects you to return parameters as NumPy ndarray's, and it " +"knows how to handle these. This makes working with machine learning " +"libraries that have good NumPy support (most of them) a breeze." msgstr "" -"Client 与 NumPyClient 之间的唯一**真正区别在于,NumPyClient " -"会为你处理序列化和反序列化。NumPyClient之所以能做到这一点,是因为它预计你会以NumPy " -"ndarray的形式返回参数,而且它知道如何处理这些参数。这使得与具有良好 NumPy 支持的大多数机器学习库一起工作变得轻而易举。" +"Client 与 NumPyClient 之间的唯一**真正区别在于,NumPyClient 会为你处理序列化" +"和反序列化。NumPyClient之所以能做到这一点,是因为它预计你会以NumPy ndarray的" +"形式返回参数,而且它知道如何处理这些参数。这使得与具有良好 NumPy 支持的大多数" +"机器学习库一起工作变得轻而易举。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:497 msgid "" -"In terms of API, there's one major difference: all methods in Client take" -" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " -"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " +"In terms of API, there's one major difference: all methods in Client take " +"exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return exactly " +"one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " "``NumPyClient`` on the other hand have multiple arguments (e.g., " -"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" -" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " -"``NumPyClient.fit``) if there are multiple things to handle. These " -"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " -"values you're used to from ``NumPyClient``." 
-msgstr "" -"在 API 方面,有一个主要区别:Client 中的所有方法都只接受一个参数(例如,``Client.fit`` 中的 " -"``FitIns``),并只返回一个值(例如,``Client.fit`` 中的 " -"``FitRes``)。另一方面,``NumPyClient``中的方法有多个参数(例如,``NumPyClient.fit``中的``parameters``和``config``)和多个返回值(例如,``NumPyClient.fit``中的``parameters``、``num_example``和``metrics``)。在" -" ``Client`` 中的这些 ``*Ins`` 和 ``*Res`` 对象封装了你在 ``NumPyClient`` 中习惯使用的所有单个值。" +"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return " +"values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " +"``NumPyClient.fit``) if there are multiple things to handle. These ``*Ins`` " +"and ``*Res`` objects in ``Client`` wrap all the individual values you're " +"used to from ``NumPyClient``." +msgstr "" +"在 API 方面,有一个主要区别:Client 中的所有方法都只接受一个参数(例如," +"``Client.fit`` 中的 ``FitIns``),并只返回一个值(例如,``Client.fit`` 中的 " +"``FitRes``)。另一方面,``NumPyClient``中的方法有多个参数(例如," +"``NumPyClient.fit``中的``parameters``和``config``)和多个返回值(例如," +"``NumPyClient.fit``中的``parameters``、``num_example``和``metrics``)。在 " +"``Client`` 中的这些 ``*Ins`` 和 ``*Res`` 对象封装了你在 ``NumPyClient`` 中习" +"惯使用的所有单个值。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:510 msgid "Step 3: Custom serialization" @@ -21918,20 +22199,22 @@ msgstr "下面我们将通过一个简单的示例来探讨如何实现自定义 msgid "" "But first what is serialization? Serialization is just the process of " "converting an object into raw bytes, and equally as important, " -"deserialization is the process of converting raw bytes back into an " -"object. This is very useful for network communication. Indeed, without " +"deserialization is the process of converting raw bytes back into an object. " +"This is very useful for network communication. Indeed, without " "serialization, you could not just a Python object through the internet." msgstr "" -"首先,什么是序列化?序列化只是将对象转换为原始字节的过程,同样重要的是,反序列化是将原始字节转换回对象的过程。这对网络通信非常有用。事实上,如果没有序列化,你就无法通过互联网传输一个" -" Python 对象。" +"首先,什么是序列化?序列化只是将对象转换为原始字节的过程,同样重要的是,反序" +"列化是将原始字节转换回对象的过程。这对网络通信非常有用。事实上,如果没有序列" +"化,你就无法通过互联网传输一个 Python 对象。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:516 msgid "" -"Federated Learning relies heavily on internet communication for training " -"by sending Python objects back and forth between the clients and the " -"server. This means that serialization is an essential part of Federated " -"Learning." -msgstr "通过在客户端和服务器之间来回发送 Python 对象,联合学习在很大程度上依赖于互联网通信进行训练。这意味着序列化是联邦学习的重要组成部分。" +"Federated Learning relies heavily on internet communication for training by " +"sending Python objects back and forth between the clients and the server. " +"This means that serialization is an essential part of Federated Learning." +msgstr "" +"通过在客户端和服务器之间来回发送 Python 对象,联合学习在很大程度上依赖于互联" +"网通信进行训练。这意味着序列化是联邦学习的重要组成部分。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:518 msgid "" @@ -21943,9 +22226,10 @@ msgid "" "entries), converting them to a sparse matrix can greatly improve their " "bytesize." 
msgstr "" -"在下面的章节中,我们将编写一个基本示例,在发送包含参数的 ``ndarray`` 前,我们将首先把 ``ndarray`` " -"转换为稀疏矩阵,而不是发送序列化版本。这种技术可以用来节省带宽,因为在某些情况下,模型的参数是稀疏的(包含许多 0 " -"条目),将它们转换成稀疏矩阵可以大大提高它们的字节数。" +"在下面的章节中,我们将编写一个基本示例,在发送包含参数的 ``ndarray`` 前,我们" +"将首先把 ``ndarray`` 转换为稀疏矩阵,而不是发送序列化版本。这种技术可以用来节" +"省带宽,因为在某些情况下,模型的参数是稀疏的(包含许多 0 条目),将它们转换成" +"稀疏矩阵可以大大提高它们的字节数。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:521 msgid "Our custom serialization/deserialization functions" @@ -21953,17 +22237,18 @@ msgstr "我们的定制序列化/反序列化功能" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:523 msgid "" -"This is where the real serialization/deserialization will happen, " -"especially in ``ndarray_to_sparse_bytes`` for serialization and " +"This is where the real serialization/deserialization will happen, especially " +"in ``ndarray_to_sparse_bytes`` for serialization and " "``sparse_bytes_to_ndarray`` for deserialization." msgstr "" -"这才是真正的序列化/反序列化,尤其是在用于序列化的 ``ndarray_too_sparse_bytes`` 和用于反序列化的 " -"``sparse_bytes_too_ndarray`` 中。" +"这才是真正的序列化/反序列化,尤其是在用于序列化的 " +"``ndarray_too_sparse_bytes`` 和用于反序列化的 ``sparse_bytes_too_ndarray`` " +"中。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:525 msgid "" -"Note that we imported the ``scipy.sparse`` library in order to convert " -"our arrays." +"Note that we imported the ``scipy.sparse`` library in order to convert our " +"arrays." msgstr "请注意,为了转换数组,我们导入了 ``scipy.sparse`` 库。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:613 @@ -21972,34 +22257,35 @@ msgstr "客户端" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 msgid "" -"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " -"will just have to call our custom functions in our " -"``flwr.client.Client``." -msgstr "为了能够将我们的 ``ndarray`` 序列化为稀疏参数,我们只需在 ``flwr.client.Client`` 中调用我们的自定义函数。" +"To be able to serialize our ``ndarray``\\ s into sparse parameters, we will " +"just have to call our custom functions in our ``flwr.client.Client``." +msgstr "" +"为了能够将我们的 ``ndarray`` 序列化为稀疏参数,我们只需在 ``flwr.client." +"Client`` 中调用我们的自定义函数。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:617 msgid "" "Indeed, in ``get_parameters`` we need to serialize the parameters we got " -"from our network using our custom ``ndarrays_to_sparse_parameters`` " -"defined above." +"from our network using our custom ``ndarrays_to_sparse_parameters`` defined " +"above." msgstr "" -"事实上,在 `get_parameters` 中,我们需要使用上文定义的自定义 `ndarrays_too_sparse_parameters` " -"序列化从网络中获取的参数。" +"事实上,在 `get_parameters` 中,我们需要使用上文定义的自定义 " +"`ndarrays_too_sparse_parameters` 序列化从网络中获取的参数。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:619 msgid "" "In ``fit``, we first need to deserialize the parameters coming from the " -"server using our custom ``sparse_parameters_to_ndarrays`` and then we " -"need to serialize our local results with " -"``ndarrays_to_sparse_parameters``." +"server using our custom ``sparse_parameters_to_ndarrays`` and then we need " +"to serialize our local results with ``ndarrays_to_sparse_parameters``." 
msgstr "" -"在 ``fit`` 中,我们首先需要使用自定义的 ``sparse_parameters_to_ndarrays`` " -"反序列化来自服务器的参数,然后使用 ``ndarrays_to_sparse_parameters`` 序列化本地结果。" +"在 ``fit`` 中,我们首先需要使用自定义的 ``sparse_parameters_to_ndarrays`` 反" +"序列化来自服务器的参数,然后使用 ``ndarrays_to_sparse_parameters`` 序列化本地" +"结果。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:621 msgid "" -"In ``evaluate``, we will only need to deserialize the global parameters " -"with our custom function." +"In ``evaluate``, we will only need to deserialize the global parameters with " +"our custom function." msgstr "在 ``evaluate`` 中,我们只需要用自定义函数反序列化全局参数。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:725 @@ -22008,14 +22294,14 @@ msgstr "服务器端" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:727 msgid "" -"For this example, we will just use ``FedAvg`` as a strategy. To change " -"the serialization and deserialization here, we only need to reimplement " -"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" -" functions of the strategy will be inherited from the super class " -"``FedAvg``." +"For this example, we will just use ``FedAvg`` as a strategy. To change the " +"serialization and deserialization here, we only need to reimplement the " +"``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other " +"functions of the strategy will be inherited from the super class ``FedAvg``." msgstr "" -"在本例中,我们将只使用 ``FedAvg`` 作为策略。要改变这里的序列化和反序列化,我们只需重新实现 ``FedAvg`` 的 " -"``evaluate`` 和 ``aggregate_fit`` 函数。策略的其他函数将从超类 ``FedAvg`` 继承。" +"在本例中,我们将只使用 ``FedAvg`` 作为策略。要改变这里的序列化和反序列化,我" +"们只需重新实现 ``FedAvg`` 的 ``evaluate`` 和 ``aggregate_fit`` 函数。策略的其" +"他函数将从超类 ``FedAvg`` 继承。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:729 msgid "As you can see only one line as change in ``evaluate``:" @@ -22039,22 +22325,27 @@ msgstr "现在我们可以运行自定义序列化示例!" msgid "" "In this part of the tutorial, we've seen how we can build clients by " "subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " -"convenience abstraction that makes it easier to work with machine " -"learning libraries that have good NumPy interoperability. ``Client`` is a" -" more flexible abstraction that allows us to do things that are not " -"possible in ``NumPyClient``. In order to do so, it requires us to handle " -"parameter serialization and deserialization ourselves." -msgstr "" -"在本部分教程中,我们已经了解了如何通过子类化 ``NumPyClient`` 或 ``Client`` 来构建客户端。NumPyClient " -"\"是一个便捷的抽象类,可以让我们更容易地与具有良好NumPy互操作性的机器学习库一起工作。``Client``是一个更灵活的抽象类,允许我们做一些在`NumPyClient``中做不到的事情。为此,它要求我们自己处理参数序列化和反序列化。" +"convenience abstraction that makes it easier to work with machine learning " +"libraries that have good NumPy interoperability. ``Client`` is a more " +"flexible abstraction that allows us to do things that are not possible in " +"``NumPyClient``. In order to do so, it requires us to handle parameter " +"serialization and deserialization ourselves." +msgstr "" +"在本部分教程中,我们已经了解了如何通过子类化 ``NumPyClient`` 或 ``Client`` 来" +"构建客户端。NumPyClient \"是一个便捷的抽象类,可以让我们更容易地与具有良好" +"NumPy互操作性的机器学习库一起工作。``Client``是一个更灵活的抽象类,允许我们做" +"一些在`NumPyClient``中做不到的事情。为此,它要求我们自己处理参数序列化和反序" +"列化。" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:952 msgid "" -"This is the final part of the Flower tutorial (for now!), " -"congratulations! You're now well equipped to understand the rest of the " -"documentation. 
There are many topics we didn't cover in the tutorial, we " -"recommend the following resources:" -msgstr "这暂时是 Flower 教程的最后一部分,恭喜您!您现在已经具备了理解其余文档的能力。本教程还有许多内容没有涉及,我们推荐您参考以下资源:" +"This is the final part of the Flower tutorial (for now!), congratulations! " +"You're now well equipped to understand the rest of the documentation. There " +"are many topics we didn't cover in the tutorial, we recommend the following " +"resources:" +msgstr "" +"这暂时是 Flower 教程的最后一部分,恭喜您!您现在已经具备了理解其余文档的能" +"力。本教程还有许多内容没有涉及,我们推荐您参考以下资源:" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:954 msgid "`Read Flower Docs `__" @@ -22062,21 +22353,24 @@ msgstr "阅读Flower文档 `__" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:955 msgid "" -"`Check out Flower Code Examples " -"`__" -msgstr "查看 Flower 代码示例 `__" +"`Check out Flower Code Examples `__" +msgstr "" +"查看 Flower 代码示例 `__" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:956 msgid "" -"`Use Flower Baselines for your research " -"`__" -msgstr "使用 \"Flower Baselines \"进行研究 `__" +"`Use Flower Baselines for your research `__" +msgstr "" +"使用 \"Flower Baselines \"进行研究 `__" #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:957 msgid "" -"`Watch Flower Summit 2023 videos `__" -msgstr "观看 2023 年Flower峰会视频 `__" +"`Watch Flower Summit 2023 videos `__" +msgstr "" +"观看 2023 年Flower峰会视频 `__" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 msgid "Get started with Flower" @@ -22091,13 +22385,13 @@ msgstr "欢迎阅读Flower联邦学习教程!" #, fuzzy msgid "" "In this notebook, we'll build a federated learning system using Flower, " -"`Flower Datasets `__ and PyTorch. In " -"part 1, we use PyTorch for the model training pipeline and data loading. " -"In part 2, we continue to federate the PyTorch-based pipeline using " -"Flower." +"`Flower Datasets `__ and PyTorch. In part " +"1, we use PyTorch for the model training pipeline and data loading. In part " +"2, we continue to federate the PyTorch-based pipeline using Flower." msgstr "" -"在本笔记中,我们将使用 Flower 和 PyTorch 构建一个联邦学习系统。在第一部分中,我们使用 PyTorch " -"进行模型训练和数据加载。在第二部分中,我们将继续使用 Flower 联邦化基于 PyTorch 的框架。" +"在本笔记中,我们将使用 Flower 和 PyTorch 构建一个联邦学习系统。在第一部分中," +"我们使用 PyTorch 进行模型训练和数据加载。在第二部分中,我们将继续使用 Flower " +"联邦化基于 PyTorch 的框架。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:17 msgid "Let's get stated!" @@ -22113,26 +22407,28 @@ msgstr "在开始编写实际代码之前,让我们先确保我们已经准备 #, fuzzy msgid "" "Next, we install the necessary packages for PyTorch (``torch`` and " -"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " -"(``flwr``):" -msgstr "接下来,我们为 PyTorch(`torch`` 和`torchvision``)和 Flower(`flwr`)安装必要的软件包:" +"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower (``flwr``):" +msgstr "" +"接下来,我们为 PyTorch(`torch`` 和`torchvision``)和 Flower(`flwr`)安装必" +"要的软件包:" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:105 #, fuzzy msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" -" GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." 
+"It is possible to switch to a runtime that has GPU acceleration enabled (on " +"Google Colab: ``Runtime > Change runtime type > Hardware accelerator: GPU > " +"Save``). Note, however, that Google Colab is not always able to offer GPU " +"acceleration. If you see an error related to GPU availability in one of the " +"following sections, consider switching back to CPU-based execution by " +"setting ``DEVICE = torch.device(\"cpu\")``. If the runtime has GPU " +"acceleration enabled, you should see the output ``Training on cuda``, " +"otherwise it'll say ``Training on cpu``." msgstr "" -"可以切换到已启用 GPU 加速的运行时(在 Google Colab 上: 运行时 > 更改运行时类型 > 硬件加速: GPU > " -"保存``)。但请注意,Google Colab 并非总能提供 GPU 加速。如果在以下部分中看到与 GPU 可用性相关的错误,请考虑通过设置 " -"``DEVICE = torch.device(\"cpu\")`` 切回基于 CPU 的执行。如果运行时已启用 GPU " -"加速,你应该会看到输出``Training on cuda``,否则会显示``Training on cpu``。" +"可以切换到已启用 GPU 加速的运行时(在 Google Colab 上: 运行时 > 更改运行时类" +"型 > 硬件加速: GPU > 保存``)。但请注意,Google Colab 并非总能提供 GPU 加" +"速。如果在以下部分中看到与 GPU 可用性相关的错误,请考虑通过设置 ``DEVICE = " +"torch.device(\"cpu\")`` 切回基于 CPU 的执行。如果运行时已启用 GPU 加速,你应" +"该会看到输出``Training on cuda``,否则会显示``Training on cpu``。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:118 msgid "Loading the data" @@ -22141,77 +22437,86 @@ msgstr "加载数据" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:120 #, fuzzy msgid "" -"Federated learning can be applied to many different types of tasks across" -" different domains. In this tutorial, we introduce federated learning by " -"training a simple convolutional neural network (CNN) on the popular " -"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " -"distinguish between images from ten different classes: 'airplane', " -"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " -"'truck'." +"Federated learning can be applied to many different types of tasks across " +"different domains. In this tutorial, we introduce federated learning by " +"training a simple convolutional neural network (CNN) on the popular CIFAR-10 " +"dataset. CIFAR-10 can be used to train image classifiers that distinguish " +"between images from ten different classes: 'airplane', 'automobile', 'bird', " +"'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and 'truck'." msgstr "" -"联邦学习可应用于不同领域的多种不同类型任务。在本教程中,我们将通过在流行的 CIFAR-10 数据集上训练一个简单的卷积神经网络 (CNN) " -"来介绍联合学习。CIFAR-10 可用于训练图像分类器,以区分来自十个不同类别的图像:" +"联邦学习可应用于不同领域的多种不同类型任务。在本教程中,我们将通过在流行的 " +"CIFAR-10 数据集上训练一个简单的卷积神经网络 (CNN) 来介绍联合学习。CIFAR-10 可" +"用于训练图像分类器,以区分来自十个不同类别的图像:" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:131 msgid "" "We simulate having multiple datasets from multiple organizations (also " -"called the \"cross-silo\" setting in federated learning) by splitting the" -" original CIFAR-10 dataset into multiple partitions. Each partition will " -"represent the data from a single organization. We're doing this purely " -"for experimentation purposes, in the real world there's no need for data " -"splitting because each organization already has their own data (so the " -"data is naturally partitioned)." -msgstr "" -"我们通过将原始 CIFAR-10 数据集拆分成多个分区来模拟来自多个组织的多个数据集(也称为联邦学习中的 \"跨分区 " -"\"设置)。每个分区代表一个组织的数据。我们这样做纯粹是为了实验目的,在现实世界中不需要拆分数据,因为每个组织都已经有了自己的数据(所以数据是自然分区的)。" +"called the \"cross-silo\" setting in federated learning) by splitting the " +"original CIFAR-10 dataset into multiple partitions. Each partition will " +"represent the data from a single organization. 
We're doing this purely for " +"experimentation purposes, in the real world there's no need for data " +"splitting because each organization already has their own data (so the data " +"is naturally partitioned)." +msgstr "" +"我们通过将原始 CIFAR-10 数据集拆分成多个分区来模拟来自多个组织的多个数据集" +"(也称为联邦学习中的 \"跨分区 \"设置)。每个分区代表一个组织的数据。我们这样" +"做纯粹是为了实验目的,在现实世界中不需要拆分数据,因为每个组织都已经有了自己" +"的数据(所以数据是自然分区的)。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:133 #, fuzzy msgid "" -"Each organization will act as a client in the federated learning system. " -"So having ten organizations participate in a federation means having ten " +"Each organization will act as a client in the federated learning system. So " +"having ten organizations participate in a federation means having ten " "clients connected to the federated learning server." -msgstr "每个组织都将充当联邦学习系统中的客户端。因此,有十个组织参与联邦学习,就意味着有十个客户端连接到联邦学习服务器:" +msgstr "" +"每个组织都将充当联邦学习系统中的客户端。因此,有十个组织参与联邦学习,就意味" +"着有十个客户端连接到联邦学习服务器:" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:144 msgid "" "Let's now create the Federated Dataset abstraction that from ``flwr-" -"datasets`` that partitions the CIFAR-10. We will create small training " -"and test set for each edge device and wrap each of them into a PyTorch " +"datasets`` that partitions the CIFAR-10. We will create small training and " +"test set for each edge device and wrap each of them into a PyTorch " "``DataLoader``:" msgstr "" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:198 msgid "" "We now have a list of ten training sets and ten validation sets " -"(``trainloaders`` and ``valloaders``) representing the data of ten " -"different organizations. Each ``trainloader``/``valloader`` pair contains" -" 4500 training examples and 500 validation examples. There's also a " -"single ``testloader`` (we did not split the test set). Again, this is " -"only necessary for building research or educational systems, actual " -"federated learning systems have their data naturally distributed across " -"multiple partitions." -msgstr "" -"现在,我们有一个包含十个训练集和十个验证集(`trainloaders`` 和`valloaders``)的列表,代表十个不同组织的数据。每对 " -"``trainloader``/``valloader`` 都包含 4500 个训练示例和 500 个验证数据。还有一个单独的 " -"``测试加载器``(我们没有拆分测试集)。同样,这只有在构建研究或教育系统时才有必要,实际的联邦学习系统的数据自然分布在多个分区中。" +"(``trainloaders`` and ``valloaders``) representing the data of ten different " +"organizations. Each ``trainloader``/``valloader`` pair contains 4500 " +"training examples and 500 validation examples. There's also a single " +"``testloader`` (we did not split the test set). Again, this is only " +"necessary for building research or educational systems, actual federated " +"learning systems have their data naturally distributed across multiple " +"partitions." +msgstr "" +"现在,我们有一个包含十个训练集和十个验证集(`trainloaders`` 和`valloaders``)" +"的列表,代表十个不同组织的数据。每对 ``trainloader``/``valloader`` 都包含 " +"4500 个训练示例和 500 个验证数据。还有一个单独的 ``测试加载器``(我们没有拆分" +"测试集)。同样,这只有在构建研究或教育系统时才有必要,实际的联邦学习系统的数" +"据自然分布在多个分区中。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:201 msgid "" "Let's take a look at the first batch of images and labels in the first " "training set (i.e., ``trainloaders[0]``) before we move on:" -msgstr "在继续之前,让我们先看看第一个训练集中的第一批图像和标签(即 ``trainloaders[0]``):" +msgstr "" +"在继续之前,让我们先看看第一个训练集中的第一批图像和标签(即 " +"``trainloaders[0]``):" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:240 msgid "" "The output above shows a random batch of images from the first " "``trainloader`` in our list of ten ``trainloaders``. 
It also prints the " "labels associated with each image (i.e., one of the ten possible labels " -"we've seen above). If you run the cell again, you should see another " -"batch of images." +"we've seen above). If you run the cell again, you should see another batch " +"of images." msgstr "" -"上面的输出显示了来自十个 \"trainloader \"列表中第一个 \"trainloader " -"\"的随机图像。它还打印了与每幅图像相关的标签(即我们上面看到的十个可能标签之一)。如果您再次运行该单元,应该会看到另一批图像。" +"上面的输出显示了来自十个 \"trainloader \"列表中第一个 \"trainloader \"的随机" +"图像。它还打印了与每幅图像相关的标签(即我们上面看到的十个可能标签之一)。如" +"果您再次运行该单元,应该会看到另一批图像。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:252 msgid "Step 1: Centralized Training with PyTorch" @@ -22223,12 +22528,12 @@ msgid "" "network. This introduction assumes basic familiarity with PyTorch, so it " "doesn't cover the PyTorch-related aspects in full detail. If you want to " "dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " -"MINUTE BLITZ " -"`__." +"MINUTE BLITZ `__." msgstr "" -"接下来,我们将使用 PyTorch 来定义一个简单的卷积神经网络。本介绍假定您对 PyTorch 有基本的了解,因此不会详细介绍与 PyTorch" -" 相关的内容。如果你想更深入地了解 PyTorch,我们推荐你阅读 `DEEP LEARNING WITH PYTORCH: a 60 " -"minute blitz " +"接下来,我们将使用 PyTorch 来定义一个简单的卷积神经网络。本介绍假定您对 " +"PyTorch 有基本的了解,因此不会详细介绍与 PyTorch 相关的内容。如果你想更深入地" +"了解 PyTorch,我们推荐你阅读 `DEEP LEARNING WITH PYTORCH: a 60 minute blitz " "`__。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:275 @@ -22237,13 +22542,13 @@ msgstr "定义模型" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:277 msgid "" -"We use the simple CNN described in the `PyTorch tutorial " -"`__:" +"We use the simple CNN described in the `PyTorch tutorial `__:" msgstr "" -"我们使用` PyTorch 教程 " -"`__ 中描述的简单 CNN:" +"我们使用` PyTorch 教程 `__ 中描述的简单 " +"CNN:" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:314 msgid "Let's continue with the usual training and test functions:" @@ -22256,23 +22561,27 @@ msgstr "训练模型" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:376 msgid "" "We now have all the basic building blocks we need: a dataset, a model, a " -"training function, and a test function. Let's put them together to train " -"the model on the dataset of one of our organizations " -"(``trainloaders[0]``). This simulates the reality of most machine " -"learning projects today: each organization has their own data and trains " -"models only on this internal data:" -msgstr "现在我们拥有了所需的所有基本构件:数据集、模型、训练函数和测试函数。让我们把它们放在一起,在我们其中一个组织的数据集(``trainloaders[0]``)上训练模型。这模拟了当今大多数机器学习项目的实际情况:每个组织都有自己的数据,并且只在这些内部数据上训练模型:" +"training function, and a test function. Let's put them together to train the " +"model on the dataset of one of our organizations (``trainloaders[0]``). This " +"simulates the reality of most machine learning projects today: each " +"organization has their own data and trains models only on this internal data:" +msgstr "" +"现在我们拥有了所需的所有基本构件:数据集、模型、训练函数和测试函数。让我们把" +"它们放在一起,在我们其中一个组织的数据集(``trainloaders[0]``)上训练模型。这" +"模拟了当今大多数机器学习项目的实际情况:每个组织都有自己的数据,并且只在这些" +"内部数据上训练模型:" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 msgid "" -"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " -"in a test set accuracy of about 41%, which is not good, but at the same " -"time, it doesn't really matter for the purposes of this tutorial. The " -"intent was just to show a simplistic centralized training pipeline that " -"sets the stage for what comes next - federated learning!" 
+"Training the simple CNN on our CIFAR-10 split for 5 epochs should result in " +"a test set accuracy of about 41%, which is not good, but at the same time, " +"it doesn't really matter for the purposes of this tutorial. The intent was " +"just to show a simplistic centralized training pipeline that sets the stage " +"for what comes next - federated learning!" msgstr "" "在我们的 CIFAR-10 分片上对简单 CNN 进行 5 个遍历的训练后,测试集的准确率应为 " -"41%,这并不理想,但同时对本教程而言也并不重要。我们只是想展示一个简单的集中式训练流程,为接下来的联邦学习做好铺垫!" +"41%,这并不理想,但同时对本教程而言也并不重要。我们只是想展示一个简单的集中式" +"训练流程,为接下来的联邦学习做好铺垫!" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 msgid "Step 2: Federated Learning with Flower" @@ -22280,14 +22589,15 @@ msgstr "步骤 2:使用 Flower 联邦学习" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 msgid "" -"Step 1 demonstrated a simple centralized training pipeline. All data was " -"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." -" Next, we'll simulate a situation where we have multiple datasets in " -"multiple organizations and where we train a model over these " -"organizations using federated learning." +"Step 1 demonstrated a simple centralized training pipeline. All data was in " +"one place (i.e., a single ``trainloader`` and a single ``valloader``). Next, " +"we'll simulate a situation where we have multiple datasets in multiple " +"organizations and where we train a model over these organizations using " +"federated learning." msgstr "" -"步骤 1 演示了一个简单的集中式训练流程。所有数据都在一个地方(即一个 \"trainloader \"和一个 " -"\"valloader\")。接下来,我们将模拟在多个组织中拥有多个数据集的情况,并使用联邦学习在这些组织中训练一个模型。" +"步骤 1 演示了一个简单的集中式训练流程。所有数据都在一个地方(即一个 " +"\"trainloader \"和一个 \"valloader\")。接下来,我们将模拟在多个组织中拥有多" +"个数据集的情况,并使用联邦学习在这些组织中训练一个模型。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 msgid "Updating model parameters" @@ -22295,35 +22605,40 @@ msgstr "更新模型参数" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 msgid "" -"In federated learning, the server sends the global model parameters to " -"the client, and the client updates the local model with the parameters " -"received from the server. It then trains the model on the local data " -"(which changes the model parameters locally) and sends the " -"updated/changed model parameters back to the server (or, alternatively, " -"it sends just the gradients back to the server, not the full model " -"parameters)." -msgstr "在联邦学习中,服务器将全局模型参数发送给客户端,客户端根据从服务器接收到的参数更新本地模型。然后,客户端根据本地数据对模型进行训练(在本地更改模型参数),并将更新/更改后的模型参数发回服务器(或者,客户端只将梯度参数发回服务器,而不是全部模型参数)。" +"In federated learning, the server sends the global model parameters to the " +"client, and the client updates the local model with the parameters received " +"from the server. It then trains the model on the local data (which changes " +"the model parameters locally) and sends the updated/changed model parameters " +"back to the server (or, alternatively, it sends just the gradients back to " +"the server, not the full model parameters)." +msgstr "" +"在联邦学习中,服务器将全局模型参数发送给客户端,客户端根据从服务器接收到的参" +"数更新本地模型。然后,客户端根据本地数据对模型进行训练(在本地更改模型参" +"数),并将更新/更改后的模型参数发回服务器(或者,客户端只将梯度参数发回服务" +"器,而不是全部模型参数)。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 msgid "" "We need two helper functions to update the local model with parameters " -"received from the server and to get the updated model parameters from the" -" local model: ``set_parameters`` and ``get_parameters``. The following " -"two functions do just that for the PyTorch model above." 
+"received from the server and to get the updated model parameters from the " +"local model: ``set_parameters`` and ``get_parameters``. The following two " +"functions do just that for the PyTorch model above." msgstr "" -"我们需要两个辅助函数,用从服务器接收到的参数更新本地模型,并从本地模型获取更新后的模型参数:`` " -"set_parameters```和`get_parameters``。下面两个函数就是为上面的 PyTorch 模型做这些工作的。" +"我们需要两个辅助函数,用从服务器接收到的参数更新本地模型,并从本地模型获取更" +"新后的模型参数:`` set_parameters```和`get_parameters``。下面两个函数就是为上" +"面的 PyTorch 模型做这些工作的。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 msgid "" -"The details of how this works are not really important here (feel free to" -" consult the PyTorch documentation if you want to learn more). In " -"essence, we use ``state_dict`` to access PyTorch model parameter tensors." -" The parameter tensors are then converted to/from a list of NumPy " -"ndarray's (which Flower knows how to serialize/deserialize):" +"The details of how this works are not really important here (feel free to " +"consult the PyTorch documentation if you want to learn more). In essence, we " +"use ``state_dict`` to access PyTorch model parameter tensors. The parameter " +"tensors are then converted to/from a list of NumPy ndarray's (which Flower " +"knows how to serialize/deserialize):" msgstr "" -"在这里,如何工作的细节并不重要(如果你想了解更多,请随时查阅 PyTorch 文档)。本质上,我们使用 ``state_dict`` 访问 " -"PyTorch 模型参数张量。然后,参数张量会被转换成/转换成 NumPy ndarray 列表(Flower 知道如何序列化/反序列化):" +"在这里,如何工作的细节并不重要(如果你想了解更多,请随时查阅 PyTorch 文档)。" +"本质上,我们使用 ``state_dict`` 访问 PyTorch 模型参数张量。然后,参数张量会被" +"转换成/转换成 NumPy ndarray 列表(Flower 知道如何序列化/反序列化):" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 msgid "Implementing a Flower client" @@ -22331,25 +22646,25 @@ msgstr "实现 Flower 客户端" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 msgid "" -"With that out of the way, let's move on to the interesting part. " -"Federated learning systems consist of a server and multiple clients. In " -"Flower, we create clients by implementing subclasses of " -"``flwr.client.Client`` or ``flwr.client.NumPyClient``. We use " -"``NumPyClient`` in this tutorial because it is easier to implement and " -"requires us to write less boilerplate." +"With that out of the way, let's move on to the interesting part. Federated " +"learning systems consist of a server and multiple clients. In Flower, we " +"create clients by implementing subclasses of ``flwr.client.Client`` or " +"``flwr.client.NumPyClient``. We use ``NumPyClient`` in this tutorial because " +"it is easier to implement and requires us to write less boilerplate." msgstr "" -"说完这些,让我们进入有趣的部分。联邦学习系统由一个服务器和多个客户端组成。在 Flower 中,我们通过实现 " -"``flwr.client.Client`` 或 ``flwr.client.NumPyClient`` " -"的子类来创建客户端。在本教程中,我们使用``NumPyClient``,因为它更容易实现,需要我们编写的模板也更少。" +"说完这些,让我们进入有趣的部分。联邦学习系统由一个服务器和多个客户端组成。在 " +"Flower 中,我们通过实现 ``flwr.client.Client`` 或 ``flwr.client." +"NumPyClient`` 的子类来创建客户端。在本教程中,我们使用``NumPyClient``,因为它" +"更容易实现,需要我们编写的模板也更少。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 msgid "" -"To implement the Flower client, we create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``:" +"To implement the Flower client, we create a subclass of ``flwr.client." 
+"NumPyClient`` and implement the three methods ``get_parameters``, ``fit``, " +"and ``evaluate``:" msgstr "" -"为实现 Flower 客户端,我们创建了 ``flwr.client.NumPyClient`` 的子类,并实现了 " -"``get_parameters``、``fit`` 和``evaluate`` 三个方法:" +"为实现 Flower 客户端,我们创建了 ``flwr.client.NumPyClient`` 的子类,并实现" +"了 ``get_parameters``、``fit`` 和``evaluate`` 三个方法:" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 msgid "``get_parameters``: Return the current local model parameters" @@ -22358,16 +22673,19 @@ msgstr "``get_parameters``: 返回当前本地模型参数" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 msgid "" "``fit``: Receive model parameters from the server, train the model " -"parameters on the local data, and return the (updated) model parameters " -"to the server" -msgstr "``fit``: 从服务器接收模型参数,在本地数据上训练模型参数,并将(更新的)模型参数返回服务器" +"parameters on the local data, and return the (updated) model parameters to " +"the server" +msgstr "" +"``fit``: 从服务器接收模型参数,在本地数据上训练模型参数,并将(更新的)模型" +"参数返回服务器" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 msgid "" -"``evaluate``: Receive model parameters from the server, evaluate the " -"model parameters on the local data, and return the evaluation result to " -"the server" -msgstr "``evaluate ``: 从服务器接收模型参数,在本地数据上评估模型参数,并将评估结果返回服务器" +"``evaluate``: Receive model parameters from the server, evaluate the model " +"parameters on the local data, and return the evaluation result to the server" +msgstr "" +"``evaluate ``: 从服务器接收模型参数,在本地数据上评估模型参数,并将评估结果" +"返回服务器" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 msgid "" @@ -22375,29 +22693,29 @@ msgid "" "components for model training and evaluation. Let's see a simple Flower " "client implementation that brings everything together:" msgstr "" -"我们提到,我们的客户端将使用之前定义的 PyTorch 组件进行模型训练和评估。让我们来看看一个简单的 Flower " -"客户端实现,它将一切都整合在一起:" +"我们提到,我们的客户端将使用之前定义的 PyTorch 组件进行模型训练和评估。让我们" +"来看看一个简单的 Flower 客户端实现,它将一切都整合在一起:" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 msgid "" "Our class ``FlowerClient`` defines how local training/evaluation will be " -"performed and allows Flower to call the local training/evaluation through" -" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" -" *single client* in our federated learning system. Federated learning " -"systems have multiple clients (otherwise, there's not much to federate), " -"so each client will be represented by its own instance of " -"``FlowerClient``. If we have, for example, three clients in our workload," -" then we'd have three instances of ``FlowerClient``. Flower calls " -"``FlowerClient.fit`` on the respective instance when the server selects a" -" particular client for training (and ``FlowerClient.evaluate`` for " -"evaluation)." -msgstr "" -"我们的类 ``FlowerClient`` 定义了本地训练/评估的执行方式,并允许 Flower 通过 ``fit`` 和 " -"``evaluate`` 调用本地训练/评估。每个 ``FlowerClient`` " -"实例都代表联邦学习系统中的*单个客户端*。联邦学习系统有多个客户端(否则就没有什么可联邦的),因此每个客户端都将由自己的 " -"``FlowerClient`` 实例来代表。例如,如果我们的工作负载中有三个客户端,那么我们就会有三个 ``FlowerClient`` " -"实例。当服务器选择特定客户端进行训练时,Flower 会调用相应实例上的 ``FlowerClient.fit`` (评估时调用 " -"``FlowerClient.evaluate``)。" +"performed and allows Flower to call the local training/evaluation through " +"``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a " +"*single client* in our federated learning system. 
Federated learning systems " +"have multiple clients (otherwise, there's not much to federate), so each " +"client will be represented by its own instance of ``FlowerClient``. If we " +"have, for example, three clients in our workload, then we'd have three " +"instances of ``FlowerClient``. Flower calls ``FlowerClient.fit`` on the " +"respective instance when the server selects a particular client for training " +"(and ``FlowerClient.evaluate`` for evaluation)." +msgstr "" +"我们的类 ``FlowerClient`` 定义了本地训练/评估的执行方式,并允许 Flower 通过 " +"``fit`` 和 ``evaluate`` 调用本地训练/评估。每个 ``FlowerClient`` 实例都代表联" +"邦学习系统中的*单个客户端*。联邦学习系统有多个客户端(否则就没有什么可联邦" +"的),因此每个客户端都将由自己的 ``FlowerClient`` 实例来代表。例如,如果我们" +"的工作负载中有三个客户端,那么我们就会有三个 ``FlowerClient`` 实例。当服务器" +"选择特定客户端进行训练时,Flower 会调用相应实例上的 ``FlowerClient.fit`` (评" +"估时调用 ``FlowerClient.evaluate``)。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:517 msgid "Using the Virtual Client Engine" @@ -22405,17 +22723,18 @@ msgstr "使用虚拟客户端引擎" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:519 msgid "" -"In this notebook, we want to simulate a federated learning system with 10" -" clients on a single machine. This means that the server and all 10 " -"clients will live on a single machine and share resources such as CPU, " -"GPU, and memory. Having 10 clients would mean having 10 instances of " -"``FlowerClient`` in memory. Doing this on a single machine can quickly " -"exhaust the available memory resources, even if only a subset of these " -"clients participates in a single round of federated learning." +"In this notebook, we want to simulate a federated learning system with 10 " +"clients on a single machine. This means that the server and all 10 clients " +"will live on a single machine and share resources such as CPU, GPU, and " +"memory. Having 10 clients would mean having 10 instances of ``FlowerClient`` " +"in memory. Doing this on a single machine can quickly exhaust the available " +"memory resources, even if only a subset of these clients participates in a " +"single round of federated learning." msgstr "" -"在本笔记中,我们要模拟一个联邦学习系统,在一台机器上有 10 个客户端。这意味着服务器和所有 10 个客户端都将位于一台机器上,并共享 " -"CPU、GPU 和内存等资源。有 10 个客户端就意味着内存中有 10 个 ``FlowerClient`` " -"实例。在单台机器上这样做会很快耗尽可用的内存资源,即使这些客户端中只有一个子集参与了一轮联邦学习。" +"在本笔记中,我们要模拟一个联邦学习系统,在一台机器上有 10 个客户端。这意味着" +"服务器和所有 10 个客户端都将位于一台机器上,并共享 CPU、GPU 和内存等资源。有 " +"10 个客户端就意味着内存中有 10 个 ``FlowerClient`` 实例。在单台机器上这样做会" +"很快耗尽可用的内存资源,即使这些客户端中只有一个子集参与了一轮联邦学习。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:521 msgid "" @@ -22423,21 +22742,23 @@ msgid "" "multiple machines, Flower, therefore, provides special simulation " "capabilities that create ``FlowerClient`` instances only when they are " "actually necessary for training or evaluation. To enable the Flower " -"framework to create clients when necessary, we need to implement a " -"function called ``client_fn`` that creates a ``FlowerClient`` instance on" -" demand. Flower calls ``client_fn`` whenever it needs an instance of one " -"particular client to call ``fit`` or ``evaluate`` (those instances are " -"usually discarded after use, so they should not keep any local state). " -"Clients are identified by a client ID, or short ``cid``. 
The ``cid`` can " -"be used, for example, to load different local data partitions for " -"different clients, as can be seen below:" -msgstr "" -"除了服务器和客户端在多台机器上运行的常规功能外,Flower 还提供了特殊的模拟功能,即只有在训练或评估实际需要时才创建 " -"``FlowerClient`` 实例。为了让 Flower 框架能在必要时创建客户端,我们需要实现一个名为 ``client_fn`` " -"的函数,它能按需创建一个 ``FlowerClient`` 实例。每当 Flower 需要一个特定的客户端实例来调用 ``fit`` 或 " -"``evaluate`` 时,它就会调用 " -"``client_fn``(这些实例在使用后通常会被丢弃,因此它们不应保留任何本地状态)。客户端由一个客户端 ID 或简短的 ``cid`` " -"标识。例如,可以使用 ``cid`` 为不同的客户端加载不同的本地数据分区,如下所示:" +"framework to create clients when necessary, we need to implement a function " +"called ``client_fn`` that creates a ``FlowerClient`` instance on demand. " +"Flower calls ``client_fn`` whenever it needs an instance of one particular " +"client to call ``fit`` or ``evaluate`` (those instances are usually " +"discarded after use, so they should not keep any local state). Clients are " +"identified by a client ID, or short ``cid``. The ``cid`` can be used, for " +"example, to load different local data partitions for different clients, as " +"can be seen below:" +msgstr "" +"除了服务器和客户端在多台机器上运行的常规功能外,Flower 还提供了特殊的模拟功" +"能,即只有在训练或评估实际需要时才创建 ``FlowerClient`` 实例。为了让 Flower " +"框架能在必要时创建客户端,我们需要实现一个名为 ``client_fn`` 的函数,它能按需" +"创建一个 ``FlowerClient`` 实例。每当 Flower 需要一个特定的客户端实例来调用 " +"``fit`` 或 ``evaluate`` 时,它就会调用 ``client_fn``(这些实例在使用后通常会" +"被丢弃,因此它们不应保留任何本地状态)。客户端由一个客户端 ID 或简短的 " +"``cid`` 标识。例如,可以使用 ``cid`` 为不同的客户端加载不同的本地数据分区,如" +"下所示:" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:556 msgid "Starting the training" @@ -22445,40 +22766,43 @@ msgstr "开始训练" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:558 msgid "" -"We now have the class ``FlowerClient`` which defines client-side " -"training/evaluation and ``client_fn`` which allows Flower to create " -"``FlowerClient`` instances whenever it needs to call ``fit`` or " -"``evaluate`` on one particular client. The last step is to start the " -"actual simulation using ``flwr.simulation.start_simulation``." +"We now have the class ``FlowerClient`` which defines client-side training/" +"evaluation and ``client_fn`` which allows Flower to create ``FlowerClient`` " +"instances whenever it needs to call ``fit`` or ``evaluate`` on one " +"particular client. The last step is to start the actual simulation using " +"``flwr.simulation.start_simulation``." msgstr "" -"现在我们有了定义客户端训练/评估的类 ``FlowerClient`` 和允许 Flower 在需要调用某个客户端的 ``fit` 或 " -"``evaluate` 时创建 ``FlowerClient`` 实例的 ``client_fn` 类。最后一步是使用 " -"``flwr.simulation.start_simulation`` 启动实际模拟。" +"现在我们有了定义客户端训练/评估的类 ``FlowerClient`` 和允许 Flower 在需要调用" +"某个客户端的 ``fit` 或 ``evaluate` 时创建 ``FlowerClient`` 实例的 " +"``client_fn` 类。最后一步是使用 ``flwr.simulation.start_simulation`` 启动实际" +"模拟。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:560 msgid "" "The function ``start_simulation`` accepts a number of arguments, amongst " -"them the ``client_fn`` used to create ``FlowerClient`` instances, the " -"number of clients to simulate (``num_clients``), the number of federated " -"learning rounds (``num_rounds``), and the strategy. The strategy " -"encapsulates the federated learning approach/algorithm, for example, " -"*Federated Averaging* (FedAvg)." +"them the ``client_fn`` used to create ``FlowerClient`` instances, the number " +"of clients to simulate (``num_clients``), the number of federated learning " +"rounds (``num_rounds``), and the strategy. 
The strategy encapsulates the " +"federated learning approach/algorithm, for example, *Federated Averaging* " +"(FedAvg)." msgstr "" -"函数 ``start_simulation`` 接受许多参数,其中包括用于创建 ``FlowerClient`` 实例的 " -"``client_fn``、要模拟的客户端数量(``num_clients``)、联邦学习轮数(``num_rounds``)和策略。策略封装了联邦学习方法/算法,例如*联邦平均*" -" (FedAvg)。" +"函数 ``start_simulation`` 接受许多参数,其中包括用于创建 ``FlowerClient`` 实" +"例的 ``client_fn``、要模拟的客户端数量(``num_clients``)、联邦学习轮数" +"(``num_rounds``)和策略。策略封装了联邦学习方法/算法,例如*联邦平均* " +"(FedAvg)。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:562 msgid "" "Flower has a number of built-in strategies, but we can also use our own " -"strategy implementations to customize nearly all aspects of the federated" -" learning approach. For this example, we use the built-in ``FedAvg`` " -"implementation and customize it using a few basic parameters. The last " -"step is the actual call to ``start_simulation`` which - you guessed it - " -"starts the simulation:" +"strategy implementations to customize nearly all aspects of the federated " +"learning approach. For this example, we use the built-in ``FedAvg`` " +"implementation and customize it using a few basic parameters. The last step " +"is the actual call to ``start_simulation`` which - you guessed it - starts " +"the simulation:" msgstr "" -"Flower 有许多内置策略,但我们也可以使用自己的策略实现来定制联邦学习方法的几乎所有方面。在本例中,我们使用内置的 ``FedAvg`` " -"实现,并使用一些基本参数对其进行定制。最后一步是实际调用 ``start_simulation``开始模拟:" +"Flower 有许多内置策略,但我们也可以使用自己的策略实现来定制联邦学习方法的几乎" +"所有方面。在本例中,我们使用内置的 ``FedAvg`` 实现,并使用一些基本参数对其进" +"行定制。最后一步是实际调用 ``start_simulation``开始模拟:" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:608 msgid "Behind the scenes" @@ -22491,26 +22815,28 @@ msgstr "那么它是如何工作的呢?Flower 如何进行模拟?" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:612 #, python-format msgid "" -"When we call ``start_simulation``, we tell Flower that there are 10 " -"clients (``num_clients=10``). Flower then goes ahead an asks the " -"``FedAvg`` strategy to select clients. ``FedAvg`` knows that it should " -"select 100% of the available clients (``fraction_fit=1.0``), so it goes " -"ahead and selects 10 random clients (i.e., 100% of 10)." +"When we call ``start_simulation``, we tell Flower that there are 10 clients " +"(``num_clients=10``). Flower then goes ahead an asks the ``FedAvg`` strategy " +"to select clients. ``FedAvg`` knows that it should select 100% of the " +"available clients (``fraction_fit=1.0``), so it goes ahead and selects 10 " +"random clients (i.e., 100% of 10)." msgstr "" -"当我们调用 ``start_simulation`` 时,我们会告诉 Flower 有 10 " -"个客户(`num_clients=10``)。然后,Flower 会要求 ``FedAvg`` 策略选择客户。``FedAvg`` 知道它应该选择" -" 100%的可用客户(``fraction_fit=1.0``),所以它会随机选择 10 个客户(即 10 的 100%)。" +"当我们调用 ``start_simulation`` 时,我们会告诉 Flower 有 10 个客户" +"(`num_clients=10``)。然后,Flower 会要求 ``FedAvg`` 策略选择客户。" +"``FedAvg`` 知道它应该选择 100%的可用客户(``fraction_fit=1.0``),所以它会随" +"机选择 10 个客户(即 10 的 100%)。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:614 msgid "" -"Flower then asks the selected 10 clients to train the model. When the " -"server receives the model parameter updates from the clients, it hands " -"those updates over to the strategy (*FedAvg*) for aggregation. The " -"strategy aggregates those updates and returns the new global model, which" -" then gets used in the next round of federated learning." +"Flower then asks the selected 10 clients to train the model. 
When the server " +"receives the model parameter updates from the clients, it hands those " +"updates over to the strategy (*FedAvg*) for aggregation. The strategy " +"aggregates those updates and returns the new global model, which then gets " +"used in the next round of federated learning." msgstr "" -"然后,Flower 会要求选定的 10 " -"个客户端对模型进行训练。服务器收到客户端的模型参数更新后,会将这些更新交给策略(*FedAvg*)进行聚合。策略会聚合这些更新并返回新的全局模型,然后将其用于下一轮联邦学习。" +"然后,Flower 会要求选定的 10 个客户端对模型进行训练。服务器收到客户端的模型参" +"数更新后,会将这些更新交给策略(*FedAvg*)进行聚合。策略会聚合这些更新并返回" +"新的全局模型,然后将其用于下一轮联邦学习。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:626 msgid "Where's the accuracy?" @@ -22518,67 +22844,75 @@ msgstr "准确度在哪里找?" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628 msgid "" -"You may have noticed that all metrics except for ``losses_distributed`` " -"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" +"You may have noticed that all metrics except for ``losses_distributed`` are " +"empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" msgstr "" -"您可能已经注意到,除了 ``losses_distributed`` 以外,所有指标都是空的。{\"准确度\": " -"float(准确度)}``去哪儿了?" +"您可能已经注意到,除了 ``losses_distributed`` 以外,所有指标都是空的。{\"准确" +"度\": float(准确度)}``去哪儿了?" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630 msgid "" -"Flower can automatically aggregate losses returned by individual clients," -" but it cannot do the same for metrics in the generic metrics dictionary " -"(the one with the ``accuracy`` key). Metrics dictionaries can contain " -"very different kinds of metrics and even key/value pairs that are not " -"metrics at all, so the framework does not (and can not) know how to " -"handle these automatically." +"Flower can automatically aggregate losses returned by individual clients, " +"but it cannot do the same for metrics in the generic metrics dictionary (the " +"one with the ``accuracy`` key). Metrics dictionaries can contain very " +"different kinds of metrics and even key/value pairs that are not metrics at " +"all, so the framework does not (and can not) know how to handle these " +"automatically." msgstr "" -"Flower 可以自动汇总单个客户端返回的损失值,但无法对通用度量字典中的度量进行同样的处理(即带有 \"准确度 " -"\"键的度量字典)。度量值字典可以包含非常不同种类的度量值,甚至包含根本不是度量值的键/值对,因此框架不知道(也无法知道)如何自动处理这些度量值。" +"Flower 可以自动汇总单个客户端返回的损失值,但无法对通用度量字典中的度量进行同" +"样的处理(即带有 \"准确度 \"键的度量字典)。度量值字典可以包含非常不同种类的" +"度量值,甚至包含根本不是度量值的键/值对,因此框架不知道(也无法知道)如何自动" +"处理这些度量值。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632 msgid "" -"As users, we need to tell the framework how to handle/aggregate these " -"custom metrics, and we do so by passing metric aggregation functions to " -"the strategy. The strategy will then call these functions whenever it " -"receives fit or evaluate metrics from clients. The two possible functions" -" are ``fit_metrics_aggregation_fn`` and " -"``evaluate_metrics_aggregation_fn``." +"As users, we need to tell the framework how to handle/aggregate these custom " +"metrics, and we do so by passing metric aggregation functions to the " +"strategy. The strategy will then call these functions whenever it receives " +"fit or evaluate metrics from clients. The two possible functions are " +"``fit_metrics_aggregation_fn`` and ``evaluate_metrics_aggregation_fn``." 
msgstr "" -"作为用户,我们需要告诉框架如何处理/聚合这些自定义指标,为此,我们将指标聚合函数传递给策略。然后,只要从客户端接收到拟合或评估指标,策略就会调用这些函数。两个可能的函数是" -" ``fit_metrics_aggregation_fn`` 和 ``evaluate_metrics_aggregation_fn``。" +"作为用户,我们需要告诉框架如何处理/聚合这些自定义指标,为此,我们将指标聚合函" +"数传递给策略。然后,只要从客户端接收到拟合或评估指标,策略就会调用这些函数。" +"两个可能的函数是 ``fit_metrics_aggregation_fn`` 和 " +"``evaluate_metrics_aggregation_fn``。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634 msgid "" "Let's create a simple weighted averaging function to aggregate the " "``accuracy`` metric we return from ``evaluate``:" -msgstr "让我们创建一个简单的加权平均函数来汇总从 ``evaluate`` 返回的 ``accuracy`` 指标:" +msgstr "" +"让我们创建一个简单的加权平均函数来汇总从 ``evaluate`` 返回的 ``accuracy`` 指" +"标:" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:660 msgid "" "The only thing left to do is to tell the strategy to call this function " "whenever it receives evaluation metric dictionaries from the clients:" -msgstr "剩下要做的就是告诉策略,每当它从客户端接收到评估度量字典时,都要调用这个函数:" +msgstr "" +"剩下要做的就是告诉策略,每当它从客户端接收到评估度量字典时,都要调用这个函" +"数:" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:697 msgid "" "We now have a full system that performs federated training and federated " -"evaluation. It uses the ``weighted_average`` function to aggregate custom" -" evaluation metrics and calculates a single ``accuracy`` metric across " -"all clients on the server side." +"evaluation. It uses the ``weighted_average`` function to aggregate custom " +"evaluation metrics and calculates a single ``accuracy`` metric across all " +"clients on the server side." msgstr "" -"我们现在有了一个完整的系统,可以执行联邦训练和联邦评估。它使用 ``weighted_average`` " -"函数汇总自定义评估指标,并在服务器端计算所有客户端的单一 ``accuracy`` 指标。" +"我们现在有了一个完整的系统,可以执行联邦训练和联邦评估。它使用 " +"``weighted_average`` 函数汇总自定义评估指标,并在服务器端计算所有客户端的单" +"一 ``accuracy`` 指标。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:699 msgid "" "The other two categories of metrics (``losses_centralized`` and " "``metrics_centralized``) are still empty because they only apply when " -"centralized evaluation is being used. Part two of the Flower tutorial " -"will cover centralized evaluation." +"centralized evaluation is being used. Part two of the Flower tutorial will " +"cover centralized evaluation." msgstr "" -"其他两类指标(`losses_centralized`` 和 " -"`metrics_centralized`)仍然是空的,因为它们只适用于集中评估。Flower 教程的第二部分将介绍集中式评估。" +"其他两类指标(`losses_centralized`` 和 `metrics_centralized`)仍然是空的,因" +"为它们只适用于集中评估。Flower 教程的第二部分将介绍集中式评估。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 @@ -22587,34 +22921,39 @@ msgstr "结束语" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 msgid "" -"Congratulations, you just trained a convolutional neural network, " -"federated over 10 clients! With that, you understand the basics of " -"federated learning with Flower. The same approach you've seen can be used" -" with other machine learning frameworks (not just PyTorch) and tasks (not" -" just CIFAR-10 images classification), for example NLP with Hugging Face " -"Transformers or speech with SpeechBrain." +"Congratulations, you just trained a convolutional neural network, federated " +"over 10 clients! With that, you understand the basics of federated learning " +"with Flower. The same approach you've seen can be used with other machine " +"learning frameworks (not just PyTorch) and tasks (not just CIFAR-10 images " +"classification), for example NLP with Hugging Face Transformers or speech " +"with SpeechBrain." 
msgstr "" -"恭喜您,你刚刚训练了一个由 10 个客户端组成的卷积神经网络!这样,你就了解了使用 Flower " -"进行联邦学习的基础知识。你所看到的方法同样适用于其他机器学习框架(不只是 PyTorch)和任务(不只是 CIFAR-10 图像分类),例如使用 " -"Hugging Face Transformers 的 NLP 或使用 SpeechBrain 的语音。" +"恭喜您,你刚刚训练了一个由 10 个客户端组成的卷积神经网络!这样,你就了解了使" +"用 Flower 进行联邦学习的基础知识。你所看到的方法同样适用于其他机器学习框架" +"(不只是 PyTorch)和任务(不只是 CIFAR-10 图像分类),例如使用 Hugging Face " +"Transformers 的 NLP 或使用 SpeechBrain 的语音。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:715 msgid "" -"In the next notebook, we're going to cover some more advanced concepts. " -"Want to customize your strategy? Initialize parameters on the server " -"side? Or evaluate the aggregated model on the server side? We'll cover " -"all this and more in the next tutorial." -msgstr "在下一个笔记中,我们将介绍一些更先进的概念。想定制你的策略吗?在服务器端初始化参数?或者在服务器端评估聚合模型?我们将在下一个教程中介绍所有这些内容以及更多。" +"In the next notebook, we're going to cover some more advanced concepts. Want " +"to customize your strategy? Initialize parameters on the server side? Or " +"evaluate the aggregated model on the server side? We'll cover all this and " +"more in the next tutorial." +msgstr "" +"在下一个笔记中,我们将介绍一些更先进的概念。想定制你的策略吗?在服务器端初始" +"化参数?或者在服务器端评估聚合模型?我们将在下一个教程中介绍所有这些内容以及" +"更多。" #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 msgid "" -"The `Flower Federated Learning Tutorial - Part 2 " -"`__ goes into more depth about strategies and all " -"the advanced things you can build with them." +"The `Flower Federated Learning Tutorial - Part 2 `__ goes " +"into more depth about strategies and all the advanced things you can build " +"with them." msgstr "" -"`Flower 联邦学习教程 - 第 2 部分 `__ 更深入地介绍了策略以及可以使用策略构建的所有高级功能。" +"`Flower 联邦学习教程 - 第 2 部分 `__ 更深入地介绍了策略以及可以" +"使用策略构建的所有高级功能。" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 msgid "Use a federated learning strategy" @@ -22623,22 +22962,22 @@ msgstr "使用联邦学习策略" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 msgid "" "Welcome to the next part of the federated learning tutorial. In previous " -"parts of this tutorial, we introduced federated learning with PyTorch and" -" Flower (`part 1 `__)." +"parts of this tutorial, we introduced federated learning with PyTorch and " +"Flower (`part 1 `__)." msgstr "" -"欢迎来到联邦学习教程的下一部分。在本教程的前几部分,我们介绍了使用 PyTorch 和 Flower 进行联邦学习(`第 1 部分 " -"`___)。" +"欢迎来到联邦学习教程的下一部分。在本教程的前几部分,我们介绍了使用 PyTorch " +"和 Flower 进行联邦学习(`第 1 部分 `___)。" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 msgid "" -"In this notebook, we'll begin to customize the federated learning system " -"we built in the introductory notebook (again, using `Flower " -"`__ and `PyTorch `__)." +"In this notebook, we'll begin to customize the federated learning system we " +"built in the introductory notebook (again, using `Flower `__ and `PyTorch `__)." msgstr "" -"在本笔记中,我们将开始定制在入门笔记中构建的联邦学习系统(再次使用 `Flower `__ 和 " -"`PyTorch `__)。" +"在本笔记中,我们将开始定制在入门笔记中构建的联邦学习系统(再次使用 `Flower " +"`__ 和 `PyTorch `__)。" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 msgid "Let's move beyond FedAvg with Flower strategies!" @@ -22651,9 +22990,11 @@ msgstr "策略定制" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:311 msgid "" "So far, everything should look familiar if you've worked through the " -"introductory notebook. With that, we're ready to introduce a number of " -"new features." -msgstr "到目前为止,如果您已经阅读过入门笔记本,那么一切都应该很熟悉了。接下来,我们将介绍一些新功能。" +"introductory notebook. 
With that, we're ready to introduce a number of new " +"features." +msgstr "" +"到目前为止,如果您已经阅读过入门笔记本,那么一切都应该很熟悉了。接下来,我们" +"将介绍一些新功能。" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 msgid "Server-side parameter **initialization**" @@ -22661,24 +23002,25 @@ msgstr "服务器端参数 **初始化**" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:325 msgid "" -"Flower, by default, initializes the global model by asking one random " -"client for the initial parameters. In many cases, we want more control " -"over parameter initialization though. Flower therefore allows you to " -"directly pass the initial parameters to the Strategy:" +"Flower, by default, initializes the global model by asking one random client " +"for the initial parameters. In many cases, we want more control over " +"parameter initialization though. Flower therefore allows you to directly " +"pass the initial parameters to the Strategy:" msgstr "" -"默认情况下,Flower 会通过向一个随机客户端询问初始参数来初始化全局模型。但在许多情况下,我们需要对参数初始化进行更多控制。因此,Flower" -" 允许您直接将初始参数传递给策略:" +"默认情况下,Flower 会通过向一个随机客户端询问初始参数来初始化全局模型。但在许" +"多情况下,我们需要对参数初始化进行更多控制。因此,Flower 允许您直接将初始参数" +"传递给策略:" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:370 msgid "" -"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" -" from asking one of the clients for the initial parameters. If we look " +"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower " +"from asking one of the clients for the initial parameters. If we look " "closely, we can see that the logs do not show any calls to the " "``FlowerClient.get_parameters`` method." msgstr "" -"向 ``FedAvg`` 策略传递 ``initial_parameters`` 可以防止 Flower " -"向其中一个客户端询问初始参数。如果我们仔细观察,就会发现日志中没有显示对 ``FlowerClient.get_parameters`` " -"方法的任何调用。" +"向 ``FedAvg`` 策略传递 ``initial_parameters`` 可以防止 Flower 向其中一个客户" +"端询问初始参数。如果我们仔细观察,就会发现日志中没有显示对 ``FlowerClient." +"get_parameters`` 方法的任何调用。" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:382 msgid "Starting with a customized strategy" @@ -22686,20 +23028,23 @@ msgstr "从定制战略开始" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:384 msgid "" -"We've seen the function ``start_simulation`` before. It accepts a number " -"of arguments, amongst them the ``client_fn`` used to create " -"``FlowerClient`` instances, the number of clients to simulate " -"``num_clients``, the number of rounds ``num_rounds``, and the strategy." +"We've seen the function ``start_simulation`` before. It accepts a number of " +"arguments, amongst them the ``client_fn`` used to create ``FlowerClient`` " +"instances, the number of clients to simulate ``num_clients``, the number of " +"rounds ``num_rounds``, and the strategy." msgstr "" -"我们以前见过函数 ``start_simulation``。它接受许多参数,其中包括用于创建 ``FlowerClient`` 实例的 " -"``client_fn``、要模拟的客户数量 ``num_clients``、回合数 ``num_rounds``和策略。" +"我们以前见过函数 ``start_simulation``。它接受许多参数,其中包括用于创建 " +"``FlowerClient`` 实例的 ``client_fn``、要模拟的客户数量 ``num_clients``、回合" +"数 ``num_rounds``和策略。" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:386 msgid "" "The strategy encapsulates the federated learning approach/algorithm, for " -"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " -"strategy this time:" -msgstr "该策略封装了联邦学习方法/算法,例如`FedAvg``或`FedAdagrad``。这次让我们尝试使用不同的策略:" +"example, ``FedAvg`` or ``FedAdagrad``. 
Let's try to use a different strategy " +"this time:" +msgstr "" +"该策略封装了联邦学习方法/算法,例如`FedAvg``或`FedAdagrad``。这次让我们尝试使" +"用不同的策略:" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:424 msgid "Server-side parameter **evaluation**" @@ -22707,46 +23052,59 @@ msgstr "服务器端参数**评估**" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:426 msgid "" -"Flower can evaluate the aggregated model on the server-side or on the " -"client-side. Client-side and server-side evaluation are similar in some " -"ways, but different in others." -msgstr "Flower 可以在服务器端或客户端评估聚合模型。客户端和服务器端评估在某些方面相似,但也有不同之处。" +"Flower can evaluate the aggregated model on the server-side or on the client-" +"side. Client-side and server-side evaluation are similar in some ways, but " +"different in others." +msgstr "" +"Flower 可以在服务器端或客户端评估聚合模型。客户端和服务器端评估在某些方面相" +"似,但也有不同之处。" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:428 msgid "" "**Centralized Evaluation** (or *server-side evaluation*) is conceptually " "simple: it works the same way that evaluation in centralized machine " "learning does. If there is a server-side dataset that can be used for " -"evaluation purposes, then that's great. We can evaluate the newly " -"aggregated model after each round of training without having to send the " -"model to clients. We're also fortunate in the sense that our entire " -"evaluation dataset is available at all times." -msgstr "**集中评估**(或*服务器端评估*)在概念上很简单:它的工作方式与集中式机器学习中的评估方式相同。如果有一个服务器端数据集可用于评估目的,那就太好了。我们可以在每一轮训练后对新聚合的模型进行评估,而无需将模型发送给客户端。我们也很幸运,因为我们的整个评估数据集随时可用。" +"evaluation purposes, then that's great. We can evaluate the newly aggregated " +"model after each round of training without having to send the model to " +"clients. We're also fortunate in the sense that our entire evaluation " +"dataset is available at all times." +msgstr "" +"**集中评估**(或*服务器端评估*)在概念上很简单:它的工作方式与集中式机器学习" +"中的评估方式相同。如果有一个服务器端数据集可用于评估目的,那就太好了。我们可" +"以在每一轮训练后对新聚合的模型进行评估,而无需将模型发送给客户端。我们也很幸" +"运,因为我们的整个评估数据集随时可用。" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:430 msgid "" -"**Federated Evaluation** (or *client-side evaluation*) is more complex, " -"but also more powerful: it doesn't require a centralized dataset and " -"allows us to evaluate models over a larger set of data, which often " -"yields more realistic evaluation results. In fact, many scenarios require" -" us to use **Federated Evaluation** if we want to get representative " -"evaluation results at all. But this power comes at a cost: once we start " -"to evaluate on the client side, we should be aware that our evaluation " -"dataset can change over consecutive rounds of learning if those clients " -"are not always available. Moreover, the dataset held by each client can " -"also change over consecutive rounds. This can lead to evaluation results " -"that are not stable, so even if we would not change the model, we'd see " -"our evaluation results fluctuate over consecutive rounds." 
-msgstr "**联邦评估**(或*客户端评估*)更为复杂,但也更为强大:它不需要集中的数据集,允许我们在更大的数据集上对模型进行评估,这通常会产生更真实的评估结果。事实上,如果我们想得到有代表性的评估结果,很多情况下都需要使用**联邦评估**。但是,这种能力是有代价的:一旦我们开始在客户端进行评估,我们就应该意识到,如果这些客户端并不总是可用,我们的评估数据集可能会在连续几轮学习中发生变化。此外,每个客户端所拥有的数据集也可能在连续几轮学习中发生变化。这可能会导致评估结果不稳定,因此即使我们不改变模型,也会看到评估结果在连续几轮中波动。" +"**Federated Evaluation** (or *client-side evaluation*) is more complex, but " +"also more powerful: it doesn't require a centralized dataset and allows us " +"to evaluate models over a larger set of data, which often yields more " +"realistic evaluation results. In fact, many scenarios require us to use " +"**Federated Evaluation** if we want to get representative evaluation results " +"at all. But this power comes at a cost: once we start to evaluate on the " +"client side, we should be aware that our evaluation dataset can change over " +"consecutive rounds of learning if those clients are not always available. " +"Moreover, the dataset held by each client can also change over consecutive " +"rounds. This can lead to evaluation results that are not stable, so even if " +"we would not change the model, we'd see our evaluation results fluctuate " +"over consecutive rounds." +msgstr "" +"**联邦评估**(或*客户端评估*)更为复杂,但也更为强大:它不需要集中的数据集," +"允许我们在更大的数据集上对模型进行评估,这通常会产生更真实的评估结果。事实" +"上,如果我们想得到有代表性的评估结果,很多情况下都需要使用**联邦评估**。但" +"是,这种能力是有代价的:一旦我们开始在客户端进行评估,我们就应该意识到,如果" +"这些客户端并不总是可用,我们的评估数据集可能会在连续几轮学习中发生变化。此" +"外,每个客户端所拥有的数据集也可能在连续几轮学习中发生变化。这可能会导致评估" +"结果不稳定,因此即使我们不改变模型,也会看到评估结果在连续几轮中波动。" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:433 msgid "" "We've seen how federated evaluation works on the client side (i.e., by " -"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " -"how we can evaluate aggregated model parameters on the server-side:" +"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see how " +"we can evaluate aggregated model parameters on the server-side:" msgstr "" -"我们已经了解了联邦评估如何在客户端工作(即通过在 ``FlowerClient`` 中实现 ``evaluate`` " -"方法)。现在让我们看看如何在服务器端评估聚合模型参数:" +"我们已经了解了联邦评估如何在客户端工作(即通过在 ``FlowerClient`` 中实现 " +"``evaluate`` 方法)。现在让我们看看如何在服务器端评估聚合模型参数:" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:490 msgid "Sending/receiving arbitrary values to/from clients" @@ -22754,63 +23112,66 @@ msgstr "向/从客户端发送/接收任意值" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:492 msgid "" -"In some situations, we want to configure client-side execution (training," -" evaluation) from the server-side. One example for that is the server " -"asking the clients to train for a certain number of local epochs. Flower " -"provides a way to send configuration values from the server to the " -"clients using a dictionary. Let's look at an example where the clients " -"receive values from the server through the ``config`` parameter in " -"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " -"method receives the configuration dictionary through the ``config`` " -"parameter and can then read values from this dictionary. 
In this example," -" it reads ``server_round`` and ``local_epochs`` and uses those values to " -"improve the logging and configure the number of local training epochs:" -msgstr "" -"在某些情况下,我们希望从服务器端配置客户端的执行(训练、评估)。其中一个例子就是服务器要求客户端训练一定数量的本地遍历。Flower " -"提供了一种使用字典从服务器向客户端发送配置值的方法。让我们来看一个例子:客户端通过 ``fit`` 中的 ``config`` " -"参数从服务器接收配置值(``evaluate`` 中也有 ``config`` 参数)。``fit`` 方法通过 ``config`` " -"参数接收配置字典,然后从字典中读取值。在本例中,它读取了 ``server_round`` 和 " -"``local_epochs``,并使用这些值来改进日志记录和配置本地训练遍历的数量:" +"In some situations, we want to configure client-side execution (training, " +"evaluation) from the server-side. One example for that is the server asking " +"the clients to train for a certain number of local epochs. Flower provides a " +"way to send configuration values from the server to the clients using a " +"dictionary. Let's look at an example where the clients receive values from " +"the server through the ``config`` parameter in ``fit`` (``config`` is also " +"available in ``evaluate``). The ``fit`` method receives the configuration " +"dictionary through the ``config`` parameter and can then read values from " +"this dictionary. In this example, it reads ``server_round`` and " +"``local_epochs`` and uses those values to improve the logging and configure " +"the number of local training epochs:" +msgstr "" +"在某些情况下,我们希望从服务器端配置客户端的执行(训练、评估)。其中一个例子" +"就是服务器要求客户端训练一定数量的本地遍历。Flower 提供了一种使用字典从服务器" +"向客户端发送配置值的方法。让我们来看一个例子:客户端通过 ``fit`` 中的 " +"``config`` 参数从服务器接收配置值(``evaluate`` 中也有 ``config`` 参数)。" +"``fit`` 方法通过 ``config`` 参数接收配置字典,然后从字典中读取值。在本例中," +"它读取了 ``server_round`` 和 ``local_epochs``,并使用这些值来改进日志记录和配" +"置本地训练遍历的数量:" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:546 msgid "" -"So how can we send this config dictionary from server to clients? The " -"built-in Flower Strategies provide way to do this, and it works similarly" -" to the way server-side evaluation works. We provide a function to the " -"strategy, and the strategy calls this function for every round of " -"federated learning:" +"So how can we send this config dictionary from server to clients? The built-" +"in Flower Strategies provide way to do this, and it works similarly to the " +"way server-side evaluation works. We provide a function to the strategy, and " +"the strategy calls this function for every round of federated learning:" msgstr "" "那么,如何将配置字典从服务器发送到客户端呢?内置的 \"Flower策略\"(Flower " -"Strategies)提供了这样的方法,其工作原理与服务器端评估的工作原理类似。我们为策略提供一个函数,策略会在每一轮联邦学习中调用这个函数:" +"Strategies)提供了这样的方法,其工作原理与服务器端评估的工作原理类似。我们为" +"策略提供一个函数,策略会在每一轮联邦学习中调用这个函数:" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:576 msgid "" -"Next, we'll just pass this function to the FedAvg strategy before " -"starting the simulation:" +"Next, we'll just pass this function to the FedAvg strategy before starting " +"the simulation:" msgstr "接下来,我们只需在开始模拟前将此函数传递给 FedAvg 策略即可:" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 msgid "" -"As we can see, the client logs now include the current round of federated" -" learning (which they read from the ``config`` dictionary). We can also " -"configure local training to run for one epoch during the first and second" -" round of federated learning, and then for two epochs during the third " -"round." +"As we can see, the client logs now include the current round of federated " +"learning (which they read from the ``config`` dictionary). 
We can also " +"configure local training to run for one epoch during the first and second " +"round of federated learning, and then for two epochs during the third round." msgstr "" -"我们可以看到,客户端日志现在包含了当前一轮的联邦学习(从 ``config`` " -"字典中读取)。我们还可以将本地训练配置为在第一轮和第二轮联邦学习期间运行一个遍历,然后在第三轮联邦学习期间运行两个遍历。" +"我们可以看到,客户端日志现在包含了当前一轮的联邦学习(从 ``config`` 字典中读" +"取)。我们还可以将本地训练配置为在第一轮和第二轮联邦学习期间运行一个遍历,然" +"后在第三轮联邦学习期间运行两个遍历。" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 msgid "" "Clients can also return arbitrary values to the server. To do so, they " -"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " -"used this concept throughout this notebook without mentioning it " -"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" -" key/value pair as the third return value in ``evaluate``." +"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and used " +"this concept throughout this notebook without mentioning it explicitly: our " +"``FlowerClient`` returns a dictionary containing a custom key/value pair as " +"the third return value in ``evaluate``." msgstr "" -"客户端还可以向服务器返回任意值。为此,它们会从 ``fit`` 和/或 ``evaluate`` " -"返回一个字典。我们在本笔记中看到并使用了这一概念,但并未明确提及:我们的 ``FlowerClient`` 返回一个包含自定义键/值对的字典,作为" -" ``evaluate`` 中的第三个返回值。" +"客户端还可以向服务器返回任意值。为此,它们会从 ``fit`` 和/或 ``evaluate`` 返" +"回一个字典。我们在本笔记中看到并使用了这一概念,但并未明确提及:我们的 " +"``FlowerClient`` 返回一个包含自定义键/值对的字典,作为 ``evaluate`` 中的第三" +"个返回值。" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:627 msgid "Scaling federated learning" @@ -22826,19 +23187,19 @@ msgstr "作为本笔记的最后一步,让我们看看如何使用 Flower 对 #, python-format msgid "" "We now have 1000 partitions, each holding 45 training and 5 validation " -"examples. Given that the number of training examples on each client is " -"quite small, we should probably train the model a bit longer, so we " -"configure the clients to perform 3 local training epochs. We should also " -"adjust the fraction of clients selected for training during each round " -"(we don't want all 1000 clients participating in every round), so we " -"adjust ``fraction_fit`` to ``0.05``, which means that only 5% of " -"available clients (so 50 clients) will be selected for training each " -"round:" -msgstr "" -"现在我们有 1000 个分区,每个分区有 45 个训练数据和 5 " -"个验证数据。鉴于每个客户端上的训练示例数量较少,我们可能需要对模型进行更长时间的训练,因此我们将客户端配置为执行 3 " -"个本地训练遍历。我们还应该调整每轮训练中被选中的客户端的比例(我们不希望每轮训练都有 1000 个客户端参与),因此我们将 " -"``fraction_fit`` 调整为 ``0.05``,这意味着每轮训练只选中 5%的可用客户端(即 50 个客户端):" +"examples. Given that the number of training examples on each client is quite " +"small, we should probably train the model a bit longer, so we configure the " +"clients to perform 3 local training epochs. We should also adjust the " +"fraction of clients selected for training during each round (we don't want " +"all 1000 clients participating in every round), so we adjust " +"``fraction_fit`` to ``0.05``, which means that only 5% of available clients " +"(so 50 clients) will be selected for training each round:" +msgstr "" +"现在我们有 1000 个分区,每个分区有 45 个训练数据和 5 个验证数据。鉴于每个客户" +"端上的训练示例数量较少,我们可能需要对模型进行更长时间的训练,因此我们将客户" +"端配置为执行 3 个本地训练遍历。我们还应该调整每轮训练中被选中的客户端的比例" +"(我们不希望每轮训练都有 1000 个客户端参与),因此我们将 ``fraction_fit`` 调" +"整为 ``0.05``,这意味着每轮训练只选中 5%的可用客户端(即 50 个客户端):" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:699 msgid "" @@ -22846,29 +23207,33 @@ msgid "" "customizing the strategy, initializing parameters on the server side, " "choosing a different strategy, and evaluating models on the server-side. 
" "That's quite a bit of flexibility with so little code, right?" -msgstr "在本笔记中,我们看到了如何通过自定义策略、在服务器端初始化参数、选择不同的策略以及在服务器端评估模型来逐步增强我们的系统。用这么少的代码就能实现这么大的灵活性,不是吗?" +msgstr "" +"在本笔记中,我们看到了如何通过自定义策略、在服务器端初始化参数、选择不同的策" +"略以及在服务器端评估模型来逐步增强我们的系统。用这么少的代码就能实现这么大的" +"灵活性,不是吗?" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:701 msgid "" -"In the later sections, we've seen how we can communicate arbitrary values" -" between server and clients to fully customize client-side execution. " -"With that capability, we built a large-scale Federated Learning " -"simulation using the Flower Virtual Client Engine and ran an experiment " -"involving 1000 clients in the same workload - all in a Jupyter Notebook!" +"In the later sections, we've seen how we can communicate arbitrary values " +"between server and clients to fully customize client-side execution. With " +"that capability, we built a large-scale Federated Learning simulation using " +"the Flower Virtual Client Engine and ran an experiment involving 1000 " +"clients in the same workload - all in a Jupyter Notebook!" msgstr "" -"在后面的章节中,我们将看到如何在服务器和客户端之间传递任意值,以完全自定义客户端执行。有了这种能力,我们使用 Flower " -"虚拟客户端引擎构建了一个大规模的联邦学习模拟,并在 Jupyter Notebook 中进行了一次实验,在相同的工作负载中运行了 1000 " -"个客户端!" +"在后面的章节中,我们将看到如何在服务器和客户端之间传递任意值,以完全自定义客" +"户端执行。有了这种能力,我们使用 Flower 虚拟客户端引擎构建了一个大规模的联邦" +"学习模拟,并在 Jupyter Notebook 中进行了一次实验,在相同的工作负载中运行了 " +"1000 个客户端!" #: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 msgid "" -"The `Flower Federated Learning Tutorial - Part 3 " -"`__ shows how to build a fully custom ``Strategy`` from " -"scratch." +"The `Flower Federated Learning Tutorial - Part 3 `__ shows how " +"to build a fully custom ``Strategy`` from scratch." msgstr "" "`Flower 联邦学习教程 - 第 3 部分 `__ 展示了如何从头开始构建完全自定义的 \"策略\"。" +"build-a-strategy-from-scratch-pytorch.html>`__ 展示了如何从头开始构建完全自定" +"义的 \"策略\"。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 msgid "What is Federated Learning?" @@ -22878,30 +23243,34 @@ msgstr "什么是联邦学习?" msgid "" "In this tutorial, you will learn what federated learning is, build your " "first system in Flower, and gradually extend it. If you work through all " -"parts of the tutorial, you will be able to build advanced federated " -"learning systems that approach the current state of the art in the field." +"parts of the tutorial, you will be able to build advanced federated learning " +"systems that approach the current state of the art in the field." msgstr "" -"在本教程中,你将了解什么是联邦学习,用 Flower " -"搭建第一个系统,并逐步对其进行扩展。如果你能完成本教程的所有部分,你就能构建高级的联邦学习系统,从而接近该领域当前的技术水平。" +"在本教程中,你将了解什么是联邦学习,用 Flower 搭建第一个系统,并逐步对其进行" +"扩展。如果你能完成本教程的所有部分,你就能构建高级的联邦学习系统,从而接近该" +"领域当前的技术水平。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 msgid "" -"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " -"federated learning. Only a basic understanding of data science and Python" -" programming is assumed." -msgstr "🧑‍🏫 本教程从零开始,不要求熟悉联邦学习。仅假定对数据科学和 Python 编程有基本了解。" +"🧑‍🏫 This tutorial starts at zero and expects no familiarity with federated " +"learning. Only a basic understanding of data science and Python programming " +"is assumed." 
+msgstr "" +"🧑‍🏫 本教程从零开始,不要求熟悉联邦学习。仅假定对数据科学和 Python 编程有基本" +"了解。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the open-source Flower community on Slack to connect, ask questions, and " -"get help: `Join Slack `__ 🌼 We'd love to " -"hear from you in the ``#introductions`` channel! And if anything is " -"unclear, head over to the ``#questions`` channel." +"`Star Flower on GitHub `__ ⭐️ and join the " +"open-source Flower community on Slack to connect, ask questions, and get " +"help: `Join Slack `__ 🌼 We'd love to hear " +"from you in the ``#introductions`` channel! And if anything is unclear, head " +"over to the ``#questions`` channel." msgstr "" -"`Star Flower on GitHub `__ ⭐️ 并加入 Slack " -"上的开源 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ 🌼" -" 我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" +"`Star Flower on GitHub `__ ⭐️ 并加入 Slack 上" +"的开源 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ 🌼 我们希望在 ``#introductions`` 频道听到您的声音!如果有任何" +"不清楚的地方,请访问 ``#questions`` 频道。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 msgid "Let's get started!" @@ -22913,16 +23282,19 @@ msgstr "经典机器学习" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 msgid "" -"Before we begin to discuss federated learning, let us quickly recap how " -"most machine learning works today." -msgstr "在开始讨论联邦学习之前,让我们先快速回顾一下目前大多数机器学习的工作原理。" +"Before we begin to discuss federated learning, let us quickly recap how most " +"machine learning works today." +msgstr "" +"在开始讨论联邦学习之前,让我们先快速回顾一下目前大多数机器学习的工作原理。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 msgid "" -"In machine learning, we have a model, and we have data. The model could " -"be a neural network (as depicted here), or something else, like classical" -" linear regression." -msgstr "在机器学习中,我们有一个模型和数据。模型可以是一个神经网络(如图所示),也可以是其他东西,比如经典的线性回归。" +"In machine learning, we have a model, and we have data. The model could be a " +"neural network (as depicted here), or something else, like classical linear " +"regression." +msgstr "" +"在机器学习中,我们有一个模型和数据。模型可以是一个神经网络(如图所示),也可" +"以是其他东西,比如经典的线性回归。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 msgid "|2b5c62c529f6416f840c594cce062fbb|" @@ -22934,10 +23306,12 @@ msgstr "模型和数据" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 msgid "" -"We train the model using the data to perform a useful task. A task could " -"be to detect objects in images, transcribe an audio recording, or play a " -"game like Go." -msgstr "我们使用数据来训练模型,以完成一项有用的任务。任务可以是检测图像中的物体、转录音频或玩围棋等游戏。" +"We train the model using the data to perform a useful task. A task could be " +"to detect objects in images, transcribe an audio recording, or play a game " +"like Go." +msgstr "" +"我们使用数据来训练模型,以完成一项有用的任务。任务可以是检测图像中的物体、转" +"录音频或玩围棋等游戏。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 msgid "|90b334680cb7467d9a04d39b8e8dca9f|" @@ -22949,16 +23323,19 @@ msgstr "使用数据训练模型" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 msgid "" -"Now, in practice, the training data we work with doesn't originate on the" -" machine we train the model on. It gets created somewhere else." -msgstr "实际上,我们使用的训练数据并不来自我们训练模型的机器。它是在其他地方创建的。" +"Now, in practice, the training data we work with doesn't originate on the " +"machine we train the model on. It gets created somewhere else." 
+msgstr "" +"实际上,我们使用的训练数据并不来自我们训练模型的机器。它是在其他地方创建的。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 msgid "" "It originates on a smartphone by the user interacting with an app, a car " "collecting sensor data, a laptop receiving input via the keyboard, or a " "smart speaker listening to someone trying to sing a song." -msgstr "它源于智能手机上用户与应用程序的交互、汽车上传感器数据的收集、笔记本电脑上键盘输入的接收,或者智能扬声器上某人试着唱的歌。" +msgstr "" +"它源于智能手机上用户与应用程序的交互、汽车上传感器数据的收集、笔记本电脑上键" +"盘输入的接收,或者智能扬声器上某人试着唱的歌。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 msgid "|65764ceee89f4335bfd93fd0b115e831|" @@ -22971,12 +23348,12 @@ msgstr "手机上的数据" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 msgid "" "What's also important to mention, this \"somewhere else\" is usually not " -"just one place, it's many places. It could be several devices all running" -" the same app. But it could also be several organizations, all generating" -" data for the same task." +"just one place, it's many places. It could be several devices all running " +"the same app. But it could also be several organizations, all generating " +"data for the same task." msgstr "" -"值得一提的是,这个 \"其他地方 " -"\"通常不只是一个地方,而是很多地方。它可能是多个运行同一应用程序的设备。但也可能是多个组织,都在为同一任务生成数据。" +"值得一提的是,这个 \"其他地方 \"通常不只是一个地方,而是很多地方。它可能是多" +"个运行同一应用程序的设备。但也可能是多个组织,都在为同一任务生成数据。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" @@ -22988,11 +23365,12 @@ msgstr "数据存在于多种设备中" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 msgid "" -"So to use machine learning, or any kind of data analysis, the approach " -"that has been used in the past was to collect all data on a central " -"server. This server can be somewhere in a data center, or somewhere in " -"the cloud." -msgstr "因此,要使用机器学习或任何类型的数据分析,过去使用的方法是在中央服务器上收集所有数据。这个服务器可以在数据中心的某个地方,也可以在云端的某个地方。" +"So to use machine learning, or any kind of data analysis, the approach that " +"has been used in the past was to collect all data on a central server. This " +"server can be somewhere in a data center, or somewhere in the cloud." +msgstr "" +"因此,要使用机器学习或任何类型的数据分析,过去使用的方法是在中央服务器上收集" +"所有数据。这个服务器可以在数据中心的某个地方,也可以在云端的某个地方。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 msgid "|11e95ac83a8548d8b3505b4663187d07|" @@ -23007,7 +23385,9 @@ msgid "" "Once all the data is collected in one place, we can finally use machine " "learning algorithms to train our model on the data. This is the machine " "learning approach that we've basically always relied on." -msgstr "一旦所有数据都收集到一处,我们最终就可以使用机器学习算法在数据上训练我们的模型。这就是我们基本上一直依赖的机器学习方法。" +msgstr "" +"一旦所有数据都收集到一处,我们最终就可以使用机器学习算法在数据上训练我们的模" +"型。这就是我们基本上一直依赖的机器学习方法。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 msgid "|1dab2f3a23674abc8a6731f20fa10730|" @@ -23023,11 +23403,13 @@ msgstr "经典机器学习面临的挑战" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 msgid "" -"The classic machine learning approach we've just seen can be used in some" -" cases. Great examples include categorizing holiday photos, or analyzing " -"web traffic. Cases, where all the data is naturally available on a " -"centralized server." -msgstr "我们刚刚看到的经典机器学习方法可以在某些情况下使用。很好的例子包括对假日照片进行分类或分析网络流量。在这些案例中,所有数据自然都可以在中央服务器上获得。" +"The classic machine learning approach we've just seen can be used in some " +"cases. Great examples include categorizing holiday photos, or analyzing web " +"traffic. Cases, where all the data is naturally available on a centralized " +"server." 
+msgstr "" +"我们刚刚看到的经典机器学习方法可以在某些情况下使用。很好的例子包括对假日照片" +"进行分类或分析网络流量。在这些案例中,所有数据自然都可以在中央服务器上获得。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 msgid "|7f0ee162da38450788493a21627306f7|" @@ -23039,10 +23421,12 @@ msgstr "可集中管理" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 msgid "" -"But the approach can not be used in many other cases. Cases, where the " -"data is not available on a centralized server, or cases where the data " -"available on one server is not enough to train a good model." -msgstr "但这种方法并不适用于许多其他情况。例如,集中服务器上没有数据,或者一台服务器上的数据不足以训练出一个好的模型。" +"But the approach can not be used in many other cases. Cases, where the data " +"is not available on a centralized server, or cases where the data available " +"on one server is not enough to train a good model." +msgstr "" +"但这种方法并不适用于许多其他情况。例如,集中服务器上没有数据,或者一台服务器" +"上的数据不足以训练出一个好的模型。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" @@ -23054,10 +23438,12 @@ msgstr "无法集中" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 msgid "" -"There are many reasons why the classic centralized machine learning " -"approach does not work for a large number of highly important real-world " -"use cases. Those reasons include:" -msgstr "传统的集中式机器学习方法无法满足现实世界中大量极为重要的使用案例,原因有很多。这些原因包括:" +"There are many reasons why the classic centralized machine learning approach " +"does not work for a large number of highly important real-world use cases. " +"Those reasons include:" +msgstr "" +"传统的集中式机器学习方法无法满足现实世界中大量极为重要的使用案例,原因有很" +"多。这些原因包括:" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 msgid "" @@ -23066,39 +23452,48 @@ msgid "" "(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " "(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " "protect sensitive data from being moved. In fact, those regulations " -"sometimes even prevent single organizations from combining their own " -"users' data for artificial intelligence training because those users live" -" in different parts of the world, and their data is governed by different" -" data protection regulations." -msgstr "" -"**法规**: " -"GDPR(欧洲)、CCPA(加利福尼亚)、PIPEDA(加拿大)、LGPD(巴西)、PDPL(阿根廷)、KVKK(土耳其)、POPI(南非)、FSS(俄罗斯)、CDPR(中国)、PDPB(印度)、PIPA(韩国)、APPI(日本)、PDP(印度尼西亚)、PDPA(新加坡)、APP(澳大利亚)等法规保护敏感数据不被移动。事实上,这些法规有时甚至会阻止单个组织将自己的用户数据用于人工智能培训,因为这些用户生活在世界不同地区,他们的数据受不同的数据保护法规管辖。" +"sometimes even prevent single organizations from combining their own users' " +"data for artificial intelligence training because those users live in " +"different parts of the world, and their data is governed by different data " +"protection regulations." +msgstr "" +"**法规**: GDPR(欧洲)、CCPA(加利福尼亚)、PIPEDA(加拿大)、LGPD(巴西)、" +"PDPL(阿根廷)、KVKK(土耳其)、POPI(南非)、FSS(俄罗斯)、CDPR(中国)、" +"PDPB(印度)、PIPA(韩国)、APPI(日本)、PDP(印度尼西亚)、PDPA(新加坡)、" +"APP(澳大利亚)等法规保护敏感数据不被移动。事实上,这些法规有时甚至会阻止单个" +"组织将自己的用户数据用于人工智能培训,因为这些用户生活在世界不同地区,他们的" +"数据受不同的数据保护法规管辖。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 msgid "" -"**User preference**: In addition to regulation, there are use cases where" -" users just expect that no data leaves their device, ever. If you type " -"your passwords and credit card info into the digital keyboard of your " -"phone, you don't expect those passwords to end up on the server of the " -"company that developed that keyboard, do you? In fact, that use case was " -"the reason federated learning was invented in the first place." 
+"**User preference**: In addition to regulation, there are use cases where " +"users just expect that no data leaves their device, ever. If you type your " +"passwords and credit card info into the digital keyboard of your phone, you " +"don't expect those passwords to end up on the server of the company that " +"developed that keyboard, do you? In fact, that use case was the reason " +"federated learning was invented in the first place." msgstr "" -"**用户偏好**: " -"除了法规之外,在一些使用案例中,用户只是希望数据永远不会离开他们的设备。如果你在手机的数字键盘上输入密码和信用卡信息,你不会希望这些密码最终出现在开发该键盘的公司的服务器上吧?事实上,这种用例正是联邦学习发明的初衷。" +"**用户偏好**: 除了法规之外,在一些使用案例中,用户只是希望数据永远不会离开他" +"们的设备。如果你在手机的数字键盘上输入密码和信用卡信息,你不会希望这些密码最" +"终出现在开发该键盘的公司的服务器上吧?事实上,这种用例正是联邦学习发明的初" +"衷。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 msgid "" -"**Data volume**: Some sensors, like cameras, produce such a high data " -"volume that it is neither feasible nor economic to collect all the data " -"(due to, for example, bandwidth or communication efficiency). Think about" -" a national rail service with hundreds of train stations across the " -"country. If each of these train stations is outfitted with a number of " -"security cameras, the volume of raw on-device data they produce requires " -"incredibly powerful and exceedingly expensive infrastructure to process " -"and store. And most of the data isn't even useful." -msgstr "" -"**数据量**: " -"有些传感器(如摄像头)产生的数据量很大,收集所有数据既不可行,也不经济(例如,由于带宽或通信效率的原因)。试想一下全国铁路服务,全国有数百个火车站。如果每个火车站都安装了许多安全摄像头,那么它们所产生的大量原始设备数据就需要功能强大且极其昂贵的基础设施来处理和存储。而大部分数据甚至都是无用的。" +"**Data volume**: Some sensors, like cameras, produce such a high data volume " +"that it is neither feasible nor economic to collect all the data (due to, " +"for example, bandwidth or communication efficiency). Think about a national " +"rail service with hundreds of train stations across the country. If each of " +"these train stations is outfitted with a number of security cameras, the " +"volume of raw on-device data they produce requires incredibly powerful and " +"exceedingly expensive infrastructure to process and store. And most of the " +"data isn't even useful." +msgstr "" +"**数据量**: 有些传感器(如摄像头)产生的数据量很大,收集所有数据既不可行,也" +"不经济(例如,由于带宽或通信效率的原因)。试想一下全国铁路服务,全国有数百个" +"火车站。如果每个火车站都安装了许多安全摄像头,那么它们所产生的大量原始设备数" +"据就需要功能强大且极其昂贵的基础设施来处理和存储。而大部分数据甚至都是无用" +"的。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 msgid "Examples where centralized machine learning does not work include:" @@ -23112,8 +23507,7 @@ msgstr "用多家医院的敏感医疗记录训练癌症检测模型" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 msgid "" -"Financial information from different organizations to detect financial " -"fraud" +"Financial information from different organizations to detect financial fraud" msgstr "不同组织的财务信息,以侦查财务欺诈行为" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 @@ -23126,16 +23520,18 @@ msgstr "端到端加密信息可训练出更好的自动完成模型" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 msgid "" -"The popularity of privacy-enhancing systems like the `Brave " -"`__ browser or the `Signal `__ " -"messenger shows that users care about privacy. In fact, they choose the " -"privacy-enhancing version over other alternatives, if such an alternative" -" exists. But what can we do to apply machine learning and data science to" -" these cases to utilize private data? After all, these are all areas that" -" would benefit significantly from recent advances in AI." 
+"The popularity of privacy-enhancing systems like the `Brave `__ browser or the `Signal `__ messenger shows " +"that users care about privacy. In fact, they choose the privacy-enhancing " +"version over other alternatives, if such an alternative exists. But what can " +"we do to apply machine learning and data science to these cases to utilize " +"private data? After all, these are all areas that would benefit " +"significantly from recent advances in AI." msgstr "" -"像 `Brave `__浏览器或 `Signal " -"`__信息管理器这样的隐私增强系统的流行表明,用户关心隐私。事实上,他们会选择隐私性更好的产品。但是,我们能做些什么来将机器学习和数据科学应用到这些情况中,以利用隐私数据呢?毕竟,这些领域都将从人工智能的最新进展中受益匪浅。" +"像 `Brave `__浏览器或 `Signal `__信" +"息管理器这样的隐私增强系统的流行表明,用户关心隐私。事实上,他们会选择隐私性" +"更好的产品。但是,我们能做些什么来将机器学习和数据科学应用到这些情况中,以利" +"用隐私数据呢?毕竟,这些领域都将从人工智能的最新进展中受益匪浅。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 msgid "Federated learning" @@ -23144,10 +23540,11 @@ msgstr "联邦学习" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 msgid "" "Federated learning simply reverses this approach. It enables machine " -"learning on distributed data by moving the training to the data, instead " -"of moving the data to the training. Here's the single-sentence " -"explanation:" -msgstr "联邦学习简单地颠覆了这种方法。它通过将训练转移到数据上,而不是将数据转移到训练上,在分布式数据上实现机器学习。下面是一句话的解释:" +"learning on distributed data by moving the training to the data, instead of " +"moving the data to the training. Here's the single-sentence explanation:" +msgstr "" +"联邦学习简单地颠覆了这种方法。它通过将训练转移到数据上,而不是将数据转移到训" +"练上,在分布式数据上实现机器学习。下面是一句话的解释:" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 msgid "Central machine learning: move the data to the computation" @@ -23159,22 +23556,28 @@ msgstr "联邦式(机器)学习:将计算转移到数据上" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 msgid "" -"By doing so, it enables us to use machine learning (and other data " -"science approaches) in areas where it wasn't possible before. We can now " -"train excellent medical AI models by enabling different hospitals to work" -" together. We can solve financial fraud by training AI models on the data" -" of different financial institutions. We can build novel privacy-" -"enhancing applications (such as secure messaging) that have better built-" -"in AI than their non-privacy-enhancing alternatives. And those are just a" -" few of the examples that come to mind. As we deploy federated learning, " -"we discover more and more areas that can suddenly be reinvented because " -"they now have access to vast amounts of previously inaccessible data." -msgstr "这样,我们就能在以前不可能的领域使用机器学习(和其他数据科学方法)。现在,我们可以通过让不同的医院协同工作来训练优秀的医疗人工智能模型。我们可以通过在不同金融机构的数据上训练人工智能模型来解决金融欺诈问题。我们可以构建新颖的隐私增强型应用(如安全信息),其内置的人工智能比非隐私增强型应用更好。以上只是我想到的几个例子。随着联邦学习的部署,我们会发现越来越多的领域可以突然重获新生,因为它们现在可以访问大量以前无法访问的数据。" +"By doing so, it enables us to use machine learning (and other data science " +"approaches) in areas where it wasn't possible before. We can now train " +"excellent medical AI models by enabling different hospitals to work " +"together. We can solve financial fraud by training AI models on the data of " +"different financial institutions. We can build novel privacy-enhancing " +"applications (such as secure messaging) that have better built-in AI than " +"their non-privacy-enhancing alternatives. And those are just a few of the " +"examples that come to mind. As we deploy federated learning, we discover " +"more and more areas that can suddenly be reinvented because they now have " +"access to vast amounts of previously inaccessible data." 
+msgstr "" +"这样,我们就能在以前不可能的领域使用机器学习(和其他数据科学方法)。现在,我" +"们可以通过让不同的医院协同工作来训练优秀的医疗人工智能模型。我们可以通过在不" +"同金融机构的数据上训练人工智能模型来解决金融欺诈问题。我们可以构建新颖的隐私" +"增强型应用(如安全信息),其内置的人工智能比非隐私增强型应用更好。以上只是我" +"想到的几个例子。随着联邦学习的部署,我们会发现越来越多的领域可以突然重获新" +"生,因为它们现在可以访问大量以前无法访问的数据。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 msgid "" -"So how does federated learning work, exactly? Let's start with an " -"intuitive explanation." +"So how does federated learning work, exactly? Let's start with an intuitive " +"explanation." msgstr "那么,联邦学习究竟是如何运作的呢?让我们从直观的解释开始。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 @@ -23187,10 +23590,12 @@ msgstr "步骤 0:初始化全局模型" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 msgid "" -"We start by initializing the model on the server. This is exactly the " -"same in classic centralized learning: we initialize the model parameters," -" either randomly or from a previously saved checkpoint." -msgstr "我们首先在服务器上初始化模型。这与经典的集中式学习完全相同:我们随机或从先前保存的检查点初始化模型参数。" +"We start by initializing the model on the server. This is exactly the same " +"in classic centralized learning: we initialize the model parameters, either " +"randomly or from a previously saved checkpoint." +msgstr "" +"我们首先在服务器上初始化模型。这与经典的集中式学习完全相同:我们随机或从先前" +"保存的检查点初始化模型参数。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 msgid "|5b1408eec0d746cdb91162a9107b6089|" @@ -23202,19 +23607,23 @@ msgstr "初始化全局模型" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 msgid "" -"Step 1: Send model to a number of connected organizations/devices (client" -" nodes)" +"Step 1: Send model to a number of connected organizations/devices (client " +"nodes)" msgstr "第 1 步:将模型发送到多个连接的组织/设备(客户节点)" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 msgid "" "Next, we send the parameters of the global model to the connected client " "nodes (think: edge devices like smartphones or servers belonging to " -"organizations). This is to ensure that each participating node starts " -"their local training using the same model parameters. We often use only a" -" few of the connected nodes instead of all nodes. The reason for this is " -"that selecting more and more client nodes has diminishing returns." -msgstr "接下来,我们会将全局模型的参数发送到连接的客户端节点(如智能手机等边缘设备或企业的服务器)。这是为了确保每个参与节点都使用相同的模型参数开始本地训练。我们通常只使用几个连接节点,而不是所有节点。这样做的原因是,选择越来越多的客户端节点会导致收益递减。" +"organizations). This is to ensure that each participating node starts their " +"local training using the same model parameters. We often use only a few of " +"the connected nodes instead of all nodes. The reason for this is that " +"selecting more and more client nodes has diminishing returns." +msgstr "" +"接下来,我们会将全局模型的参数发送到连接的客户端节点(如智能手机等边缘设备或" +"企业的服务器)。这是为了确保每个参与节点都使用相同的模型参数开始本地训练。我" +"们通常只使用几个连接节点,而不是所有节点。这样做的原因是,选择越来越多的客户" +"端节点会导致收益递减。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" @@ -23226,21 +23635,23 @@ msgstr "发送全局模型" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 msgid "" -"Step 2: Train model locally on the data of each organization/device " -"(client node)" +"Step 2: Train model locally on the data of each organization/device (client " +"node)" msgstr "步骤 2:在本地对每个机构/设备(客户端节点)的数据进行模型训练" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 msgid "" -"Now that all (selected) client nodes have the latest version of the " -"global model parameters, they start the local training. 
They use their " -"own local dataset to train their own local model. They don't train the " -"model until full convergence, but they only train for a little while. " -"This could be as little as one epoch on the local data, or even just a " -"few steps (mini-batches)." +"Now that all (selected) client nodes have the latest version of the global " +"model parameters, they start the local training. They use their own local " +"dataset to train their own local model. They don't train the model until " +"full convergence, but they only train for a little while. This could be as " +"little as one epoch on the local data, or even just a few steps (mini-" +"batches)." msgstr "" -"现在,所有(选定的)客户端节点都有了最新版本的全局模型参数,它们开始进行本地训练。它们使用自己的本地数据集来训练自己的本地模型。它们不会一直训练到模型完全收敛为止,而只是训练一小段时间。这可能只是本地数据上的一个遍历,甚至只是几个步骤" -"(mini-batches)。" +"现在,所有(选定的)客户端节点都有了最新版本的全局模型参数,它们开始进行本地" +"训练。它们使用自己的本地数据集来训练自己的本地模型。它们不会一直训练到模型完" +"全收敛为止,而只是训练一小段时间。这可能只是本地数据上的一个遍历,甚至只是几" +"个步骤(mini-batches)。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" @@ -23256,14 +23667,17 @@ msgstr "步骤 3:将模型参数更新返回服务器" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 msgid "" -"After local training, each client node has a slightly different version " -"of the model parameters they originally received. The parameters are all " +"After local training, each client node has a slightly different version of " +"the model parameters they originally received. The parameters are all " "different because each client node has different examples in its local " -"dataset. The client nodes then send those model updates back to the " -"server. The model updates they send can either be the full model " -"parameters or just the gradients that were accumulated during local " -"training." -msgstr "经过本地训练后,每个客户节点最初收到的模型参数都会略有不同。参数之所以不同,是因为每个客户端节点的本地数据集中都有不同的数据。然后,客户端节点将这些模型更新发回服务器。它们发送的模型更新既可以是完整的模型参数,也可以只是本地训练过程中积累的梯度。" +"dataset. The client nodes then send those model updates back to the server. " +"The model updates they send can either be the full model parameters or just " +"the gradients that were accumulated during local training." +msgstr "" +"经过本地训练后,每个客户节点最初收到的模型参数都会略有不同。参数之所以不同," +"是因为每个客户端节点的本地数据集中都有不同的数据。然后,客户端节点将这些模型" +"更新发回服务器。它们发送的模型更新既可以是完整的模型参数,也可以只是本地训练" +"过程中积累的梯度。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 msgid "|ec1fe880237247e0975f52766775ab84|" @@ -23280,36 +23694,38 @@ msgstr "步骤 4:将模型更新聚合到新的全局模型中" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 msgid "" "The server receives model updates from the selected client nodes. If it " -"selected 100 client nodes, it now has 100 slightly different versions of " -"the original global model, each trained on the local data of one client. " -"But didn't we want to have one model that contains the learnings from the" -" data of all 100 client nodes?" +"selected 100 client nodes, it now has 100 slightly different versions of the " +"original global model, each trained on the local data of one client. But " +"didn't we want to have one model that contains the learnings from the data " +"of all 100 client nodes?" msgstr "" -"服务器从选定的客户端节点接收模型更新。如果服务器选择了 100 个客户端节点,那么它现在就拥有 100 " -"个略有不同的原始全局模型版本,每个版本都是根据一个客户端的本地数据训练出来的。难道我们不希望有一个包含所有 100 个客户节点数据的模型吗?" +"服务器从选定的客户端节点接收模型更新。如果服务器选择了 100 个客户端节点,那么" +"它现在就拥有 100 个略有不同的原始全局模型版本,每个版本都是根据一个客户端的本" +"地数据训练出来的。难道我们不希望有一个包含所有 100 个客户节点数据的模型吗?" 
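A minimal NumPy sketch of the weighted-averaging rule that the surrounding "Step 4" entries describe (FedAvg weights each client's parameters by the number of local training examples). The client results and helper name below are illustrative assumptions only, not the Flower API; in practice Flower's built-in ``FedAvg`` strategy performs this aggregation for you::

    import numpy as np

    # Hypothetical (parameters, num_examples) pairs returned by two clients.
    # The values are made up for illustration; they are not the output of a real run.
    client_results = [
        ([np.array([1.0, 2.0]), np.array([0.5])], 10),   # client trained on 10 examples
        ([np.array([3.0, 4.0]), np.array([1.5])], 100),  # client trained on 100 examples
    ]

    def weighted_average(results):
        """Aggregate parameters layer by layer, weighted by each client's example count."""
        total_examples = sum(num_examples for _, num_examples in results)
        num_layers = len(results[0][0])
        aggregated = []
        for layer in range(num_layers):
            # Weighted sum of this layer across clients, normalized by the total example count.
            layer_sum = sum(params[layer] * num_examples for params, num_examples in results)
            aggregated.append(layer_sum / total_examples)
        return aggregated

    new_global_parameters = weighted_average(client_results)
    print(new_global_parameters)

Without the weighting, the 10-example client would pull the average as strongly as the 100-example client; dividing by the total example count gives every individual example the same influence on the new global model, which is exactly the 10-versus-100 situation discussed in the entry that follows.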
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 msgid "" -"In order to get one single model, we have to combine all the model " -"updates we received from the client nodes. This process is called " -"*aggregation*, and there are many different ways to do it. The most basic" -" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " -"`__), often abbreviated as *FedAvg*. " -"*FedAvg* takes the 100 model updates and, as the name suggests, averages " -"them. To be more precise, it takes the *weighted average* of the model " -"updates, weighted by the number of examples each client used for " -"training. The weighting is important to make sure that each data example " -"has the same \"influence\" on the resulting global model. If one client " -"has 10 examples, and another client has 100 examples, then - without " -"weighting - each of the 10 examples would influence the global model ten " -"times as much as each of the 100 examples." -msgstr "" -"为了得到一个单一的模型,我们必须将从客户端节点收到的所有模型更新合并起来。这个过程称为*聚合*,有许多不同的方法。最基本的方法称为 " -"*Federated Averaging* (`McMahan等人,2016 " -"`__),通常缩写为*FedAvg*。*FedAvg* 可以把100 " -"个模型更新进行平均。更准确地说,它取的是模型更新的*加权平均值*,根据每个客户端用于训练的数据数量进行加权。加权对于确保每个数据示例对生成的全局模型具有相同的" -" \"影响 \"非常重要。如果一个客户端有 10 个数据点,而另一个客户有 100 个数据点,那么在不加权的情况下,10 个示例对全局模型的影响是" -" 100 个示例的 10 倍。" +"In order to get one single model, we have to combine all the model updates " +"we received from the client nodes. This process is called *aggregation*, and " +"there are many different ways to do it. The most basic way to do it is " +"called *Federated Averaging* (`McMahan et al., 2016 `__), often abbreviated as *FedAvg*. *FedAvg* takes the 100 " +"model updates and, as the name suggests, averages them. To be more precise, " +"it takes the *weighted average* of the model updates, weighted by the number " +"of examples each client used for training. The weighting is important to " +"make sure that each data example has the same \"influence\" on the resulting " +"global model. If one client has 10 examples, and another client has 100 " +"examples, then - without weighting - each of the 10 examples would influence " +"the global model ten times as much as each of the 100 examples." +msgstr "" +"为了得到一个单一的模型,我们必须将从客户端节点收到的所有模型更新合并起来。这" +"个过程称为*聚合*,有许多不同的方法。最基本的方法称为 *Federated Averaging* " +"(`McMahan等人,2016 `__),通常缩写为" +"*FedAvg*。*FedAvg* 可以把100 个模型更新进行平均。更准确地说,它取的是模型更新" +"的*加权平均值*,根据每个客户端用于训练的数据数量进行加权。加权对于确保每个数" +"据示例对生成的全局模型具有相同的 \"影响 \"非常重要。如果一个客户端有 10 个数" +"据点,而另一个客户有 100 个数据点,那么在不加权的情况下,10 个示例对全局模型" +"的影响是 100 个示例的 10 倍。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" @@ -23326,50 +23742,54 @@ msgstr "步骤 5:重复步骤 1 至 4,直至模型收敛" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 msgid "" "Steps 1 to 4 are what we call a single round of federated learning. The " -"global model parameters get sent to the participating client nodes (step " -"1), the client nodes train on their local data (step 2), they send their " -"updated models to the server (step 3), and the server then aggregates the" -" model updates to get a new version of the global model (step 4)." +"global model parameters get sent to the participating client nodes (step 1), " +"the client nodes train on their local data (step 2), they send their updated " +"models to the server (step 3), and the server then aggregates the model " +"updates to get a new version of the global model (step 4)." 
msgstr "" -"步骤 1 至 4 就是我们所说的单轮联邦学习。全局模型参数被发送到参与的客户端节点(第 1 步),客户端节点对其本地数据进行训练(第 2 " -"步),然后将更新后的模型发送到服务器(第 3 步),服务器汇总模型更新,得到新版本的全局模型(第 4 步)。" +"步骤 1 至 4 就是我们所说的单轮联邦学习。全局模型参数被发送到参与的客户端节点" +"(第 1 步),客户端节点对其本地数据进行训练(第 2 步),然后将更新后的模型发" +"送到服务器(第 3 步),服务器汇总模型更新,得到新版本的全局模型(第 4 步)。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 msgid "" -"During a single round, each client node that participates in that " -"iteration only trains for a little while. This means that after the " -"aggregation step (step 4), we have a model that has been trained on all " -"the data of all participating client nodes, but only for a little while. " -"We then have to repeat this training process over and over again to " -"eventually arrive at a fully trained model that performs well across the " -"data of all client nodes." +"During a single round, each client node that participates in that iteration " +"only trains for a little while. This means that after the aggregation step " +"(step 4), we have a model that has been trained on all the data of all " +"participating client nodes, but only for a little while. We then have to " +"repeat this training process over and over again to eventually arrive at a " +"fully trained model that performs well across the data of all client nodes." msgstr "" -"在一轮迭代中,每个参与迭代的客户节点只训练一小段时间。这意味着,在聚合步骤(步骤 " -"4)之后,我们的模型已经在所有参与的客户节点的所有数据上训练过了,但只训练了一小会儿。然后,我们必须一次又一次地重复这一训练过程,最终得到一个经过全面训练的模型,该模型在所有客户节点的数据中都表现良好。" +"在一轮迭代中,每个参与迭代的客户节点只训练一小段时间。这意味着,在聚合步骤" +"(步骤 4)之后,我们的模型已经在所有参与的客户节点的所有数据上训练过了,但只" +"训练了一小会儿。然后,我们必须一次又一次地重复这一训练过程,最终得到一个经过" +"全面训练的模型,该模型在所有客户节点的数据中都表现良好。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 msgid "" "Congratulations, you now understand the basics of federated learning. " -"There's a lot more to discuss, of course, but that was federated learning" -" in a nutshell. In later parts of this tutorial, we will go into more " -"detail. Interesting questions include: How can we select the best client " -"nodes that should participate in the next round? What's the best way to " -"aggregate model updates? How can we handle failing client nodes " -"(stragglers)?" +"There's a lot more to discuss, of course, but that was federated learning in " +"a nutshell. In later parts of this tutorial, we will go into more detail. " +"Interesting questions include: How can we select the best client nodes that " +"should participate in the next round? What's the best way to aggregate model " +"updates? How can we handle failing client nodes (stragglers)?" msgstr "" -"恭喜您,现在您已经了解了联邦学习的基础知识。当然,要讨论的内容还有很多,但这只是联邦学习的一个缩影。在本教程的后半部分,我们将进行更详细的介绍。有趣的问题包括" -" 我们如何选择最好的客户端节点参与下一轮学习?聚合模型更新的最佳方法是什么?如何处理失败的客户端节点(落伍者)?" +"恭喜您,现在您已经了解了联邦学习的基础知识。当然,要讨论的内容还有很多,但这" +"只是联邦学习的一个缩影。在本教程的后半部分,我们将进行更详细的介绍。有趣的问" +"题包括 我们如何选择最好的客户端节点参与下一轮学习?聚合模型更新的最佳方法是什" +"么?如何处理失败的客户端节点(落伍者)?" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 msgid "" -"Just like we can train a model on the decentralized data of different " -"client nodes, we can also evaluate the model on that data to receive " -"valuable metrics. This is called federated evaluation, sometimes " -"abbreviated as FE. In fact, federated evaluation is an integral part of " -"most federated learning systems." +"Just like we can train a model on the decentralized data of different client " +"nodes, we can also evaluate the model on that data to receive valuable " +"metrics. This is called federated evaluation, sometimes abbreviated as FE. 
" +"In fact, federated evaluation is an integral part of most federated learning " +"systems." msgstr "" -"就像我们可以在不同客户节点的分散数据上训练一个模型一样,我们也可以在这些数据上对模型进行评估,以获得有价值的指标。这就是所谓的联邦评估,有时简称为" -" FE。事实上,联邦评估是大多数联邦学习系统不可或缺的一部分。" +"就像我们可以在不同客户节点的分散数据上训练一个模型一样,我们也可以在这些数据" +"上对模型进行评估,以获得有价值的指标。这就是所谓的联邦评估,有时简称为 FE。事" +"实上,联邦评估是大多数联邦学习系统不可或缺的一部分。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 msgid "Federated analytics" @@ -23377,28 +23797,33 @@ msgstr "联邦分析" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 msgid "" -"In many cases, machine learning isn't necessary to derive value from " -"data. Data analysis can yield valuable insights, but again, there's often" -" not enough data to get a clear answer. What's the average age at which " -"people develop a certain type of health condition? Federated analytics " -"enables such queries over multiple client nodes. It is usually used in " -"conjunction with other privacy-enhancing technologies like secure " -"aggregation to prevent the server from seeing the results submitted by " -"individual client nodes." -msgstr "在很多情况下,机器学习并不是从数据中获取价值的必要条件。数据分析可以产生有价值的见解,但同样,往往没有足够的数据来获得明确的答案。人们患某种健康疾病的平均年龄是多少?联邦分析可以通过多个客户端节点进行此类查询。它通常与安全聚合等其他隐私增强技术结合使用,以防止服务器看到单个客户端节点提交的结果。" +"In many cases, machine learning isn't necessary to derive value from data. " +"Data analysis can yield valuable insights, but again, there's often not " +"enough data to get a clear answer. What's the average age at which people " +"develop a certain type of health condition? Federated analytics enables such " +"queries over multiple client nodes. It is usually used in conjunction with " +"other privacy-enhancing technologies like secure aggregation to prevent the " +"server from seeing the results submitted by individual client nodes." +msgstr "" +"在很多情况下,机器学习并不是从数据中获取价值的必要条件。数据分析可以产生有价" +"值的见解,但同样,往往没有足够的数据来获得明确的答案。人们患某种健康疾病的平" +"均年龄是多少?联邦分析可以通过多个客户端节点进行此类查询。它通常与安全聚合等" +"其他隐私增强技术结合使用,以防止服务器看到单个客户端节点提交的结果。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 msgid "" "Differential privacy (DP) is often mentioned in the context of Federated " -"Learning. It is a privacy-preserving method used when analyzing and " -"sharing statistical data, ensuring the privacy of individual " -"participants. DP achieves this by adding statistical noise to the model " -"updates, ensuring any individual participants’ information cannot be " -"distinguished or re-identified. This technique can be considered an " -"optimization that provides a quantifiable privacy protection measure." -msgstr "" -"差分隐私(DP)经常在联邦学习中被提及。这是一种在分析和共享统计数据时使用的隐私保护方法,可确保单个参与者的隐私。DP " -"通过在模型更新中添加统计噪声来实现这一目的,确保任何个体参与者的信息都无法被区分或重新识别。这种技术可被视为一种优化,提供了一种可量化的隐私保护措施。" +"Learning. It is a privacy-preserving method used when analyzing and sharing " +"statistical data, ensuring the privacy of individual participants. DP " +"achieves this by adding statistical noise to the model updates, ensuring any " +"individual participants’ information cannot be distinguished or re-" +"identified. This technique can be considered an optimization that provides a " +"quantifiable privacy protection measure." 
+msgstr "" +"差分隐私(DP)经常在联邦学习中被提及。这是一种在分析和共享统计数据时使用的隐" +"私保护方法,可确保单个参与者的隐私。DP 通过在模型更新中添加统计噪声来实现这一" +"目的,确保任何个体参与者的信息都无法被区分或重新识别。这种技术可被视为一种优" +"化,提供了一种可量化的隐私保护措施。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 msgid "Flower" @@ -23406,17 +23831,18 @@ msgstr "Flower" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 msgid "" -"Federated learning, federated evaluation, and federated analytics require" -" infrastructure to move machine learning models back and forth, train and" -" evaluate them on local data, and then aggregate the updated models. " -"Flower provides the infrastructure to do exactly that in an easy, " -"scalable, and secure way. In short, Flower presents a unified approach to" -" federated learning, analytics, and evaluation. It allows the user to " -"federate any workload, any ML framework, and any programming language." +"Federated learning, federated evaluation, and federated analytics require " +"infrastructure to move machine learning models back and forth, train and " +"evaluate them on local data, and then aggregate the updated models. Flower " +"provides the infrastructure to do exactly that in an easy, scalable, and " +"secure way. In short, Flower presents a unified approach to federated " +"learning, analytics, and evaluation. It allows the user to federate any " +"workload, any ML framework, and any programming language." msgstr "" -"联邦学习、联邦评估和联邦分析需要基础框架来来回移动机器学习模型,在本地数据上对其进行训练和评估,然后汇总更新的模型。Flower " -"提供的基础架构正是以简单、可扩展和安全的方式实现这些目标的。简而言之,Flower " -"为联邦学习、分析和评估提供了一种统一的方法。它允许用户联邦化任何工作负载、任何 ML 框架和任何编程语言。" +"联邦学习、联邦评估和联邦分析需要基础框架来来回移动机器学习模型,在本地数据上" +"对其进行训练和评估,然后汇总更新的模型。Flower 提供的基础架构正是以简单、可扩" +"展和安全的方式实现这些目标的。简而言之,Flower 为联邦学习、分析和评估提供了一" +"种统一的方法。它允许用户联邦化任何工作负载、任何 ML 框架和任何编程语言。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 msgid "|ff726bc5505e432388ee2fdd6ef420b9|" @@ -23424,92 +23850,84 @@ msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 msgid "" -"Flower federated learning server and client nodes (car, scooter, personal" -" computer, roomba, and phone)" -msgstr "Flower联邦学习服务器和客户端节点(汽车、滑板车、个人电脑、roomba 和电话)" +"Flower federated learning server and client nodes (car, scooter, personal " +"computer, roomba, and phone)" +msgstr "" +"Flower联邦学习服务器和客户端节点(汽车、滑板车、个人电脑、roomba 和电话)" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 msgid "" -"Congratulations, you just learned the basics of federated learning and " -"how it relates to the classic (centralized) machine learning!" -msgstr "恭喜您,您刚刚了解了联邦学习的基础知识,以及它与传统(集中式)机器学习的关系!" +"Congratulations, you just learned the basics of federated learning and how " +"it relates to the classic (centralized) machine learning!" +msgstr "" +"恭喜您,您刚刚了解了联邦学习的基础知识,以及它与传统(集中式)机器学习的关" +"系!" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 msgid "" -"In the next part of this tutorial, we are going to build a first " -"federated learning system with Flower." +"In the next part of this tutorial, we are going to build a first federated " +"learning system with Flower." msgstr "在本教程的下一部分,我们将用 Flower 建立第一个联邦学习系统。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 msgid "" -"The `Flower Federated Learning Tutorial - Part 1 " -"`__ shows how to build a simple federated learning system " -"with PyTorch and Flower." +"The `Flower Federated Learning Tutorial - Part 1 `__ shows how to " +"build a simple federated learning system with PyTorch and Flower." 
msgstr "" -"`Flower 联邦学习教程 - 第 1 部分 `__ 展示了如何使用 PyTorch 和 Flower " -"构建一个简单的联邦学习系统。" +"`Flower 联邦学习教程 - 第 1 部分 `__ 展示了如何使用 PyTorch 和 Flower 构" +"建一个简单的联邦学习系统。" #~ msgid "Before the release" #~ msgstr "发布前" #~ msgid "" -#~ "Update the changelog (``changelog.md``) with" -#~ " all relevant changes that happened " -#~ "after the last release. If the " -#~ "last release was tagged ``v1.2.0``, you" -#~ " can use the following URL to " -#~ "see all commits that got merged " -#~ "into ``main`` since then:" +#~ "Update the changelog (``changelog.md``) with all relevant changes that " +#~ "happened after the last release. If the last release was tagged " +#~ "``v1.2.0``, you can use the following URL to see all commits that got " +#~ "merged into ``main`` since then:" #~ msgstr "" -#~ "更新更新日志 (``changelog.md``),加入上次发布后发生的所有相关变更。如果上次发布的版本被标记为 " -#~ "``v1.2.0``,则可以使用以下 URL 查看此后合并到 ``main`` 的所有提交:" +#~ "更新更新日志 (``changelog.md``),加入上次发布后发生的所有相关变更。如果上" +#~ "次发布的版本被标记为 ``v1.2.0``,则可以使用以下 URL 查看此后合并到 " +#~ "``main`` 的所有提交:" #~ msgid "" -#~ "`GitHub: Compare v1.2.0...main " -#~ "`_" +#~ "`GitHub: Compare v1.2.0...main `_" #~ msgstr "" -#~ "`GitHub: Compare v1.2.0...main " -#~ "`_" +#~ "`GitHub: Compare v1.2.0...main `_" #~ msgid "" -#~ "Thank the authors who contributed since" -#~ " the last release. This can be " -#~ "done by running the ``./dev/add-" -#~ "shortlog.sh`` convenience script (it can " -#~ "be ran multiple times and will " -#~ "update the names in the list if" -#~ " new contributors were added in the" -#~ " meantime)." +#~ "Thank the authors who contributed since the last release. This can be " +#~ "done by running the ``./dev/add-shortlog.sh`` convenience script (it can " +#~ "be ran multiple times and will update the names in the list if new " +#~ "contributors were added in the meantime)." #~ msgstr "" #~ "感谢自上次发布以来做出贡献的作者。可以通过运行 ``./dev/add-shortlog.sh`` " -#~ "方便脚本来完成(可以多次运行,如果在此期间有新的贡献者加入,则会更新列表中的名字)。" +#~ "方便脚本来完成(可以多次运行,如果在此期间有新的贡献者加入,则会更新列表中" +#~ "的名字)。" #~ msgid "" -#~ "Update the ``changelog.md`` section header " -#~ "``Unreleased`` to contain the version " -#~ "number and date for the release " -#~ "you are building. Create a pull " +#~ "Update the ``changelog.md`` section header ``Unreleased`` to contain the " +#~ "version number and date for the release you are building. Create a pull " #~ "request with the change." #~ msgstr "" -#~ "更新 ``changelog.md`` 部分的标题 ``Unreleased`` " -#~ "以包含你正在构建的版本的版本号和日期。创建一个包含更改的拉取请求。" +#~ "更新 ``changelog.md`` 部分的标题 ``Unreleased`` 以包含你正在构建的版本的版" +#~ "本号和日期。创建一个包含更改的拉取请求。" #~ msgid "" -#~ "Second, create a virtual environment " -#~ "(and activate it). If you chose to" -#~ " use :code:`pyenv` (with the :code" -#~ ":`pyenv-virtualenv` plugin) and already " -#~ "have it installed , you can use" -#~ " the following convenience script (by " -#~ "default it will use :code:`Python " -#~ "3.8.17`, but you can change it by" -#~ " providing a specific :code:``)::" +#~ "Second, create a virtual environment (and activate it). 
If you chose to " +#~ "use :code:`pyenv` (with the :code:`pyenv-virtualenv` plugin) and already " +#~ "have it installed , you can use the following convenience script (by " +#~ "default it will use :code:`Python 3.8.17`, but you can change it by " +#~ "providing a specific :code:``)::" #~ msgstr "" -#~ "其次,创建虚拟环境(并激活它)。如果您选择使用 :code:`pyenv`(使用 :code:`pyenv-" -#~ "virtualenv`插件),并且已经安装了该插件,则可以使用下面的便捷脚本(默认情况下使用 " -#~ ":code:`Python3.8.17`,但您可以通过提供特定的 :code:`<版本>`来更改)::" +#~ "其次,创建虚拟环境(并激活它)。如果您选择使用 :code:`pyenv`(使用 :code:" +#~ "`pyenv-virtualenv`插件),并且已经安装了该插件,则可以使用下面的便捷脚本" +#~ "(默认情况下使用 :code:`Python3.8.17`,但您可以通过提供特定的 :code:`<版本" +#~ ">`来更改)::" #~ msgid "flwr (Python API reference)" #~ msgstr "flwr(Python API 参考)" @@ -23602,88 +24020,69 @@ msgstr "" #~ msgstr "server.strategy.DPFedAvgAdaptive" #~ msgid "" -#~ "**Fix the incorrect return types of " -#~ "Strategy** " -#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ "**Fix the incorrect return types of Strategy** ([#2432](https://github." +#~ "com/adap/flower/pull/2432/files))" #~ msgstr "" -#~ "**修复策略的错误返回类型** " -#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ "**修复策略的错误返回类型** ([#2432](https://github.com/adap/flower/" +#~ "pull/2432/files))" #~ msgid "" -#~ "The types of the return values in" -#~ " the docstrings in two methods " -#~ "(`aggregate_fit` and `aggregate_evaluate`) now " -#~ "match the hint types in the code." +#~ "The types of the return values in the docstrings in two methods " +#~ "(`aggregate_fit` and `aggregate_evaluate`) now match the hint types in " +#~ "the code." #~ msgstr "" -#~ "两个方法(\"aggregate_fit \"和 " -#~ "\"aggregate_evaluate\")的文档说明中的返回值类型现在与代码中的提示类型一致。" +#~ "两个方法(\"aggregate_fit \"和 \"aggregate_evaluate\")的文档说明中的返回" +#~ "值类型现在与代码中的提示类型一致。" #~ msgid "" -#~ "**Update Flower Examples** " -#~ "([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425)," -#~ " [#2526](https://github.com/adap/flower/pull/2526))" +#~ "**Update Flower Examples** ([#2384](https://github.com/adap/flower/" +#~ "pull/2384),[#2425](https://github.com/adap/flower/pull/2425), [#2526]" +#~ "(https://github.com/adap/flower/pull/2526))" #~ msgstr "" -#~ "** 更新 Flower Examples** " -#~ "([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425)," -#~ " [#2526](https://github.com/adap/flower/pull/2526))" +#~ "** 更新 Flower Examples** ([#2384](https://github.com/adap/flower/" +#~ "pull/2384),[#2425](https://github.com/adap/flower/pull/2425), [#2526]" +#~ "(https://github.com/adap/flower/pull/2526))" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." +#~ "That's it for the client. We only have to implement :code:`Client` or :" +#~ "code:`NumPyClient` and call :code:`fl.client.start_client()`. The string :" +#~ "code:`\"0.0.0.0:8080\"` tells the client which server to connect to. 
In " +#~ "our case we can run the server and the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If we run a truly federated " +#~ "workload with the server and clients running on different machines, all " +#~ "that needs to change is the :code:`server_address` we pass to the client." #~ msgstr "" -#~ "对于客户端就需要做这么多。我们仅需要实现 " -#~ ":code:`Client`或者:code:`NumPyClient`然后调用:code:`fl.client.start_client()`。字符串" -#~ " :code:`\"0.0.0.0:8080\"` " -#~ "告诉客户端要连接到哪个服务器。在我们的例子中,我们可以在同一台机器上运行服务器和客户端,因此我们使用:code:`\"0.0.0.0:8080\"`。如果我们运行真正联邦学习的工作负载,服务器和客户端在不同的机器上运行,则需要更改的只是我们传递给客户端的" -#~ " server_address 。" +#~ "对于客户端就需要做这么多。我们仅需要实现 :code:`Client`或者:code:" +#~ "`NumPyClient`然后调用:code:`fl.client.start_client()`。字符串 :code:" +#~ "`\"0.0.0.0:8080\"` 告诉客户端要连接到哪个服务器。在我们的例子中,我们可以" +#~ "在同一台机器上运行服务器和客户端,因此我们使用:code:`\"0.0.0.0:8080\"`。如" +#~ "果我们运行真正联邦学习的工作负载,服务器和客户端在不同的机器上运行,则需要" +#~ "更改的只是我们传递给客户端的 server_address 。" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. The string " -#~ ":code:`\"[::]:8080\"` tells the client which" -#~ " server to connect to. In our " -#~ "case we can run the server and " -#~ "the client on the same machine, " -#~ "therefore we use :code:`\"[::]:8080\"`. If " -#~ "we run a truly federated workload " -#~ "with the server and clients running " -#~ "on different machines, all that needs" -#~ " to change is the :code:`server_address`" -#~ " we point the client at." +#~ "That's it for the client. We only have to implement :code:`Client` or :" +#~ "code:`NumPyClient` and call :code:`fl.client.start_client()`. The string :" +#~ "code:`\"[::]:8080\"` tells the client which server to connect to. In our " +#~ "case we can run the server and the client on the same machine, therefore " +#~ "we use :code:`\"[::]:8080\"`. If we run a truly federated workload with " +#~ "the server and clients running on different machines, all that needs to " +#~ "change is the :code:`server_address` we point the client at." 
#~ msgstr "" -#~ "对于客户来说就是这样了。我们只需实现 :code:`Client` 或 " -#~ ":code:`NumPyClient` 并调用:code:`fl.client.start_client()` " -#~ "即可。字符串 :code:`\"[::]:8080\"` " -#~ "告诉客户端要连接到哪个服务器。在我们的例子中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " -#~ ":code:`\"[::]:8080\"`。如果我们运行真正联邦的工作负载,服务器和客户端运行在不同的机器上,则需要更改的只是我们指向客户端的" -#~ " server_address 。" +#~ "对于客户来说就是这样了。我们只需实现 :code:`Client` 或 :code:" +#~ "`NumPyClient` 并调用:code:`fl.client.start_client()` 即可。字符串 :code:" +#~ "`\"[::]:8080\"` 告诉客户端要连接到哪个服务器。在我们的例子中,我们可以在同" +#~ "一台机器上运行服务器和客户端,因此我们使用 :code:`\"[::]:8080\"`。如果我们" +#~ "运行真正联邦的工作负载,服务器和客户端运行在不同的机器上,则需要更改的只是" +#~ "我们指向客户端的 server_address 。" #~ msgid "" -#~ "Let's now load the CIFAR-10 training " -#~ "and test set, partition them into " -#~ "ten smaller datasets (each split into" -#~ " training and validation set), and " -#~ "wrap the resulting partitions by " -#~ "creating a PyTorch ``DataLoader`` for " +#~ "Let's now load the CIFAR-10 training and test set, partition them into " +#~ "ten smaller datasets (each split into training and validation set), and " +#~ "wrap the resulting partitions by creating a PyTorch ``DataLoader`` for " #~ "each of them:" #~ msgstr "" -#~ "现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成 10 " -#~ "个较小的数据集(每个数据集又分为训练集和验证集),并通过为每个数据集创建 PyTorch " +#~ "现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成 10 个较小的数据集" +#~ "(每个数据集又分为训练集和验证集),并通过为每个数据集创建 PyTorch " #~ "``DataLoader`` 来包装由此产生的分割集:" #~ msgid "|e1dd4b4129b040bea23a894266227080|" @@ -23732,169 +24131,127 @@ msgstr "" #~ msgstr "上传 whl(例如 ``flwr-1.6.0-py3-none-any.whl``)" #~ msgid "" -#~ "Change ``!pip install -q 'flwr[simulation]'" -#~ " torch torchvision matplotlib`` to ``!pip" -#~ " install -q 'flwr-1.6.0-py3-none-" -#~ "any.whl[simulation]' torch torchvision matplotlib``" -#~ msgstr "" -#~ "将``!pip install -q 'flwr[simulation]' torch" -#~ " torchvision matplotlib``更改为``!pip install -q " -#~ "'flwr-1.6.0-py3-none-any.whl[simulation]' torch " -#~ "torch torchvision matplotlib``" - -#~ msgid "" -#~ "All that's left to do it to " -#~ "define a function that loads both " -#~ "model and data, creates a " -#~ ":code:`CifarClient`, and starts this client." -#~ " You load your data and model " -#~ "by using :code:`cifar.py`. Start " -#~ ":code:`CifarClient` with the function " -#~ ":code:`fl.client.start_numpy_client()` by pointing " -#~ "it at the same IP address we " -#~ "used in :code:`server.py`:" -#~ msgstr "" -#~ "剩下要做的就是定义一个加载模型和数据的函数,创建一个 :code:`CifarClient` 并启动该客户端。使用" -#~ " :code:`cifar.py` 加载数据和模型。使用函数 " -#~ ":code:`fl.client.start_numpy_client()` 启动 " -#~ ":code:`CifarClient`,将其指向我们在 :code:`server.py` 中使用的相同 " -#~ "IP 地址:" - -#~ msgid "" -#~ "The :code:`VirtualClientEngine` schedules, launches" -#~ " and manages `virtual` clients. These " -#~ "clients are identical to `non-virtual`" -#~ " clients (i.e. the ones you launch" -#~ " via the command `flwr.client.start_numpy_client" -#~ " `_)" -#~ " in the sense that they can be" -#~ " configure by creating a class " -#~ "inheriting, for example, from " -#~ "`flwr.client.NumPyClient `_ and therefore " -#~ "behave in an identical way. In " -#~ "addition to that, clients managed by " -#~ "the :code:`VirtualClientEngine` are:" +#~ "Change ``!pip install -q 'flwr[simulation]' torch torchvision " +#~ "matplotlib`` to ``!pip install -q 'flwr-1.6.0-py3-none-any." 
+#~ "whl[simulation]' torch torchvision matplotlib``" #~ msgstr "" -#~ "代码:`VirtualClientEngine`调度、启动和管理`虚拟`客户端。这些客户端与 \"非虚拟 " -#~ "\"客户端(即通过命令 `flwr.client.start_numpy_client `_启动的客户端)完全相同,它们可以通过创建一个继承自 \"flwr.client.NumPyClient " -#~ "`_\" " -#~ "的类来配置,因此行为方式也完全相同。除此之外,由 :code:`VirtualClientEngine` " -#~ "管理的客户端还包括:" +#~ "将``!pip install -q 'flwr[simulation]' torch torchvision matplotlib``更改" +#~ "为``!pip install -q 'flwr-1.6.0-py3-none-any.whl[simulation]' torch torch " +#~ "torchvision matplotlib``" #~ msgid "" -#~ "Please follow the first section on " -#~ "`Run Flower using Docker " -#~ "`_ which covers this" -#~ " step in more detail." +#~ "All that's left to do it to define a function that loads both model and " +#~ "data, creates a :code:`CifarClient`, and starts this client. You load " +#~ "your data and model by using :code:`cifar.py`. Start :code:`CifarClient` " +#~ "with the function :code:`fl.client.start_numpy_client()` by pointing it " +#~ "at the same IP address we used in :code:`server.py`:" #~ msgstr "" +#~ "剩下要做的就是定义一个加载模型和数据的函数,创建一个 :code:`CifarClient` " +#~ "并启动该客户端。使用 :code:`cifar.py` 加载数据和模型。使用函数 :code:`fl." +#~ "client.start_numpy_client()` 启动 :code:`CifarClient`,将其指向我们在 :" +#~ "code:`server.py` 中使用的相同 IP 地址:" #~ msgid "" -#~ "If the section is completely empty " -#~ "(without any token) or non-existant, " -#~ "the changelog will just contain the " -#~ "title of the PR for the changelog" -#~ " entry, without any description." +#~ "The :code:`VirtualClientEngine` schedules, launches and manages `virtual` " +#~ "clients. These clients are identical to `non-virtual` clients (i.e. the " +#~ "ones you launch via the command `flwr.client.start_numpy_client `_) in the sense that they can be configure " +#~ "by creating a class inheriting, for example, from `flwr.client." +#~ "NumPyClient `_ and therefore " +#~ "behave in an identical way. In addition to that, clients managed by the :" +#~ "code:`VirtualClientEngine` are:" #~ msgstr "" +#~ "代码:`VirtualClientEngine`调度、启动和管理`虚拟`客户端。这些客户端与 \"非" +#~ "虚拟 \"客户端(即通过命令 `flwr.client.start_numpy_client `_启动的客户端)完全相同,它们可以通过创建一个继承" +#~ "自 \"flwr.client.NumPyClient `_\" 的类来配置,因此行为方式也完全相同。除此之外,由 :code:" +#~ "`VirtualClientEngine` 管理的客户端还包括:" #~ msgid "Example: Walk-Through PyTorch & MNIST" #~ msgstr "实例: PyTorch 和 MNIST 的演练" #~ msgid "" -#~ "In this tutorial we will learn, " -#~ "how to train a Convolutional Neural " -#~ "Network on MNIST using Flower and " -#~ "PyTorch." -#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 MNIST 上训练卷积神经网络。" +#~ "In this tutorial we will learn, how to train a Convolutional Neural " +#~ "Network on MNIST using Flower and PyTorch." +#~ msgstr "" +#~ "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 MNIST 上训练卷积神经网" +#~ "络。" #~ msgid "" -#~ "Since we want to use PyTorch to" -#~ " solve a computer vision task, let's" -#~ " go ahead an install PyTorch and " -#~ "the **torchvision** library:" -#~ msgstr "我们想用 PyTorch 来做计算机视觉任务,需要先安装 PyTorch 和 **torchvision** 库:" +#~ "Since we want to use PyTorch to solve a computer vision task, let's go " +#~ "ahead an install PyTorch and the **torchvision** library:" +#~ msgstr "" +#~ "我们想用 PyTorch 来做计算机视觉任务,需要先安装 PyTorch 和 " +#~ "**torchvision** 库:" #~ msgid "Ready... Set... Train!" #~ msgstr "准备...设置...训练!" #~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on PyTorch's `Basic MNIST Example" -#~ " `_. 
" -#~ "This will allow you see how easy" -#~ " it is to wrap your code with" -#~ " Flower and begin training in a " -#~ "federated way. We provide you with " -#~ "two helper scripts, namely *run-" -#~ "server.sh*, and *run-clients.sh*. Don't " -#~ "be afraid to look inside, they are" -#~ " simple enough =)." +#~ "Now that we have all our dependencies installed, let's run a simple " +#~ "distributed training with two clients and one server. Our training " +#~ "procedure and network architecture are based on PyTorch's `Basic MNIST " +#~ "Example `_. This " +#~ "will allow you see how easy it is to wrap your code with Flower and begin " +#~ "training in a federated way. We provide you with two helper scripts, " +#~ "namely *run-server.sh*, and *run-clients.sh*. Don't be afraid to look " +#~ "inside, they are simple enough =)." #~ msgstr "" -#~ "现在我们已经安装了所有的依赖包,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 " -#~ "PyTorch 的 `Basic MNIST Example " -#~ "`_。您会发现用 " -#~ "Flower 来封装您的代码并进行联邦学习训练是多么容易。我们为您提供了两个辅助脚本,即 *run-" -#~ "server.sh* 和 *run-clients.sh*。别害怕,它们很简单 =)。" +#~ "现在我们已经安装了所有的依赖包,让我们用两个客户端和一个服务器来运行一个简" +#~ "单的分布式训练。我们的训练过程和网络架构基于 PyTorch 的 `Basic MNIST " +#~ "Example `_。您会发" +#~ "现用 Flower 来封装您的代码并进行联邦学习训练是多么容易。我们为您提供了两个" +#~ "辅助脚本,即 *run-server.sh* 和 *run-clients.sh*。别害怕,它们很简单 =)。" #~ msgid "" -#~ "Go ahead and launch on a terminal" -#~ " the *run-server.sh* script first as" -#~ " follows:" +#~ "Go ahead and launch on a terminal the *run-server.sh* script first as " +#~ "follows:" #~ msgstr "首先在终端上启动 *run-server.sh* 脚本,如下所示:" -#~ msgid "Now that the server is up and running, go ahead and launch the clients." +#~ msgid "" +#~ "Now that the server is up and running, go ahead and launch the clients." #~ msgstr "现在服务器已经启动并运行,请继续启动客户端。" #~ msgid "" -#~ "Et voilà! You should be seeing the" -#~ " training procedure and, after a few" -#~ " iterations, the test accuracy for " -#~ "each client." -#~ msgstr "然后就可以了!您应该能看到训练过程,以及经过几次反复后,每个客户端的测试准确率。" +#~ "Et voilà! You should be seeing the training procedure and, after a few " +#~ "iterations, the test accuracy for each client." +#~ msgstr "" +#~ "然后就可以了!您应该能看到训练过程,以及经过几次反复后,每个客户端的测试准" +#~ "确率。" #~ msgid "Now, let's see what is really happening inside." #~ msgstr "现在,让我们看看里面到底发生了什么。" #~ msgid "" -#~ "Inside the server helper script *run-" -#~ "server.sh* you will find the following" -#~ " code that basically runs the " -#~ ":code:`server.py`" -#~ msgstr "在服务器辅助脚本 *run-server.sh* 中,你可以找到以下代码,这些代码基本上都是运行 :code:`server.py` 的代码" +#~ "Inside the server helper script *run-server.sh* you will find the " +#~ "following code that basically runs the :code:`server.py`" +#~ msgstr "" +#~ "在服务器辅助脚本 *run-server.sh* 中,你可以找到以下代码,这些代码基本上都" +#~ "是运行 :code:`server.py` 的代码" #~ msgid "" -#~ "We can go a bit deeper and " -#~ "see that :code:`server.py` simply launches " -#~ "a server that will coordinate three " -#~ "rounds of training. Flower Servers are" -#~ " very customizable, but for simple " -#~ "workloads, we can start a server " -#~ "using the :ref:`start_server ` function and leave " -#~ "all the configuration possibilities at " -#~ "their default values, as seen below." +#~ "We can go a bit deeper and see that :code:`server.py` simply launches a " +#~ "server that will coordinate three rounds of training. Flower Servers are " +#~ "very customizable, but for simple workloads, we can start a server using " +#~ "the :ref:`start_server ` function and " +#~ "leave all the configuration possibilities at their default values, as " +#~ "seen below." 
#~ msgstr "" -#~ "我们可以再深入一点,:code:`server.py` 只是启动了一个服务器,该服务器将协调三轮训练。Flower " -#~ "服务器是非常容易修改的,但对于简单的工作,我们可以使用 :ref:`start_server `函数启动服务器,并将所有可能的配置保留为默认值,如下所示。" +#~ "我们可以再深入一点,:code:`server.py` 只是启动了一个服务器,该服务器将协调" +#~ "三轮训练。Flower 服务器是非常容易修改的,但对于简单的工作,我们可以使用 :" +#~ "ref:`start_server `函数启动服务器,并将所" +#~ "有可能的配置保留为默认值,如下所示。" #~ msgid "" -#~ "Next, let's take a look at the " -#~ "*run-clients.sh* file. You will see " -#~ "that it contains the main loop " -#~ "that starts a set of *clients*." -#~ msgstr "接下来,让我们看看 *run-clients.sh* 文件。您会看到它包含了用来启动多个 *客户端* 的代码。" +#~ "Next, let's take a look at the *run-clients.sh* file. You will see that " +#~ "it contains the main loop that starts a set of *clients*." +#~ msgstr "" +#~ "接下来,让我们看看 *run-clients.sh* 文件。您会看到它包含了用来启动多个 *客" +#~ "户端* 的代码。" #~ msgid "" -#~ "**cid**: is the client ID. It is" -#~ " an integer that uniquely identifies " +#~ "**cid**: is the client ID. It is an integer that uniquely identifies " #~ "client identifier." #~ msgstr "**cid**:是客户 ID。它是一个整数,可唯一标识客户标识符。" @@ -23902,169 +24259,140 @@ msgstr "" #~ msgstr "**sever_address**: 标识服务器 IP 和端口的字符串。" #~ msgid "" -#~ "**nb_clients**: This defines the number " -#~ "of clients being created. This piece " -#~ "of information is not required by " -#~ "the client, but it helps us " -#~ "partition the original MNIST dataset to" -#~ " make sure that every client is " -#~ "working on unique subsets of both " -#~ "*training* and *test* sets." +#~ "**nb_clients**: This defines the number of clients being created. This " +#~ "piece of information is not required by the client, but it helps us " +#~ "partition the original MNIST dataset to make sure that every client is " +#~ "working on unique subsets of both *training* and *test* sets." #~ msgstr "" -#~ "**nb_clients**: 这定义了正在创建的客户端数量。客户端并不需要这一信息,但它有助于我们对原始 " -#~ "MNIST 数据集进行划分,以确保每个客户端都在 *training* 和 *test*" -#~ " 数据集上有独立的数据。" +#~ "**nb_clients**: 这定义了正在创建的客户端数量。客户端并不需要这一信息,但" +#~ "它有助于我们对原始 MNIST 数据集进行划分,以确保每个客户端都在 *training* " +#~ "和 *test* 数据集上有独立的数据。" #~ msgid "" -#~ "Again, we can go deeper and look" -#~ " inside :code:`flwr_example/quickstart-" -#~ "pytorch/client.py`. After going through the" -#~ " argument parsing code at the " -#~ "beginning of our :code:`main` function, " -#~ "you will find a call to " -#~ ":code:`mnist.load_data`. This function is " -#~ "responsible for partitioning the original " -#~ "MNIST datasets (*training* and *test*) " -#~ "and returning a :code:`torch.utils.data.DataLoader`" -#~ " s for each of them. We then" -#~ " instantiate a :code:`PytorchMNISTClient` object" -#~ " with our client ID, our DataLoaders," -#~ " the number of epochs in each " -#~ "round, and which device we want to" -#~ " use for training (CPU or GPU)." +#~ "Again, we can go deeper and look inside :code:`flwr_example/quickstart-" +#~ "pytorch/client.py`. After going through the argument parsing code at the " +#~ "beginning of our :code:`main` function, you will find a call to :code:" +#~ "`mnist.load_data`. This function is responsible for partitioning the " +#~ "original MNIST datasets (*training* and *test*) and returning a :code:" +#~ "`torch.utils.data.DataLoader` s for each of them. We then instantiate a :" +#~ "code:`PytorchMNISTClient` object with our client ID, our DataLoaders, the " +#~ "number of epochs in each round, and which device we want to use for " +#~ "training (CPU or GPU)." 
#~ msgstr "" -#~ "我们可以深入看一下 :code:`flwr_example/quickstart-" -#~ "pytorch/client.py`。查看 :code:`main` 函数开头的参数解析代码后,你会发现一个对" -#~ " :code:`mnist.load_data` 的调用。该函数负责分割原始 MNIST " -#~ "数据集(*training* 和 *test*),并为每个数据集返回一个 " -#~ ":code:`torch.utils.data.DataLoader` 。然后,我们实例化一个 " -#~ ":code:`PytorchMNISTClient` 对象,其中包含我们的客户端 ID、 " -#~ "DataLoader、每一轮中的遍历数,以及我们希望用于训练的设备(CPU 或 GPU)。" +#~ "我们可以深入看一下 :code:`flwr_example/quickstart-pytorch/client.py`。查" +#~ "看 :code:`main` 函数开头的参数解析代码后,你会发现一个对 :code:`mnist." +#~ "load_data` 的调用。该函数负责分割原始 MNIST 数据集(*training* 和 " +#~ "*test*),并为每个数据集返回一个 :code:`torch.utils.data.DataLoader` 。然" +#~ "后,我们实例化一个 :code:`PytorchMNISTClient` 对象,其中包含我们的客户端 " +#~ "ID、 DataLoader、每一轮中的遍历数,以及我们希望用于训练的设备(CPU 或 " +#~ "GPU)。" #~ msgid "" -#~ "The :code:`PytorchMNISTClient` object when " -#~ "finally passed to :code:`fl.client.start_client` " -#~ "along with the server's address as " -#~ "the training process begins." +#~ "The :code:`PytorchMNISTClient` object when finally passed to :code:`fl." +#~ "client.start_client` along with the server's address as the training " +#~ "process begins." #~ msgstr "" -#~ "当训练过程开始时,:code:`PytorchMNISTClient` 对象会连同服务器地址一起传递给 " -#~ ":code:`fl.client.start_client`。" +#~ "当训练过程开始时,:code:`PytorchMNISTClient` 对象会连同服务器地址一起传递" +#~ "给 :code:`fl.client.start_client`。" #~ msgid "A Closer Look" #~ msgstr "仔细看一下" #~ msgid "" -#~ "Now, let's look closely into the " -#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" -#~ ".quickstart-pytorch.mnist` and see what it" -#~ " is doing:" +#~ "Now, let's look closely into the :code:`PytorchMNISTClient` inside :code:" +#~ "`flwr_example.quickstart-pytorch.mnist` and see what it is doing:" #~ msgstr "" -#~ "现在,让我们仔细研究一下 :code:`flwr_example.quickstart-pytorch.mnist`" -#~ " 中的 :code:`PytorchMNISTClient`,看看它在做什么:" +#~ "现在,让我们仔细研究一下 :code:`flwr_example.quickstart-pytorch.mnist` 中" +#~ "的 :code:`PytorchMNISTClient`,看看它在做什么:" #~ msgid "" -#~ "The first thing to notice is that" -#~ " :code:`PytorchMNISTClient` instantiates a CNN" -#~ " model inside its constructor" -#~ msgstr "首先要注意的是 :code:`PytorchMNISTClient` 在其构造函数中实例化了一个 CNN 模型" +#~ "The first thing to notice is that :code:`PytorchMNISTClient` instantiates " +#~ "a CNN model inside its constructor" +#~ msgstr "" +#~ "首先要注意的是 :code:`PytorchMNISTClient` 在其构造函数中实例化了一个 CNN " +#~ "模型" #~ msgid "" -#~ "The code for the CNN is available" -#~ " under :code:`quickstart-pytorch.mnist` and " -#~ "it is reproduced below. It is the" -#~ " same network found in `Basic MNIST" -#~ " Example " -#~ "`_." +#~ "The code for the CNN is available under :code:`quickstart-pytorch.mnist` " +#~ "and it is reproduced below. It is the same network found in `Basic MNIST " +#~ "Example `_." #~ msgstr "" -#~ "CNN 的代码可在 :code:`quickstart-pytorch.mnist` " -#~ "下找到,现复制如下。它与 `Basic MNIST Example " -#~ "`_中的网络相同。" +#~ "CNN 的代码可在 :code:`quickstart-pytorch.mnist` 下找到,现复制如下。它与 " +#~ "`Basic MNIST Example `_中的网络相同。" #~ msgid "" -#~ "The second thing to notice is that" -#~ " :code:`PytorchMNISTClient` class inherits from" -#~ " the :code:`fl.client.Client`, and hence it" -#~ " must implement the following methods:" +#~ "The second thing to notice is that :code:`PytorchMNISTClient` class " +#~ "inherits from the :code:`fl.client.Client`, and hence it must implement " +#~ "the following methods:" #~ msgstr "" -#~ "第二件要注意的事是 :code:`PytorchMNISTClient` 类继承自 " -#~ ":code:`fl.client.Client`,因此它必须实现以下方法:" +#~ "第二件要注意的事是 :code:`PytorchMNISTClient` 类继承自 :code:`fl.client." 
+#~ "Client`,因此它必须实现以下方法:" #~ msgid "" -#~ "When comparing the abstract class to " -#~ "its derived class :code:`PytorchMNISTClient` " -#~ "you will notice that :code:`fit` calls" -#~ " a :code:`train` function and that " -#~ ":code:`evaluate` calls a :code:`test`: " -#~ "function." +#~ "When comparing the abstract class to its derived class :code:" +#~ "`PytorchMNISTClient` you will notice that :code:`fit` calls a :code:" +#~ "`train` function and that :code:`evaluate` calls a :code:`test`: function." #~ msgstr "" -#~ "将抽象类与其派生类 :code:`PytorchMNISTClient` 进行比较时,您会发现 " -#~ ":code:`fit` 调用了一个 :code:`train` 函数,而 " -#~ ":code:`evaluate` 则调用了一个 :code:`test`: 函数。" +#~ "将抽象类与其派生类 :code:`PytorchMNISTClient` 进行比较时,您会发现 :code:" +#~ "`fit` 调用了一个 :code:`train` 函数,而 :code:`evaluate` 则调用了一个 :" +#~ "code:`test`: 函数。" #~ msgid "" -#~ "These functions can both be found " -#~ "inside the same :code:`quickstart-" +#~ "These functions can both be found inside the same :code:`quickstart-" #~ "pytorch.mnist` module:" -#~ msgstr "这些函数都可以在同一个 :code:`quickstart-pytorch.mnist` 模块中找到:" +#~ msgstr "" +#~ "这些函数都可以在同一个 :code:`quickstart-pytorch.mnist` 模块中找到:" #~ msgid "" -#~ "Observe that these functions encapsulate " -#~ "regular training and test loops and " -#~ "provide :code:`fit` and :code:`evaluate` with" -#~ " final statistics for each round. You" -#~ " could substitute them with your " -#~ "custom train and test loops and " -#~ "change the network architecture, and the" -#~ " entire example would still work " -#~ "flawlessly. As a matter of fact, " -#~ "why not try and modify the code" -#~ " to an example of your liking?" +#~ "Observe that these functions encapsulate regular training and test loops " +#~ "and provide :code:`fit` and :code:`evaluate` with final statistics for " +#~ "each round. You could substitute them with your custom train and test " +#~ "loops and change the network architecture, and the entire example would " +#~ "still work flawlessly. As a matter of fact, why not try and modify the " +#~ "code to an example of your liking?" #~ msgstr "" -#~ "请注意,这些函数封装了常规的训练和测试循环,并为 :code:`fit` 和 " -#~ ":code:`evaluate` " -#~ "提供了每轮的最终统计数据。您可以用自定义的训练和测试循环来替代它们,并改变网络结构,整个示例仍然可以完美运行。事实上,为什么不按照自己的喜好修改代码呢?" +#~ "请注意,这些函数封装了常规的训练和测试循环,并为 :code:`fit` 和 :code:" +#~ "`evaluate` 提供了每轮的最终统计数据。您可以用自定义的训练和测试循环来替代" +#~ "它们,并改变网络结构,整个示例仍然可以完美运行。事实上,为什么不按照自己的" +#~ "喜好修改代码呢?" #~ msgid "Give It a Try" #~ msgstr "试试看" #~ msgid "" -#~ "Looking through the quickstart code " -#~ "description above will have given a " -#~ "good understanding of how *clients* and" -#~ " *servers* work in Flower, how to " -#~ "run a simple experiment, and the " -#~ "internals of a client wrapper. Here " -#~ "are a few things you could try " -#~ "on your own and get more " -#~ "experience with Flower:" +#~ "Looking through the quickstart code description above will have given a " +#~ "good understanding of how *clients* and *servers* work in Flower, how to " +#~ "run a simple experiment, and the internals of a client wrapper. Here are " +#~ "a few things you could try on your own and get more experience with " +#~ "Flower:" #~ msgstr "" -#~ "通过上面的快速入门代码描述,你将对 Flower " -#~ "中*客户端*和*服务器*的工作方式、如何运行一个简单的实验以及客户端封装器的内部结构有一个很好的了解。您可以自己尝试以下内容,以获得更多使用" -#~ " Flower 的经验:" +#~ "通过上面的快速入门代码描述,你将对 Flower 中*客户端*和*服务器*的工作方式、" +#~ "如何运行一个简单的实验以及客户端封装器的内部结构有一个很好的了解。您可以自" +#~ "己尝试以下内容,以获得更多使用 Flower 的经验:" #~ msgid "" -#~ "Try and change :code:`PytorchMNISTClient` so" -#~ " it can accept different architectures." 
+#~ "Try and change :code:`PytorchMNISTClient` so it can accept different " +#~ "architectures." #~ msgstr "尝试修改 :code:`PytorchMNISTClient`,使其可以接受不同的架构。" #~ msgid "" -#~ "Modify the :code:`train` function so " -#~ "that it accepts different optimizers" +#~ "Modify the :code:`train` function so that it accepts different optimizers" #~ msgstr "修改 :code:`train` 函数,使其接受不同的优化器" #~ msgid "" -#~ "Modify the :code:`test` function so that" -#~ " it proves not only the top-1 " -#~ "(regular accuracy) but also the top-5" -#~ " accuracy?" -#~ msgstr "修改 :code:`test` 函数,使其不仅能输出前 1 名(常规精确度),还能证明前 5 名的精确度?" +#~ "Modify the :code:`test` function so that it proves not only the top-1 " +#~ "(regular accuracy) but also the top-5 accuracy?" +#~ msgstr "" +#~ "修改 :code:`test` 函数,使其不仅能输出前 1 名(常规精确度),还能证明前 5 " +#~ "名的精确度?" #~ msgid "" -#~ "Go larger! Try to adapt the code" -#~ " to larger images and datasets. Why" -#~ " not try training on ImageNet with" -#~ " a ResNet-50?" -#~ msgstr "让我们尝试让代码适应更大的图像和数据集。为什么不尝试使用 ResNet-50 在 ImageNet 上进行训练呢?" +#~ "Go larger! Try to adapt the code to larger images and datasets. Why not " +#~ "try training on ImageNet with a ResNet-50?" +#~ msgstr "" +#~ "让我们尝试让代码适应更大的图像和数据集。为什么不尝试使用 ResNet-50 在 " +#~ "ImageNet 上进行训练呢?" #~ msgid "You are ready now. Enjoy learning in a federated way!" #~ msgstr "您现在已经准备就绪。尽情享受联邦学习的乐趣吧!" @@ -24073,127 +24401,112 @@ msgstr "" #~ msgstr "差别隐私" #~ msgid "" -#~ "Flower provides differential privacy (DP) " -#~ "wrapper classes for the easy integration" -#~ " of the central DP guarantees " -#~ "provided by DP-FedAvg into training " -#~ "pipelines defined in any of the " -#~ "various ML frameworks that Flower is " -#~ "compatible with." +#~ "Flower provides differential privacy (DP) wrapper classes for the easy " +#~ "integration of the central DP guarantees provided by DP-FedAvg into " +#~ "training pipelines defined in any of the various ML frameworks that " +#~ "Flower is compatible with." #~ msgstr "" -#~ "Flower 提供了差分隐私 (DP) 封装类,可将 DP-FedAvg " -#~ "提供的核心 DP 轻松集成到 Flower 兼容的各种 ML " -#~ "框架中定义的训练模式中。" +#~ "Flower 提供了差分隐私 (DP) 封装类,可将 DP-FedAvg 提供的核心 DP 轻松集成" +#~ "到 Flower 兼容的各种 ML 框架中定义的训练模式中。" #~ msgid "" -#~ "Please note that these components are" -#~ " still experimental; the correct " -#~ "configuration of DP for a specific " -#~ "task is still an unsolved problem." -#~ msgstr "请注意,这些组件仍处于试验阶段,如何为特定任务正确配置 DP 仍是一个尚未解决的问题。" +#~ "Please note that these components are still experimental; the correct " +#~ "configuration of DP for a specific task is still an unsolved problem." +#~ msgstr "" +#~ "请注意,这些组件仍处于试验阶段,如何为特定任务正确配置 DP 仍是一个尚未解决" +#~ "的问题。" #~ msgid "" -#~ "The name DP-FedAvg is misleading " -#~ "since it can be applied on top " -#~ "of any FL algorithm that conforms " -#~ "to the general structure prescribed by" -#~ " the FedOpt family of algorithms." -#~ msgstr "DP-FedAvg 这个名称容易引起误解,因为它可以应用于任何符合 FedOpt 系列算法规定的一般结构的 FL 算法之上。" +#~ "The name DP-FedAvg is misleading since it can be applied on top of any FL " +#~ "algorithm that conforms to the general structure prescribed by the FedOpt " +#~ "family of algorithms." +#~ msgstr "" +#~ "DP-FedAvg 这个名称容易引起误解,因为它可以应用于任何符合 FedOpt 系列算法规" +#~ "定的一般结构的 FL 算法之上。" #~ msgid "DP-FedAvg" #~ msgstr "DP-FedAvg" #~ msgid "" -#~ "DP-FedAvg, originally proposed by " -#~ "McMahan et al. [mcmahan]_ and extended" -#~ " by Andrew et al. [andrew]_, is " -#~ "essentially FedAvg with the following " +#~ "DP-FedAvg, originally proposed by McMahan et al. [mcmahan]_ and extended " +#~ "by Andrew et al. 
[andrew]_, is essentially FedAvg with the following " #~ "modifications." #~ msgstr "DP-FedAvg 最初由McMahan等人提出,并由Andrew等人加以扩展。" #~ msgid "" -#~ "**Clipping** : The influence of each " -#~ "client's update is bounded by clipping" -#~ " it. This is achieved by enforcing" -#~ " a cap on the L2 norm of " -#~ "the update, scaling it down if " -#~ "needed." -#~ msgstr "**裁剪** : 裁剪会影响到每个客户端的模型参数。具体做法是对参数的 L2 准则设置上限,必要时将其缩减。" +#~ "**Clipping** : The influence of each client's update is bounded by " +#~ "clipping it. This is achieved by enforcing a cap on the L2 norm of the " +#~ "update, scaling it down if needed." +#~ msgstr "" +#~ "**裁剪** : 裁剪会影响到每个客户端的模型参数。具体做法是对参数的 L2 准则设" +#~ "置上限,必要时将其缩减。" #~ msgid "" -#~ "**Noising** : Gaussian noise, calibrated " -#~ "to the clipping threshold, is added " -#~ "to the average computed at the " -#~ "server." -#~ msgstr "**噪声** : 在服务器计算出的平均值中加入高斯噪声,该噪声根据剪切阈值进行校准。" +#~ "**Noising** : Gaussian noise, calibrated to the clipping threshold, is " +#~ "added to the average computed at the server." +#~ msgstr "" +#~ "**噪声** : 在服务器计算出的平均值中加入高斯噪声,该噪声根据剪切阈值进行校" +#~ "准。" #~ msgid "" -#~ "The distribution of the update norm " -#~ "has been shown to vary from " -#~ "task-to-task and to evolve as " -#~ "training progresses. This variability is " -#~ "crucial in understanding its impact on" -#~ " differential privacy guarantees, emphasizing " -#~ "the need for an adaptive approach " -#~ "[andrew]_ that continuously adjusts the " -#~ "clipping threshold to track a " -#~ "prespecified quantile of the update norm" -#~ " distribution." -#~ msgstr "事实证明,参数更新准则的分布会随着任务的不同而变化,并随着训练的进展而演变。因此,我们采用了一种自适应方法,该方法会不断调整剪切阈值,以跟踪参数更新准则分布的预设量化值。" +#~ "The distribution of the update norm has been shown to vary from task-to-" +#~ "task and to evolve as training progresses. This variability is crucial in " +#~ "understanding its impact on differential privacy guarantees, emphasizing " +#~ "the need for an adaptive approach [andrew]_ that continuously adjusts the " +#~ "clipping threshold to track a prespecified quantile of the update norm " +#~ "distribution." +#~ msgstr "" +#~ "事实证明,参数更新准则的分布会随着任务的不同而变化,并随着训练的进展而演" +#~ "变。因此,我们采用了一种自适应方法,该方法会不断调整剪切阈值,以跟踪参数更" +#~ "新准则分布的预设量化值。" #~ msgid "Simplifying Assumptions" #~ msgstr "简化假设" #~ msgid "" -#~ "We make (and attempt to enforce) a" -#~ " number of assumptions that must be" -#~ " satisfied to ensure that the " -#~ "training process actually realizes the " -#~ ":math:`(\\epsilon, \\delta)` guarantees the " -#~ "user has in mind when configuring " +#~ "We make (and attempt to enforce) a number of assumptions that must be " +#~ "satisfied to ensure that the training process actually realizes the :math:" +#~ "`(\\epsilon, \\delta)` guarantees the user has in mind when configuring " #~ "the setup." #~ msgstr "" -#~ "我们提出(并试图执行)了一系列必须满足的假设,以确保训练过程真正实现用户在配置设置时所定的 " -#~ ":math:`(\\epsilon,\\delta)` 。" +#~ "我们提出(并试图执行)了一系列必须满足的假设,以确保训练过程真正实现用户在" +#~ "配置设置时所定的 :math:`(\\epsilon,\\delta)` 。" #~ msgid "" -#~ "**Fixed-size subsampling** :Fixed-size " -#~ "subsamples of the clients must be " -#~ "taken at each round, as opposed to" -#~ " variable-sized Poisson subsamples." -#~ msgstr "** 固定大小的子样本** :与可变大小的泊松分布子样本相比,每轮必须抽取固定大小的客户端子样本。" +#~ "**Fixed-size subsampling** :Fixed-size subsamples of the clients must be " +#~ "taken at each round, as opposed to variable-sized Poisson subsamples." 
+#~ msgstr "" +#~ "** 固定大小的子样本** :与可变大小的泊松分布子样本相比,每轮必须抽取固定大" +#~ "小的客户端子样本。" #~ msgid "" -#~ "**Unweighted averaging** : The contributions" -#~ " from all the clients must weighted" -#~ " equally in the aggregate to " -#~ "eliminate the requirement for the server" -#~ " to know in advance the sum of" -#~ " the weights of all clients available" -#~ " for selection." -#~ msgstr "**非加权平均**: 所有客户端的贡献必须加权相等,这样服务器就不需要事先知道所有客户的权重总和。" +#~ "**Unweighted averaging** : The contributions from all the clients must " +#~ "weighted equally in the aggregate to eliminate the requirement for the " +#~ "server to know in advance the sum of the weights of all clients available " +#~ "for selection." +#~ msgstr "" +#~ "**非加权平均**: 所有客户端的贡献必须加权相等,这样服务器就不需要事先知道" +#~ "所有客户的权重总和。" #~ msgid "" -#~ "**No client failures** : The set " -#~ "of available clients must stay constant" -#~ " across all rounds of training. In" -#~ " other words, clients cannot drop out" -#~ " or fail." -#~ msgstr "**没有失败的客户端** : 在各轮训练中,可用客户端的数量必须保持不变。换句话说,客户端不能退出或失败。" +#~ "**No client failures** : The set of available clients must stay constant " +#~ "across all rounds of training. In other words, clients cannot drop out or " +#~ "fail." +#~ msgstr "" +#~ "**没有失败的客户端** : 在各轮训练中,可用客户端的数量必须保持不变。换句话" +#~ "说,客户端不能退出或失败。" #~ msgid "" -#~ "The first two are useful for " -#~ "eliminating a multitude of complications " -#~ "associated with calibrating the noise to" -#~ " the clipping threshold, while the " -#~ "third one is required to comply " -#~ "with the assumptions of the privacy " +#~ "The first two are useful for eliminating a multitude of complications " +#~ "associated with calibrating the noise to the clipping threshold, while " +#~ "the third one is required to comply with the assumptions of the privacy " #~ "analysis." -#~ msgstr "前两种方法有助于消除将噪声校准为削波阈值所带来的诸多复杂问题,而第三种方法则需要符合隐私分析的假设。" +#~ msgstr "" +#~ "前两种方法有助于消除将噪声校准为削波阈值所带来的诸多复杂问题,而第三种方法" +#~ "则需要符合隐私分析的假设。" #~ msgid "" -#~ "These restrictions are in line with " -#~ "constraints imposed by Andrew et al. " +#~ "These restrictions are in line with constraints imposed by Andrew et al. " #~ "[andrew]_." #~ msgstr "这些限制与 Andrew 等人所施加的限制一致。" @@ -24201,299 +24514,219 @@ msgstr "" #~ msgstr "可定制的噪声注入" #~ msgid "" -#~ "In contrast to other implementations " -#~ "where the addition of noise is " -#~ "performed at the server, you can " -#~ "configure the site of noise injection" -#~ " to better match your threat model." -#~ " We provide users with the " -#~ "flexibility to set up the training " -#~ "such that each client independently adds" -#~ " a small amount of noise to the" -#~ " clipped update, with the result that" -#~ " simply aggregating the noisy updates " -#~ "is equivalent to the explicit addition" -#~ " of noise to the non-noisy " -#~ "aggregate at the server." -#~ msgstr "与其他在服务器上添加噪声的实现方法不同,您可以配置噪声注入的位置,以便更好地匹配您的威胁模型。我们为用户提供了设置训练的灵活性,使每个客户端都能独立地为剪切参数更新添加少量噪声,这样,只需聚合噪声更新,就相当于在服务器上为非噪声聚合添加噪声了。" +#~ "In contrast to other implementations where the addition of noise is " +#~ "performed at the server, you can configure the site of noise injection to " +#~ "better match your threat model. We provide users with the flexibility to " +#~ "set up the training such that each client independently adds a small " +#~ "amount of noise to the clipped update, with the result that simply " +#~ "aggregating the noisy updates is equivalent to the explicit addition of " +#~ "noise to the non-noisy aggregate at the server." 
+#~ msgstr "" +#~ "与其他在服务器上添加噪声的实现方法不同,您可以配置噪声注入的位置,以便更好" +#~ "地匹配您的威胁模型。我们为用户提供了设置训练的灵活性,使每个客户端都能独立" +#~ "地为剪切参数更新添加少量噪声,这样,只需聚合噪声更新,就相当于在服务器上为" +#~ "非噪声聚合添加噪声了。" #~ msgid "" -#~ "To be precise, if we let :math:`m`" -#~ " be the number of clients sampled " -#~ "each round and :math:`\\sigma_\\Delta` be " -#~ "the scale of the total Gaussian " -#~ "noise that needs to be added to" -#~ " the sum of the model updates, " -#~ "we can use simple maths to show" -#~ " that this is equivalent to each " -#~ "client adding noise with scale " -#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." +#~ "To be precise, if we let :math:`m` be the number of clients sampled each " +#~ "round and :math:`\\sigma_\\Delta` be the scale of the total Gaussian " +#~ "noise that needs to be added to the sum of the model updates, we can use " +#~ "simple maths to show that this is equivalent to each client adding noise " +#~ "with scale :math:`\\sigma_\\Delta/\\sqrt{m}`." #~ msgstr "" -#~ "准确地说,我们假设每轮采样的客户端数量为:math:`m`,:math:`\\sigma_\\Delta` " -#~ "为需要添加到模型更新总和中的总高斯噪声的规模,我们就可以用简单的数学方法证明了,这相当于每个客户端都添加了规模为 " -#~ ":math:`\\sigma_\\Delta/\\sqrt{m}` 的噪声。" +#~ "准确地说,我们假设每轮采样的客户端数量为:math:`m`,:math:" +#~ "`\\sigma_\\Delta` 为需要添加到模型更新总和中的总高斯噪声的规模,我们就可以" +#~ "用简单的数学方法证明了,这相当于每个客户端都添加了规模为 :math:" +#~ "`\\sigma_\\Delta/\\sqrt{m}` 的噪声。" #~ msgid "Wrapper-based approach" #~ msgstr "基于封装的方法" #~ msgid "" -#~ "Introducing DP to an existing workload" -#~ " can be thought of as adding an" -#~ " extra layer of security around it." -#~ " This inspired us to provide the " -#~ "additional server and client-side logic" -#~ " needed to make the training process" -#~ " differentially private as wrappers for " -#~ "instances of the :code:`Strategy` and " -#~ ":code:`NumPyClient` abstract classes respectively." -#~ " This wrapper-based approach has the" -#~ " advantage of being easily composable " -#~ "with other wrappers that someone might" -#~ " contribute to the Flower library in" -#~ " the future, e.g., for secure " -#~ "aggregation. Using Inheritance instead can " -#~ "be tedious because that would require" -#~ " the creation of new sub- classes " -#~ "every time a new class implementing " -#~ ":code:`Strategy` or :code:`NumPyClient` is " +#~ "Introducing DP to an existing workload can be thought of as adding an " +#~ "extra layer of security around it. This inspired us to provide the " +#~ "additional server and client-side logic needed to make the training " +#~ "process differentially private as wrappers for instances of the :code:" +#~ "`Strategy` and :code:`NumPyClient` abstract classes respectively. This " +#~ "wrapper-based approach has the advantage of being easily composable with " +#~ "other wrappers that someone might contribute to the Flower library in the " +#~ "future, e.g., for secure aggregation. Using Inheritance instead can be " +#~ "tedious because that would require the creation of new sub- classes every " +#~ "time a new class implementing :code:`Strategy` or :code:`NumPyClient` is " #~ "defined." 
#~ msgstr "" -#~ "在现有工作负载中引入 DP " -#~ "可以被认为是在其周围增加了一层额外的安全性。受此启发,我们提供了额外的服务器端和客户端逻辑,分别作为 " -#~ ":code:`Strategy` 和 :code:`NumPyClient` " -#~ "抽象类实例的封装器,使训练过程具有不同的隐私性。这种基于封装器的方法的优点是可以很容易地与将来有人贡献给 Flower " -#~ "的其他封装器(例如用于安全聚合的封装器)进行组合。使用继承可能会比较繁琐,因为每次定义实现 :code:`Strategy`" -#~ " 或 :code:`NumPyClient` 的新类时,都需要创建新的子类。" +#~ "在现有工作负载中引入 DP 可以被认为是在其周围增加了一层额外的安全性。受此启" +#~ "发,我们提供了额外的服务器端和客户端逻辑,分别作为 :code:`Strategy` 和 :" +#~ "code:`NumPyClient` 抽象类实例的封装器,使训练过程具有不同的隐私性。这种基" +#~ "于封装器的方法的优点是可以很容易地与将来有人贡献给 Flower 的其他封装器(例" +#~ "如用于安全聚合的封装器)进行组合。使用继承可能会比较繁琐,因为每次定义实" +#~ "现 :code:`Strategy` 或 :code:`NumPyClient` 的新类时,都需要创建新的子类。" #~ msgid "" -#~ "The first version of our solution " -#~ "was to define a decorator whose " -#~ "constructor accepted, among other things, " -#~ "a boolean-valued variable indicating " -#~ "whether adaptive clipping was to be " -#~ "enabled or not. We quickly realized " -#~ "that this would clutter its " -#~ ":code:`__init__()` function with variables " -#~ "corresponding to hyperparameters of adaptive" -#~ " clipping that would remain unused " -#~ "when it was disabled. A cleaner " -#~ "implementation could be achieved by " -#~ "splitting the functionality into two " -#~ "decorators, :code:`DPFedAvgFixed` and " -#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" -#~ " classing the former. The constructors " -#~ "for both classes accept a boolean " -#~ "parameter :code:`server_side_noising`, which, as " -#~ "the name suggests, determines where " -#~ "noising is to be performed." +#~ "The first version of our solution was to define a decorator whose " +#~ "constructor accepted, among other things, a boolean-valued variable " +#~ "indicating whether adaptive clipping was to be enabled or not. We quickly " +#~ "realized that this would clutter its :code:`__init__()` function with " +#~ "variables corresponding to hyperparameters of adaptive clipping that " +#~ "would remain unused when it was disabled. A cleaner implementation could " +#~ "be achieved by splitting the functionality into two decorators, :code:" +#~ "`DPFedAvgFixed` and :code:`DPFedAvgAdaptive`, with the latter sub- " +#~ "classing the former. The constructors for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as the name suggests, " +#~ "determines where noising is to be performed." #~ msgstr "" -#~ "我们的第一版解决方案是定义一个装饰器,其构造函数接受一个布尔值变量,表示是否启用自适应剪裁。我们很快意识到,这样会使其 " -#~ ":code:`__init__()` " -#~ "函数中与自适应裁剪超参数相对应的变量变得杂乱无章,而这些变量在自适应裁剪被禁用时将保持未使用状态。要实现更简洁的功能,可以将该功能拆分为两个装饰器,即" -#~ " :code:`DPFedAvgFixed` 和 " -#~ ":code:`DPFedAvgAdaptive`,后者是前者的子类。这两个类的构造函数都接受一个布尔参数 " -#~ ":code:`server_side_noising`,顾名思义,它决定了在哪里加噪声。" +#~ "我们的第一版解决方案是定义一个装饰器,其构造函数接受一个布尔值变量,表示是" +#~ "否启用自适应剪裁。我们很快意识到,这样会使其 :code:`__init__()` 函数中与自" +#~ "适应裁剪超参数相对应的变量变得杂乱无章,而这些变量在自适应裁剪被禁用时将保" +#~ "持未使用状态。要实现更简洁的功能,可以将该功能拆分为两个装饰器,即 :code:" +#~ "`DPFedAvgFixed` 和 :code:`DPFedAvgAdaptive`,后者是前者的子类。这两个类的" +#~ "构造函数都接受一个布尔参数 :code:`server_side_noising`,顾名思义,它决定了" +#~ "在哪里加噪声。" #~ msgid "" -#~ "The server-side capabilities required " -#~ "for the original version of DP-" -#~ "FedAvg, i.e., the one which performed" -#~ " fixed clipping, can be completely " -#~ "captured with the help of wrapper " -#~ "logic for just the following two " -#~ "methods of the :code:`Strategy` abstract " -#~ "class." 
+#~ "The server-side capabilities required for the original version of DP-" +#~ "FedAvg, i.e., the one which performed fixed clipping, can be completely " +#~ "captured with the help of wrapper logic for just the following two " +#~ "methods of the :code:`Strategy` abstract class." #~ msgstr "" #~ "只需对 :code:`Strategy` 抽象类的以下两个方法进行封装,就能完全捕获 DP-" #~ "FedAvg 原始版本(即执行固定剪裁的版本)所需的服务器端功能。" #~ msgid "" -#~ ":code:`configure_fit()` : The config " -#~ "dictionary being sent by the wrapped " -#~ ":code:`Strategy` to each client needs to" -#~ " be augmented with an additional " -#~ "value equal to the clipping threshold" -#~ " (keyed under :code:`dpfedavg_clip_norm`) and," -#~ " if :code:`server_side_noising=true`, another one" -#~ " equal to the scale of the " -#~ "Gaussian noise that needs to be " -#~ "added at the client (keyed under " -#~ ":code:`dpfedavg_noise_stddev`). This entails " -#~ "*post*-processing of the results returned " -#~ "by the wrappee's implementation of " -#~ ":code:`configure_fit()`." +#~ ":code:`configure_fit()` : The config dictionary being sent by the " +#~ "wrapped :code:`Strategy` to each client needs to be augmented with an " +#~ "additional value equal to the clipping threshold (keyed under :code:" +#~ "`dpfedavg_clip_norm`) and, if :code:`server_side_noising=true`, another " +#~ "one equal to the scale of the Gaussian noise that needs to be added at " +#~ "the client (keyed under :code:`dpfedavg_noise_stddev`). This entails " +#~ "*post*-processing of the results returned by the wrappee's implementation " +#~ "of :code:`configure_fit()`." #~ msgstr "" -#~ ":code:`configure_fit()` :由封装的 :code:`Strategy` " -#~ "发送到每个客户端的配置字典需要使用等于裁剪阈值的附加值(在 :code:`dpfedavg_clip_norm` " -#~ "下键入)进行扩充。并且,如果 " -#~ "server_side_noising=true,则另一个值等于需要在客户端添加的高斯噪声的大小(在 " -#~ "dpfedavg_noise_stddev 下键入)。这需要对封装后的configure_fit() " -#~ "所返回的结果进行后处理。" +#~ ":code:`configure_fit()` :由封装的 :code:`Strategy` 发送到每个客户端的配置" +#~ "字典需要使用等于裁剪阈值的附加值(在 :code:`dpfedavg_clip_norm` 下键入)进" +#~ "行扩充。并且,如果 server_side_noising=true,则另一个值等于需要在客户端添" +#~ "加的高斯噪声的大小(在 dpfedavg_noise_stddev 下键入)。这需要对封装后的" +#~ "configure_fit() 所返回的结果进行后处理。" #~ msgid "" -#~ ":code:`aggregate_fit()`: We check whether any" -#~ " of the sampled clients dropped out" -#~ " or failed to upload an update " -#~ "before the round timed out. In " -#~ "that case, we need to abort the" -#~ " current round, discarding any successful" -#~ " updates that were received, and move" -#~ " on to the next one. On the " -#~ "other hand, if all clients responded " -#~ "successfully, we must force the " -#~ "averaging of the updates to happen " -#~ "in an unweighted manner by intercepting" -#~ " the :code:`parameters` field of " -#~ ":code:`FitRes` for each received update " -#~ "and setting it to 1. Furthermore, " -#~ "if :code:`server_side_noising=true`, each update " -#~ "is perturbed with an amount of " -#~ "noise equal to what it would have" -#~ " been subjected to had client-side" -#~ " noising being enabled. This entails " -#~ "*pre*-processing of the arguments to " -#~ "this method before passing them on " -#~ "to the wrappee's implementation of " -#~ ":code:`aggregate_fit()`." +#~ ":code:`aggregate_fit()`: We check whether any of the sampled clients " +#~ "dropped out or failed to upload an update before the round timed out. In " +#~ "that case, we need to abort the current round, discarding any successful " +#~ "updates that were received, and move on to the next one. 
On the other " +#~ "hand, if all clients responded successfully, we must force the averaging " +#~ "of the updates to happen in an unweighted manner by intercepting the :" +#~ "code:`parameters` field of :code:`FitRes` for each received update and " +#~ "setting it to 1. Furthermore, if :code:`server_side_noising=true`, each " +#~ "update is perturbed with an amount of noise equal to what it would have " +#~ "been subjected to had client-side noising being enabled. This entails " +#~ "*pre*-processing of the arguments to this method before passing them on " +#~ "to the wrappee's implementation of :code:`aggregate_fit()`." #~ msgstr "" -#~ ":code:`aggregate_fit()`: " -#~ "我们会检查是否有任何客户端在本轮超时前退出或未能上传参数更新。在这种情况下,我们需要中止当前一轮,丢弃已收到的所有参数更新,然后继续下一轮。另一方面,如果所有客户端都成功响应,我们就必须通过拦截" -#~ " :code:`FitRes` 的 :code:`parameters` 字段并将其设置为 " -#~ "1,强制以不加权的方式平均更新。此外,如果 " -#~ ":code:`server_side_noising=true`,每次更新都会受到一定量的噪声扰动,其扰动量相当于启用客户端噪声时的扰动量。" -#~ " 这就需要在将本方法的参数传递给封装的 :code:`aggregate_fit()` " -#~ "之前,对参数进行*预*处理。" +#~ ":code:`aggregate_fit()`: 我们会检查是否有任何客户端在本轮超时前退出或未能" +#~ "上传参数更新。在这种情况下,我们需要中止当前一轮,丢弃已收到的所有参数更" +#~ "新,然后继续下一轮。另一方面,如果所有客户端都成功响应,我们就必须通过拦" +#~ "截 :code:`FitRes` 的 :code:`parameters` 字段并将其设置为 1,强制以不加权的" +#~ "方式平均更新。此外,如果 :code:`server_side_noising=true`,每次更新都会受" +#~ "到一定量的噪声扰动,其扰动量相当于启用客户端噪声时的扰动量。 这就需要在将" +#~ "本方法的参数传递给封装的 :code:`aggregate_fit()` 之前,对参数进行*预*处" +#~ "理。" #~ msgid "" -#~ "We can't directly change the aggregation" -#~ " function of the wrapped strategy to" -#~ " force it to add noise to the" -#~ " aggregate, hence we simulate client-" -#~ "side noising to implement server-side" -#~ " noising." -#~ msgstr "我们无法直接改变封装策略的聚合函数,迫使它在聚合中添加噪声,因此我们模拟客户端噪声来实现服务器端噪声。" +#~ "We can't directly change the aggregation function of the wrapped strategy " +#~ "to force it to add noise to the aggregate, hence we simulate client-side " +#~ "noising to implement server-side noising." +#~ msgstr "" +#~ "我们无法直接改变封装策略的聚合函数,迫使它在聚合中添加噪声,因此我们模拟客" +#~ "户端噪声来实现服务器端噪声。" #~ msgid "" -#~ "These changes have been put together " -#~ "into a class called :code:`DPFedAvgFixed`, " -#~ "whose constructor accepts the strategy " -#~ "being decorated, the clipping threshold " -#~ "and the number of clients sampled " -#~ "every round as compulsory arguments. The" -#~ " user is expected to specify the " -#~ "clipping threshold since the order of" -#~ " magnitude of the update norms is " -#~ "highly dependent on the model being " -#~ "trained and providing a default value" -#~ " would be misleading. The number of" -#~ " clients sampled at every round is" -#~ " required to calculate the amount of" -#~ " noise that must be added to " -#~ "each individual update, either by the" -#~ " server or the clients." +#~ "These changes have been put together into a class called :code:" +#~ "`DPFedAvgFixed`, whose constructor accepts the strategy being decorated, " +#~ "the clipping threshold and the number of clients sampled every round as " +#~ "compulsory arguments. The user is expected to specify the clipping " +#~ "threshold since the order of magnitude of the update norms is highly " +#~ "dependent on the model being trained and providing a default value would " +#~ "be misleading. The number of clients sampled at every round is required " +#~ "to calculate the amount of noise that must be added to each individual " +#~ "update, either by the server or the clients." 
#~ msgstr "" -#~ "这些变化被整合到一个名为 :code:`DPFedAvgFixed` " -#~ "的类中,其构造函数接受被装饰的策略、剪切阈值和每轮采样的客户数作为必选参数。用户需要指定剪切阈值,因为参数更新规范的数量级在很大程度上取决于正在训练的模型,提供默认值会产生误导。每轮采样的客户端数量是计算服务器或客户在每次参数更新时添加的噪音量所必需的。" +#~ "这些变化被整合到一个名为 :code:`DPFedAvgFixed` 的类中,其构造函数接受被装" +#~ "饰的策略、剪切阈值和每轮采样的客户数作为必选参数。用户需要指定剪切阈值,因" +#~ "为参数更新规范的数量级在很大程度上取决于正在训练的模型,提供默认值会产生误" +#~ "导。每轮采样的客户端数量是计算服务器或客户在每次参数更新时添加的噪音量所必" +#~ "需的。" #~ msgid "" -#~ "The additional functionality required to " -#~ "facilitate adaptive clipping has been " -#~ "provided in :code:`DPFedAvgAdaptive`, a " -#~ "subclass of :code:`DPFedAvgFixed`. It " -#~ "overrides the above-mentioned methods to" -#~ " do the following." +#~ "The additional functionality required to facilitate adaptive clipping has " +#~ "been provided in :code:`DPFedAvgAdaptive`, a subclass of :code:" +#~ "`DPFedAvgFixed`. It overrides the above-mentioned methods to do the " +#~ "following." #~ msgstr "" -#~ "自适应剪裁所需的附加功能在 :code:`DPFedAvgAdaptive` 中提供,其是 " -#~ ":code:`DPFedAvgFixed` 的子类。它重写了上述方法,以实现以下功能。" +#~ "自适应剪裁所需的附加功能在 :code:`DPFedAvgAdaptive` 中提供,其是 :code:" +#~ "`DPFedAvgFixed` 的子类。它重写了上述方法,以实现以下功能。" #~ msgid "" -#~ ":code:`configure_fit()` : It intercepts the" -#~ " config dict returned by " -#~ ":code:`super.configure_fit()` to add the " -#~ "key-value pair " -#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " -#~ "which the client interprets as an " -#~ "instruction to include an indicator bit" -#~ " (1 if update norm <= clipping " -#~ "threshold, 0 otherwise) in the results" -#~ " returned by it." +#~ ":code:`configure_fit()` : It intercepts the config dict returned by :code:" +#~ "`super.configure_fit()` to add the key-value pair :code:" +#~ "`dpfedavg_adaptive_clip_enabled:True` to it, which the client interprets " +#~ "as an instruction to include an indicator bit (1 if update norm <= " +#~ "clipping threshold, 0 otherwise) in the results returned by it." #~ msgstr "" -#~ ":code:`configure_fit()`:它截取由 :code:`super.configure_fit()` " -#~ "返回的 config 字典,并在其中添加键-值对 " -#~ ":code:`dpfedavg_adaptive_clip_enabled:True\",客户端将其解释为在返回结果中包含一个指示位(如果参数更新范式" -#~ " <= 剪裁阈值,则为 1,否则为 0)的指令。" +#~ ":code:`configure_fit()`:它截取由 :code:`super.configure_fit()` 返回的 " +#~ "config 字典,并在其中添加键-值对 :code:`dpfedavg_adaptive_clip_enabled:" +#~ "True\",客户端将其解释为在返回结果中包含一个指示位(如果参数更新范式 <= 剪" +#~ "裁阈值,则为 1,否则为 0)的指令。" #~ msgid "" -#~ ":code:`aggregate_fit()` : It follows a " -#~ "call to :code:`super.aggregate_fit()` with one" -#~ " to :code:`__update_clip_norm__()`, a procedure" -#~ " which adjusts the clipping threshold " -#~ "on the basis of the indicator bits" -#~ " received from the sampled clients." -#~ msgstr ":code:`aggregate_fit()`:在调用:code:`super.aggregate_fit()`后,再调用:code:`__update_clip_norm__()`,该过程根据从采样客户端接收到的指示位调整裁剪阈值。" +#~ ":code:`aggregate_fit()` : It follows a call to :code:`super." +#~ "aggregate_fit()` with one to :code:`__update_clip_norm__()`, a procedure " +#~ "which adjusts the clipping threshold on the basis of the indicator bits " +#~ "received from the sampled clients." +#~ msgstr "" +#~ ":code:`aggregate_fit()`:在调用:code:`super.aggregate_fit()`后,再调用:" +#~ "code:`__update_clip_norm__()`,该过程根据从采样客户端接收到的指示位调整裁" +#~ "剪阈值。" #~ msgid "" -#~ "The client-side capabilities required " -#~ "can be completely captured through " -#~ "wrapper logic for just the :code:`fit()`" -#~ " method of the :code:`NumPyClient` abstract" -#~ " class. 
To be precise, we need " -#~ "to *post-process* the update computed" -#~ " by the wrapped client to clip " -#~ "it, if necessary, to the threshold " -#~ "value supplied by the server as " -#~ "part of the config dictionary. In " -#~ "addition to this, it may need to" -#~ " perform some extra work if either" -#~ " (or both) of the following keys " -#~ "are also present in the dict." +#~ "The client-side capabilities required can be completely captured through " +#~ "wrapper logic for just the :code:`fit()` method of the :code:" +#~ "`NumPyClient` abstract class. To be precise, we need to *post-process* " +#~ "the update computed by the wrapped client to clip it, if necessary, to " +#~ "the threshold value supplied by the server as part of the config " +#~ "dictionary. In addition to this, it may need to perform some extra work " +#~ "if either (or both) of the following keys are also present in the dict." #~ msgstr "" -#~ "客户端所需的功能完全可以通过 :code:`NumPyClient` 抽象类的 " -#~ ":code:`fit()` " -#~ "方法的封装逻辑来实现。准确地说,我们需要对封装客户端计算的参数更新进行处理,以便在必要时将其剪切到服务器作为配置字典的一部分提供的阈值。除此之外,如果配置字典中还存在以下任一(或两个)键,客户端可能还需要执行一些额外的工作。" +#~ "客户端所需的功能完全可以通过 :code:`NumPyClient` 抽象类的 :code:`fit()` 方" +#~ "法的封装逻辑来实现。准确地说,我们需要对封装客户端计算的参数更新进行处理," +#~ "以便在必要时将其剪切到服务器作为配置字典的一部分提供的阈值。除此之外,如果" +#~ "配置字典中还存在以下任一(或两个)键,客户端可能还需要执行一些额外的工作。" #~ msgid "" -#~ ":code:`dpfedavg_noise_stddev` : Generate and " -#~ "add the specified amount of noise " -#~ "to the clipped update." -#~ msgstr "code:`dpfedavg_noise_stddev`:生成并在剪切参数更新中添加指定数量的噪声。" +#~ ":code:`dpfedavg_noise_stddev` : Generate and add the specified amount of " +#~ "noise to the clipped update." +#~ msgstr "" +#~ "code:`dpfedavg_noise_stddev`:生成并在剪切参数更新中添加指定数量的噪声。" #~ msgid "" -#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" -#~ " metrics dict in the :code:`FitRes` " -#~ "object being returned to the server " -#~ "with an indicator bit, calculated as " -#~ "described earlier." +#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the metrics dict in the :" +#~ "code:`FitRes` object being returned to the server with an indicator bit, " +#~ "calculated as described earlier." #~ msgstr "" -#~ ":code:`dpfedavg_adaptive_clip_enabled`:在返回给服务器的 :code:`FitRes`" -#~ " 对象中的度量值字典中增加一个指标位,计算方法如前所述。" +#~ ":code:`dpfedavg_adaptive_clip_enabled`:在返回给服务器的 :code:`FitRes` 对" +#~ "象中的度量值字典中增加一个指标位,计算方法如前所述。" #~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" #~ msgstr "进行 :math:`(epsilon, \\delta)` 分析" #~ msgid "" -#~ "Assume you have trained for :math:`n`" -#~ " rounds with sampling fraction :math:`q`" -#~ " and noise multiplier :math:`z`. In " -#~ "order to calculate the :math:`\\epsilon` " -#~ "value this would result in for a" -#~ " particular :math:`\\delta`, the following " -#~ "script may be used." -#~ msgstr "" -#~ "假设您已经训练了 :math:`n` 轮,采样比例为 :math:`q`,噪声乘数为 " -#~ ":math:`z`。为了计算特定 :math:`\\delta` 的 :math:`epsilon`" -#~ " 值,可以使用下面的脚本。" - -#~ msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" -#~ msgstr "" - -#~ msgid "" -#~ ":py:obj:`ClientApp `\\ " -#~ "\\(client\\_fn\\[\\, mods\\]\\)" -#~ msgstr "" - -#~ msgid ":py:obj:`flwr.server.driver `\\" +#~ "Assume you have trained for :math:`n` rounds with sampling fraction :math:" +#~ "`q` and noise multiplier :math:`z`. In order to calculate the :math:" +#~ "`\\epsilon` value this would result in for a particular :math:`\\delta`, " +#~ "the following script may be used." 
#~ msgstr "" +#~ "假设您已经训练了 :math:`n` 轮,采样比例为 :math:`q`,噪声乘数为 :math:" +#~ "`z`。为了计算特定 :math:`\\delta` 的 :math:`epsilon` 值,可以使用下面的脚" +#~ "本。" #~ msgid "Flower driver SDK." #~ msgstr "Flower 服务器。" @@ -24501,100 +24734,24 @@ msgstr "" #~ msgid "driver" #~ msgstr "服务器" -#~ msgid "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" -#~ msgstr "" - -#~ msgid "" -#~ ":py:obj:`Driver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" -#~ msgstr "" - -#~ msgid "" -#~ ":py:obj:`GrpcDriver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" -#~ msgstr "" - -#~ msgid "`GrpcDriver` provides access to the gRPC Driver API/service." -#~ msgstr "" - -#~ msgid ":py:obj:`get_nodes `\\ \\(\\)" -#~ msgstr "" - -#~ msgid "" -#~ ":py:obj:`pull_task_res " -#~ "`\\ \\(task\\_ids\\)" -#~ msgstr "" - #~ msgid "Get task results." #~ msgstr "汇总训练结果。" -#~ msgid "" -#~ ":py:obj:`push_task_ins " -#~ "`\\ " -#~ "\\(task\\_ins\\_list\\)" -#~ msgstr "" - -#~ msgid "Schedule tasks." -#~ msgstr "" - -#~ msgid "GrpcDriver" -#~ msgstr "" - -#~ msgid ":py:obj:`connect `\\ \\(\\)" -#~ msgstr "" - -#~ msgid "Connect to the Driver API." -#~ msgstr "" - -#~ msgid "" -#~ ":py:obj:`create_run " -#~ "`\\ \\(req\\)" -#~ msgstr "" - #~ msgid "Request for run ID." #~ msgstr "Flower 基线申请" -#~ msgid "" -#~ ":py:obj:`disconnect " -#~ "`\\ \\(\\)" -#~ msgstr "" - -#~ msgid "Disconnect from the Driver API." -#~ msgstr "" - -#~ msgid "" -#~ ":py:obj:`get_nodes `\\" -#~ " \\(req\\)" -#~ msgstr "" - #~ msgid "Get client IDs." #~ msgstr "返回客户端(本身)。" #~ msgid "" -#~ ":py:obj:`pull_task_res " -#~ "`\\ \\(req\\)" -#~ msgstr "" - -#~ msgid "" -#~ ":py:obj:`push_task_ins " -#~ "`\\ \\(req\\)" -#~ msgstr "" - -#~ msgid "" -#~ "Flower usage examples used to be " -#~ "bundled with Flower in a package " -#~ "called ``flwr_example``. We are migrating " -#~ "those examples to standalone projects to" -#~ " make them easier to use. All " -#~ "new examples are based in the " -#~ "directory `examples " -#~ "`_." +#~ "Flower usage examples used to be bundled with Flower in a package called " +#~ "``flwr_example``. We are migrating those examples to standalone projects " +#~ "to make them easier to use. All new examples are based in the directory " +#~ "`examples `_." #~ msgstr "" -#~ "Flower 的使用示例曾与 Flower 捆绑在一个名为 ``flwr_example``" -#~ " 的软件包中。我们正在将这些示例迁移到独立项目中,以使它们更易于使用。所有新示例都位于目录 `examples " -#~ "`_。" +#~ "Flower 的使用示例曾与 Flower 捆绑在一个名为 ``flwr_example`` 的软件包中。" +#~ "我们正在将这些示例迁移到独立项目中,以使它们更易于使用。所有新示例都位于目" +#~ "录 `examples `_。" #~ msgid "Quickstart TensorFlow/Keras" #~ msgstr "快速入门 TensorFlow/Keras" @@ -24603,30 +24760,25 @@ msgstr "" #~ msgstr "传统示例 (`flwr_example`)" #~ msgid "" -#~ "The useage examples in `flwr_example` " -#~ "are deprecated and will be removed " -#~ "in the future. New examples are " -#~ "provided as standalone projects in " +#~ "The useage examples in `flwr_example` are deprecated and will be removed " +#~ "in the future. New examples are provided as standalone projects in " #~ "`examples `_." #~ msgstr "" -#~ "在 `flwr_example` 中的使用示例已被弃用,今后将被移除。新示例将作为独立项目在 " -#~ "`examples `_" -#~ " 中提供。" +#~ "在 `flwr_example` 中的使用示例已被弃用,今后将被移除。新示例将作为独立项目" +#~ "在 `examples `_ 中提" +#~ "供。" #~ msgid "Extra Dependencies" #~ msgstr "额外依赖" #~ msgid "" -#~ "The core Flower framework keeps a " -#~ "minimal set of dependencies. 
The " -#~ "examples demonstrate Flower in the " -#~ "context of different machine learning " -#~ "frameworks, so additional dependencies need" -#~ " to be installed before an example" -#~ " can be run." +#~ "The core Flower framework keeps a minimal set of dependencies. The " +#~ "examples demonstrate Flower in the context of different machine learning " +#~ "frameworks, so additional dependencies need to be installed before an " +#~ "example can be run." #~ msgstr "" -#~ "Flower 核心框架只保留了最低限度的依赖项。这些示例在不同机器学习框架的背景下演示了 " -#~ "Flower,因此在运行示例之前需要安装额外的依赖项。" +#~ "Flower 核心框架只保留了最低限度的依赖项。这些示例在不同机器学习框架的背景" +#~ "下演示了 Flower,因此在运行示例之前需要安装额外的依赖项。" #~ msgid "For PyTorch examples::" #~ msgstr "PyTorch 示例::" @@ -24638,39 +24790,34 @@ msgstr "" #~ msgstr "PyTorch 和 TensorFlow 示例::" #~ msgid "" -#~ "Please consult :code:`pyproject.toml` for a" -#~ " full list of possible extras " +#~ "Please consult :code:`pyproject.toml` for a full list of possible extras " #~ "(section :code:`[tool.poetry.extras]`)." #~ msgstr "" -#~ "请参阅 :code:`pyproject.toml`,了解可能的 extras 的完整列表(章节 " -#~ ":code:`[tool.poems.extras]`)。" +#~ "请参阅 :code:`pyproject.toml`,了解可能的 extras 的完整列表(章节 :code:" +#~ "`[tool.poems.extras]`)。" #~ msgid "PyTorch Examples" #~ msgstr "PyTorch 示例" #~ msgid "" -#~ "Our PyTorch examples are based on " -#~ "PyTorch 1.7. They should work with " -#~ "other releases as well. So far, we" -#~ " provide the following examples." -#~ msgstr "我们的 PyTorch 示例基于 PyTorch 1.7。它们应该也能在其他版本中使用。到目前为止,我们提供了以下示例。" +#~ "Our PyTorch examples are based on PyTorch 1.7. They should work with " +#~ "other releases as well. So far, we provide the following examples." +#~ msgstr "" +#~ "我们的 PyTorch 示例基于 PyTorch 1.7。它们应该也能在其他版本中使用。到目前" +#~ "为止,我们提供了以下示例。" #~ msgid "CIFAR-10 Image Classification" #~ msgstr "CIFAR-10 图像分类" #~ msgid "" -#~ "`CIFAR-10 and CIFAR-100 " -#~ "`_ are " -#~ "popular RGB image datasets. The Flower" -#~ " CIFAR-10 example uses PyTorch to " -#~ "train a simple CNN classifier in a" -#~ " federated learning setup with two " +#~ "`CIFAR-10 and CIFAR-100 `_ " +#~ "are popular RGB image datasets. The Flower CIFAR-10 example uses PyTorch " +#~ "to train a simple CNN classifier in a federated learning setup with two " #~ "clients." #~ msgstr "" -#~ "CIFAR-10 和 CIFAR-100 " -#~ "``_ 是流行的 RGB" -#~ " 图像数据集。Flower CIFAR-10 示例使用 PyTorch " -#~ "在有两个客户端的联邦学习设置中训练一个简单的 CNN 分类器。" +#~ "CIFAR-10 和 CIFAR-100 ``_ 是" +#~ "流行的 RGB 图像数据集。Flower CIFAR-10 示例使用 PyTorch 在有两个客户端的联" +#~ "邦学习设置中训练一个简单的 CNN 分类器。" #~ msgid "First, start a Flower server:" #~ msgstr "首先,启动 Flower 服务器:" @@ -24691,16 +24838,13 @@ msgstr "" #~ msgstr "ImageNet-2012 图像分类" #~ msgid "" -#~ "`ImageNet-2012 `_ is " -#~ "one of the major computer vision " -#~ "datasets. The Flower ImageNet example " -#~ "uses PyTorch to train a ResNet-18 " -#~ "classifier in a federated learning setup" -#~ " with ten clients." +#~ "`ImageNet-2012 `_ is one of the major computer " +#~ "vision datasets. The Flower ImageNet example uses PyTorch to train a " +#~ "ResNet-18 classifier in a federated learning setup with ten clients." 
#~ msgstr "" -#~ "ImageNet-2012 `_ " -#~ "是主要的计算机视觉数据集之一。Flower ImageNet 示例使用 PyTorch " -#~ "在有十个客户端的联邦学习设置中训练 ResNet-18 分类器。" +#~ "ImageNet-2012 `_ 是主要的计算机视觉数据集之一。" +#~ "Flower ImageNet 示例使用 PyTorch 在有十个客户端的联邦学习设置中训练 " +#~ "ResNet-18 分类器。" #~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" #~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" @@ -24715,27 +24859,26 @@ msgstr "" #~ msgstr "TensorFlow 示例" #~ msgid "" -#~ "Our TensorFlow examples are based on " -#~ "TensorFlow 2.0 or newer. So far, " -#~ "we provide the following examples." -#~ msgstr "我们的 TensorFlow 示例基于 TensorFlow 2.0 或更新版本。到目前为止,我们提供了以下示例。" +#~ "Our TensorFlow examples are based on TensorFlow 2.0 or newer. So far, we " +#~ "provide the following examples." +#~ msgstr "" +#~ "我们的 TensorFlow 示例基于 TensorFlow 2.0 或更新版本。到目前为止,我们提供" +#~ "了以下示例。" #~ msgid "Fashion-MNIST Image Classification" #~ msgstr "Fashion-MNIST 图像分类" #~ msgid "" -#~ "`Fashion-MNIST `_ is often used as " -#~ "the \"Hello, world!\" of machine " -#~ "learning. We follow this tradition and" -#~ " provide an example which samples " -#~ "random local datasets from Fashion-MNIST" -#~ " and trains a simple image " -#~ "classification model over those partitions." +#~ "`Fashion-MNIST `_ is " +#~ "often used as the \"Hello, world!\" of machine learning. We follow this " +#~ "tradition and provide an example which samples random local datasets from " +#~ "Fashion-MNIST and trains a simple image classification model over those " +#~ "partitions." #~ msgstr "" -#~ "`Fashion-MNIST `_ 经常被用作机器学习的 \"你好,世界!\"。我们遵循这一传统" -#~ ",提供了一个从Fashion-MNIST 中随机抽样本地数据集的示例,并在这些分区上训练一个简单的图像分类模型。" +#~ "`Fashion-MNIST `_ 经常被" +#~ "用作机器学习的 \"你好,世界!\"。我们遵循这一传统,提供了一个从Fashion-" +#~ "MNIST 中随机抽样本地数据集的示例,并在这些分区上训练一个简单的图像分类模" +#~ "型。" #~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" #~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" @@ -24744,59 +24887,7 @@ msgstr "" #~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" #~ msgid "" -#~ "For more details, see " -#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." -#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/tensorflow_fashion_mnist`。" - -#~ msgid "" -#~ "MXNet is no longer maintained and " -#~ "has been moved into `Attic " -#~ "`_. As a " -#~ "result, we would encourage you to " -#~ "use other ML frameworks alongise Flower," -#~ " for example, PyTorch. This tutorial " -#~ "might be removed in future versions " -#~ "of Flower." -#~ msgstr "" - -#~ msgid "|31e4b1afa87c4b968327bbeafbf184d4|" -#~ msgstr "" - -#~ msgid "|c9d935b4284e4c389a33d86b33e07c0a|" -#~ msgstr "" - -#~ msgid "|00727b5faffb468f84dd1b03ded88638|" -#~ msgstr "" - -#~ msgid "|daf0cf0ff4c24fd29439af78416cf47b|" -#~ msgstr "" - -#~ msgid "|9f093007080d471d94ca90d3e9fde9b6|" -#~ msgstr "" - -#~ msgid "|46a26e6150e0479fbd3dfd655f36eb13|" -#~ msgstr "" - -#~ msgid "|3daba297595c4c7fb845d90404a6179a|" -#~ msgstr "" - -#~ msgid "|5769874fa9c4455b80b2efda850d39d7|" -#~ msgstr "" - -#~ msgid "|ba47ffb421814b0f8f9fa5719093d839|" -#~ msgstr "" - -#~ msgid "|aeac5bf79cbf497082e979834717e01b|" -#~ msgstr "" - -#~ msgid "|ce27ed4bbe95459dba016afc42486ba2|" -#~ msgstr "" - -#~ msgid "|ae94a7f71dda443cbec2385751427d41|" -#~ msgstr "" - -#~ msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" -#~ msgstr "" - -#~ msgid "|08cb60859b07461588fe44e55810b050|" +#~ "For more details, see :code:`src/py/flwr_example/" +#~ "tensorflow_fashion_mnist`." 
#~ msgstr "" +#~ "更多详情,请参阅 :code:`src/py/flwr_example/tensorflow_fashion_mnist`。"